-rw-r--r--  build_msvc/bitcoin_config.h.in | 2
-rw-r--r--  ci/README.md | 6
-rwxr-xr-x  ci/lint/04_install.sh | 1
-rwxr-xr-x  ci/test/00_setup_env.sh | 5
-rwxr-xr-x  ci/test/00_setup_env_i686_multiprocess.sh | 3
-rwxr-xr-x  ci/test/00_setup_env_native_fuzz_with_msan.sh | 2
-rwxr-xr-x  ci/test/00_setup_env_native_qt5.sh | 4
-rwxr-xr-x  ci/test/01_base_install.sh | 2
-rwxr-xr-x  ci/test/04_install.sh | 2
-rwxr-xr-x  ci/test/06_script_b.sh | 21
-rw-r--r--  configure.ac | 30
-rwxr-xr-x  contrib/devtools/security-check.py | 2
-rwxr-xr-x  contrib/devtools/symbol-check.py | 2
-rwxr-xr-x  contrib/devtools/test-security-check.py | 2
-rw-r--r--  contrib/guix/manifest.scm | 4
-rw-r--r--  contrib/guix/patches/glibc-ldd-x86_64.patch | 10
-rw-r--r--  contrib/guix/patches/glibc-versioned-locpath.patch | 240
-rw-r--r--  contrib/init/bitcoind.service | 11
-rw-r--r--  depends/hosts/darwin.mk | 3
-rw-r--r--  doc/build-unix.md | 46
-rw-r--r--  doc/dependencies.md | 2
-rw-r--r--  doc/release-notes-27302.md | 4
-rw-r--r--  doc/release-notes/release-notes-25.0.md | 340
-rw-r--r--  doc/release-process.md | 2
-rw-r--r--  src/addrdb.cpp | 14
-rw-r--r--  src/addrdb.h | 3
-rw-r--r--  src/bench/wallet_balance.cpp | 4
-rw-r--r--  src/bitcoin-chainstate.cpp | 2
-rw-r--r--  src/chainparams.cpp | 21
-rw-r--r--  src/chainparams.h | 13
-rw-r--r--  src/common/args.cpp | 3
-rw-r--r--  src/common/args.h | 2
-rw-r--r--  src/common/config.cpp | 8
-rw-r--r--  src/common/init.cpp | 40
-rw-r--r--  src/init.cpp | 36
-rw-r--r--  src/interfaces/chain.h | 3
-rw-r--r--  src/kernel/chain.cpp | 1
-rw-r--r--  src/kernel/checks.cpp | 10
-rw-r--r--  src/kernel/checks.h | 4
-rw-r--r--  src/key_io.cpp | 16
-rw-r--r--  src/net.h | 2
-rw-r--r--  src/net_processing.cpp | 230
-rw-r--r--  src/net_processing.h | 2
-rw-r--r--  src/node/blockmanager_args.cpp | 10
-rw-r--r--  src/node/blockmanager_args.h | 6
-rw-r--r--  src/node/chainstatemanager_args.cpp | 8
-rw-r--r--  src/node/chainstatemanager_args.h | 6
-rw-r--r--  src/node/mempool_args.cpp | 12
-rw-r--r--  src/node/mempool_args.h | 4
-rw-r--r--  src/qt/test/test_main.cpp | 3
-rw-r--r--  src/random.cpp | 33
-rw-r--r--  src/rpc/blockchain.cpp | 2
-rw-r--r--  src/rpc/client.cpp | 3
-rw-r--r--  src/rpc/rawtransaction.cpp | 93
-rw-r--r--  src/rpc/util.cpp | 5
-rw-r--r--  src/rpc/util.h | 2
-rw-r--r--  src/test/fuzz/fuzz.cpp | 27
-rw-r--r--  src/test/fuzz/process_message.cpp | 80
-rw-r--r--  src/test/fuzz/rpc.cpp | 1
-rw-r--r--  src/test/util/setup_common.h | 2
-rw-r--r--  src/test/util/txmempool.cpp | 4
-rw-r--r--  src/txdb.cpp | 10
-rw-r--r--  src/txdb.h | 3
-rw-r--r--  src/util/fs.h | 2
-rw-r--r--  src/util/result.h | 5
-rw-r--r--  src/util/strencodings.h | 4
-rw-r--r--  src/util/string.h | 4
-rw-r--r--  src/validation.cpp | 10
-rw-r--r--  src/wallet/scriptpubkeyman.cpp | 4
-rw-r--r--  src/wallet/scriptpubkeyman.h | 18
-rw-r--r--  src/wallet/wallet.cpp | 176
-rw-r--r--  src/wallet/wallet.h | 18
-rw-r--r--  src/wallet/walletutil.h | 14
-rw-r--r--  test/README.md | 1
-rwxr-xr-x  test/functional/feature_config_args.py | 94
-rwxr-xr-x  test/functional/feature_signet.py | 3
-rwxr-xr-x  test/functional/interface_usdt_mempool.py | 4
-rwxr-xr-x  test/functional/mempool_compatibility.py | 2
-rwxr-xr-x  test/functional/mempool_package_limits.py | 3
-rwxr-xr-x  test/functional/mempool_packages.py | 15
-rwxr-xr-x  test/functional/mempool_persist.py | 1
-rwxr-xr-x  test/functional/p2p_compactblocks.py | 88
-rwxr-xr-x  test/functional/rpc_invalid_address_message.py | 6
-rwxr-xr-x  test/functional/rpc_psbt.py | 47
-rwxr-xr-x  test/functional/rpc_validateaddress.py | 203
-rwxr-xr-x  test/functional/test_framework/test_node.py | 4
-rw-r--r--  test/functional/test_framework/util.py | 20
-rw-r--r--  test/functional/test_framework/wallet.py | 7
-rwxr-xr-x  test/functional/test_runner.py | 11
-rwxr-xr-x  test/functional/wallet_abandonconflict.py | 12
-rwxr-xr-x  test/functional/wallet_basic.py | 33
-rwxr-xr-x  test/functional/wallet_conflicts.py | 127
-rwxr-xr-x  test/lint/lint-python.py | 2
-rwxr-xr-x  test/lint/run-lint-format-strings.py | 30
-rwxr-xr-x  test/util/test_runner.py | 5
95 files changed, 1676 insertions, 771 deletions
diff --git a/build_msvc/bitcoin_config.h.in b/build_msvc/bitcoin_config.h.in
index 05e38b1571..1716647486 100644
--- a/build_msvc/bitcoin_config.h.in
+++ b/build_msvc/bitcoin_config.h.in
@@ -38,7 +38,7 @@
/* Define to 1 to enable SQLite wallet */
#define USE_SQLITE 1
-/* Define to 1 to enable ZMQ functions */
+/* Define this symbol to enable ZMQ functions */
#define ENABLE_ZMQ 1
/* define if external signer support is enabled (requires Boost::Process) */
diff --git a/ci/README.md b/ci/README.md
index d014565f44..b4158d0183 100644
--- a/ci/README.md
+++ b/ci/README.md
@@ -20,12 +20,6 @@ requires `bash`, `docker`, and `python3` to be installed. To install all require
sudo apt install bash docker.io python3
```
-To run the default test stage,
-
-```
-./ci/test_run_all.sh
-```
-
To run the test stage with a specific configuration,
```
diff --git a/ci/lint/04_install.sh b/ci/lint/04_install.sh
index f7147582dc..737cf3e2fb 100755
--- a/ci/lint/04_install.sh
+++ b/ci/lint/04_install.sh
@@ -35,6 +35,7 @@ fi
${CI_RETRY_EXE} pip3 install codespell==2.2.1
${CI_RETRY_EXE} pip3 install flake8==5.0.4
+${CI_RETRY_EXE} pip3 install lief==0.13.1
${CI_RETRY_EXE} pip3 install mypy==0.971
${CI_RETRY_EXE} pip3 install pyzmq==24.0.1
${CI_RETRY_EXE} pip3 install vulture==2.6
diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh
index 4a54f47b03..69fd05051e 100755
--- a/ci/test/00_setup_env.sh
+++ b/ci/test/00_setup_env.sh
@@ -6,6 +6,8 @@
export LC_ALL=C.UTF-8
+set -ex
+
# The root dir.
# The ci system copies this folder.
BASE_ROOT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../../ >/dev/null 2>&1 && pwd )
@@ -37,7 +39,6 @@ export USE_BUSY_BOX=${USE_BUSY_BOX:-false}
export RUN_UNIT_TESTS=${RUN_UNIT_TESTS:-true}
export RUN_FUNCTIONAL_TESTS=${RUN_FUNCTIONAL_TESTS:-true}
export RUN_TIDY=${RUN_TIDY:-false}
-export RUN_SECURITY_TESTS=${RUN_SECURITY_TESTS:-false}
# By how much to scale the test_runner timeouts (option --timeout-factor).
# This is needed because some ci machines have slow CPU or disk, so sanitizers
# might be slow or a reindex might be waiting on disk IO.
@@ -45,8 +46,6 @@ export TEST_RUNNER_TIMEOUT_FACTOR=${TEST_RUNNER_TIMEOUT_FACTOR:-40}
export TEST_RUNNER_ENV=${TEST_RUNNER_ENV:-}
export RUN_FUZZ_TESTS=${RUN_FUZZ_TESTS:-false}
-export CONTAINER_NAME=${CONTAINER_NAME:-ci_unnamed}
-export CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG:-ubuntu:20.04}
# Randomize test order.
# See https://www.boost.org/doc/libs/1_71_0/libs/test/doc/html/boost_test/utf_reference/rt_param_reference/random.html
export BOOST_TEST_RANDOM=${BOOST_TEST_RANDOM:-1}
diff --git a/ci/test/00_setup_env_i686_multiprocess.sh b/ci/test/00_setup_env_i686_multiprocess.sh
index 9e3ea0d383..8ab4d54d31 100755
--- a/ci/test/00_setup_env_i686_multiprocess.sh
+++ b/ci/test/00_setup_env_i686_multiprocess.sh
@@ -12,6 +12,7 @@ export CI_IMAGE_NAME_TAG=ubuntu:20.04
export PACKAGES="cmake python3 llvm clang g++-multilib"
export DEP_OPTS="DEBUG=1 MULTIPROCESS=1"
export GOAL="install"
-export BITCOIN_CONFIG="--enable-debug CC='clang -m32' CXX='clang++ -m32' LDFLAGS='--rtlib=compiler-rt -lgcc_s'"
+export BITCOIN_CONFIG="--enable-debug CC='clang -m32' CXX='clang++ -m32' \
+LDFLAGS='--rtlib=compiler-rt -lgcc_s' CPPFLAGS='-DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE'"
export TEST_RUNNER_ENV="BITCOIND=bitcoin-node"
export TEST_RUNNER_EXTRA="--nosandbox"
diff --git a/ci/test/00_setup_env_native_fuzz_with_msan.sh b/ci/test/00_setup_env_native_fuzz_with_msan.sh
index 35a0de8034..dd694f818c 100755
--- a/ci/test/00_setup_env_native_fuzz_with_msan.sh
+++ b/ci/test/00_setup_env_native_fuzz_with_msan.sh
@@ -17,7 +17,7 @@ export PACKAGES="clang-16 llvm-16 libclang-rt-16-dev cmake"
# BDB generates false-positives and will be removed in future
export DEP_OPTS="NO_BDB=1 NO_QT=1 CC='clang' CXX='clang++' CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
export GOAL="install"
-export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,memory --disable-hardening --with-asm=no CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
+export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,memory --disable-hardening --with-asm=no CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CPPFLAGS='-DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
export USE_MEMORY_SANITIZER="true"
export RUN_UNIT_TESTS="false"
export RUN_FUNCTIONAL_TESTS="false"
diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh
index bb10a2a2de..3a1d7808f1 100755
--- a/ci/test/00_setup_env_native_qt5.sh
+++ b/ci/test/00_setup_env_native_qt5.sh
@@ -17,5 +17,5 @@ export RUN_UNIT_TESTS="false"
export GOAL="install"
export NO_WERROR=1 # -Werror=maybe-uninitialized
export DOWNLOAD_PREVIOUS_RELEASES="true"
-export BITCOIN_CONFIG="--enable-zmq --with-libs=no --with-gui=qt5 --enable-reduce-exports \
---enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\""
+export BITCOIN_CONFIG="--enable-zmq --with-libs=no --with-gui=qt5 --enable-reduce-exports --enable-debug \
+CFLAGS=\"-g0 -O2 -funsigned-char\" CPPFLAGS='-DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE' CXXFLAGS=\"-g0 -O2 -funsigned-char\""
diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh
index beb5aa9242..98f96f0ece 100755
--- a/ci/test/01_base_install.sh
+++ b/ci/test/01_base_install.sh
@@ -6,6 +6,8 @@
export LC_ALL=C.UTF-8
+set -ex
+
CFG_DONE="ci.base-install-done" # Use a global git setting to remember whether this script ran to avoid running it twice
if [ "$(git config --global ${CFG_DONE})" == "true" ]; then
diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh
index e9c54139a7..09d68ee063 100755
--- a/ci/test/04_install.sh
+++ b/ci/test/04_install.sh
@@ -28,7 +28,7 @@ export BINS_SCRATCH_DIR="${BASE_SCRATCH_DIR}/bins/"
if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
# Export all env vars to avoid missing some.
# Though, exclude those with newlines to avoid parsing problems.
- python3 -c 'import os; [print(f"{key}={value}") for key, value in os.environ.items() if "\n" not in value]' | tee /tmp/env
+ python3 -c 'import os; [print(f"{key}={value}") for key, value in os.environ.items() if "\n" not in value and "HOME" not in key]' | tee /tmp/env
echo "Creating $CI_IMAGE_NAME_TAG container to run in"
DOCKER_BUILDKIT=1 ${CI_RETRY_EXE} docker build \
--file "${BASE_ROOT_DIR}/ci/test_imagefile" \
diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh
index 17e9dc1b5c..4c8b07c6a5 100755
--- a/ci/test/06_script_b.sh
+++ b/ci/test/06_script_b.sh
@@ -24,6 +24,11 @@ if [ "$RUN_FUZZ_TESTS" = "true" ]; then
if [ ! -d "$DIR_FUZZ_IN" ]; then
git clone --depth=1 https://github.com/bitcoin-core/qa-assets "${DIR_QA_ASSETS}"
fi
+ (
+ cd "${DIR_QA_ASSETS}"
+ echo "Using qa-assets repo from commit ..."
+ git log -1
+ )
elif [ "$RUN_UNIT_TESTS" = "true" ] || [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then
export DIR_UNIT_TEST_DATA=${DIR_QA_ASSETS}/unit_test_data/
if [ ! -d "$DIR_UNIT_TEST_DATA" ]; then
@@ -101,14 +106,6 @@ cd "${BASE_BUILD_DIR}/bitcoin-$HOST"
bash -c "./configure --cache-file=../config.cache $BITCOIN_CONFIG_ALL $BITCOIN_CONFIG" || ( (cat config.log) && false)
-if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then
- # MemorySanitizer (MSAN) does not support tracking memory initialization done by
- # using the Linux getrandom syscall. Avoid using getrandom by undefining
- # HAVE_SYS_GETRANDOM. See https://github.com/google/sanitizers/issues/852 for
- # details.
- grep -v HAVE_SYS_GETRANDOM src/config/bitcoin-config.h > src/config/bitcoin-config.h.tmp && mv src/config/bitcoin-config.h.tmp src/config/bitcoin-config.h
-fi
-
if [[ "${RUN_TIDY}" == "true" ]]; then
MAYBE_BEAR="bear --config src/.bear-tidy-config"
MAYBE_TOKEN="--"
@@ -158,24 +155,20 @@ if [ "${RUN_TIDY}" = "true" ]; then
# accepted in src/.bear-tidy-config
# Filter out:
# * qt qrc and moc generated files
- # * walletutil (temporarily)
# * secp256k1
- jq 'map(select(.file | test("src/qt/qrc_.*\\.cpp$|/moc_.*\\.cpp$|src/wallet/walletutil|src/secp256k1/src/") | not))' ../compile_commands.json > tmp.json
+ jq 'map(select(.file | test("src/qt/qrc_.*\\.cpp$|/moc_.*\\.cpp$|src/secp256k1/src/") | not))' ../compile_commands.json > tmp.json
mv tmp.json ../compile_commands.json
cd "${BASE_BUILD_DIR}/bitcoin-$HOST/"
python3 "${DIR_IWYU}/include-what-you-use/iwyu_tool.py" \
-p . "${MAKEJOBS}" \
-- -Xiwyu --cxx17ns -Xiwyu --mapping_file="${BASE_BUILD_DIR}/bitcoin-$HOST/contrib/devtools/iwyu/bitcoin.core.imp" \
+ -Xiwyu --max_line_length=160 \
2>&1 | tee /tmp/iwyu_ci.out
cd "${BASE_ROOT_DIR}/src"
python3 "${DIR_IWYU}/include-what-you-use/fix_includes.py" --nosafe_headers < /tmp/iwyu_ci.out
git --no-pager diff
fi
-if [ "$RUN_SECURITY_TESTS" = "true" ]; then
- make test-security-check
-fi
-
if [ "$RUN_FUZZ_TESTS" = "true" ]; then
bash -c "LD_LIBRARY_PATH=${DEPENDS_DIR}/${HOST}/lib test/fuzz/test_runner.py ${FUZZ_TESTS_CONFIG} $MAKEJOBS -l DEBUG ${DIR_FUZZ_IN}"
fi
diff --git a/configure.ac b/configure.ac
index 97b970161c..8015813ec7 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1170,17 +1170,16 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <ctime>]],
)
dnl Check for different ways of gathering OS randomness
-AC_MSG_CHECKING([for Linux getrandom syscall])
-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>
- #include <sys/syscall.h>
- #include <linux/random.h>]],
- [[ syscall(SYS_getrandom, nullptr, 32, 0); ]])],
- [ AC_MSG_RESULT([yes]); AC_DEFINE([HAVE_SYS_GETRANDOM], [1], [Define this symbol if the Linux getrandom system call is available]) ],
+AC_MSG_CHECKING([for Linux getrandom function])
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
+ #include <sys/random.h>]],
+ [[ getrandom(nullptr, 32, 0); ]])],
+ [ AC_MSG_RESULT([yes]); AC_DEFINE([HAVE_GETRANDOM], [1], [Define this symbol if the Linux getrandom function call is available]) ],
[ AC_MSG_RESULT([no])]
)
-AC_MSG_CHECKING([for getentropy via random.h])
-AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <unistd.h>
+AC_MSG_CHECKING([for getentropy via sys/random.h])
+AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
#include <sys/random.h>]],
[[ getentropy(nullptr, 32) ]])],
[ AC_MSG_RESULT([yes]); AC_DEFINE([HAVE_GETENTROPY_RAND], [1], [Define this symbol if the BSD getentropy system call is available with sys/random.h]) ],
@@ -1492,10 +1491,6 @@ if test "$use_boost" = "yes"; then
AX_CHECK_PREPROC_FLAG([-DBOOST_NO_CXX98_FUNCTION_BASE], [BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_NO_CXX98_FUNCTION_BASE"], [], [$CXXFLAG_WERROR],
[AC_LANG_PROGRAM([[#include <boost/config.hpp>]])])
- if test "$enable_debug" = "yes" || test "$enable_fuzz" = "yes"; then
- BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_MULTI_INDEX_ENABLE_SAFE_MODE"
- fi
-
if test "$suppress_external_warnings" != "no"; then
BOOST_CPPFLAGS=SUPPRESS_WARNINGS($BOOST_CPPFLAGS)
fi
@@ -1632,12 +1627,9 @@ dnl ZMQ check
if test "$use_zmq" = "yes"; then
PKG_CHECK_MODULES([ZMQ], [libzmq >= 4],
- AC_DEFINE([ENABLE_ZMQ], [1], [Define to 1 to enable ZMQ functions]),
- [AC_DEFINE([ENABLE_ZMQ], [0], [Define to 1 to enable ZMQ functions])
- AC_MSG_WARN([libzmq version 4.x or greater not found, disabling])
+ AC_DEFINE([ENABLE_ZMQ], [1], [Define this symbol to enable ZMQ functions]),
+ [AC_MSG_WARN([libzmq version 4.x or greater not found, disabling])
use_zmq=no])
-else
- AC_DEFINE_UNQUOTED([ENABLE_ZMQ], [0], [Define to 1 to enable ZMQ functions])
fi
if test "$use_zmq" = "yes"; then
@@ -1649,6 +1641,8 @@ if test "$use_zmq" = "yes"; then
esac
fi
+AM_CONDITIONAL([ENABLE_ZMQ], [test "$use_zmq" = "yes"])
+
dnl libmultiprocess library check
libmultiprocess_found=no
@@ -1843,8 +1837,6 @@ if test "$bitcoin_enable_qt" != "no"; then
fi
fi
-AM_CONDITIONAL([ENABLE_ZMQ], [test "$use_zmq" = "yes"])
-
AC_MSG_CHECKING([whether to build test_bitcoin])
if test "$use_tests" = "yes"; then
if test "$enable_fuzz" = "yes"; then
diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py
index 6cd022ef17..452a1d42d6 100755
--- a/contrib/devtools/security-check.py
+++ b/contrib/devtools/security-check.py
@@ -10,7 +10,7 @@ Otherwise the exit status will be 1 and it will log which executables failed whi
import sys
from typing import List
-import lief #type:ignore
+import lief
def check_ELF_RELRO(binary) -> bool:
'''
diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py
index 3507f954f3..4fb997b023 100755
--- a/contrib/devtools/symbol-check.py
+++ b/contrib/devtools/symbol-check.py
@@ -13,7 +13,7 @@ Example usage:
import sys
from typing import List, Dict
-import lief #type:ignore
+import lief
# Debian 10 (Buster) EOL: 2024. https://wiki.debian.org/LTS
#
diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py
index 54718fd7a1..d666291cba 100755
--- a/contrib/devtools/test-security-check.py
+++ b/contrib/devtools/test-security-check.py
@@ -5,7 +5,7 @@
'''
Test script for security-check.py
'''
-import lief #type:ignore
+import lief
import os
import subprocess
from typing import List
diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm
index d83ff08713..54ddd47353 100644
--- a/contrib/guix/manifest.scm
+++ b/contrib/guix/manifest.scm
@@ -559,9 +559,7 @@ inspecting signatures in Mach-O binaries.")
(sha256
(base32
"0azpb9cvnbv25zg8019rqz48h8i2257ngyjg566dlnp74ivrs9vq"))
- (patches (search-our-patches "glibc-ldd-x86_64.patch"
- "glibc-versioned-locpath.patch"
- "glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch"
+ (patches (search-our-patches "glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch"
"glibc-2.27-fcommon.patch"
"glibc-2.27-guix-prefix.patch"))))))
diff --git a/contrib/guix/patches/glibc-ldd-x86_64.patch b/contrib/guix/patches/glibc-ldd-x86_64.patch
deleted file mode 100644
index a23b095caa..0000000000
--- a/contrib/guix/patches/glibc-ldd-x86_64.patch
+++ /dev/null
@@ -1,10 +0,0 @@
-By default, 'RTDLLIST' in 'ldd' refers to 'lib64/ld-linux-x86-64.so', whereas
-it's in 'lib/' for us. This patch fixes that.
-
---- a/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed
-+++ b/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed
-@@ -1,3 +1,3 @@
- /LD_TRACE_LOADED_OBJECTS=1/a\
- add_env="$add_env LD_LIBRARY_VERSION=\\$verify_out"
--s_^\(RTLDLIST=\)\(.*lib\)\(\|64\|x32\)\(/[^/]*\)\(-x86-64\|-x32\)\(\.so\.[0-9.]*\)[ ]*$_\1"\2\4\6 \264\4-x86-64\6 \2x32\4-x32\6"_
-+s_^\(RTLDLIST=\)\(.*lib\)\(\|64\|x32\)\(/[^/]*\)\(-x86-64\|-x32\)\(\.so\.[0-9.]*\)[ ]*$_\1"\2\4\6 \2\4-x86-64\6 \2x32\4-x32\6"_
diff --git a/contrib/guix/patches/glibc-versioned-locpath.patch b/contrib/guix/patches/glibc-versioned-locpath.patch
deleted file mode 100644
index bc7652127f..0000000000
--- a/contrib/guix/patches/glibc-versioned-locpath.patch
+++ /dev/null
@@ -1,240 +0,0 @@
-The format of locale data can be incompatible between libc versions, and
-loading incompatible data can lead to 'setlocale' returning EINVAL at best
-or triggering an assertion failure at worst. See
-https://lists.gnu.org/archive/html/guix-devel/2015-09/msg00717.html
-for background information.
-
-To address that, this patch changes libc to honor a new 'GUIX_LOCPATH'
-variable, and to look for locale data in version-specific sub-directories of
-that variable. So, if GUIX_LOCPATH=/foo:/bar, locale data is searched for in
-/foo/X.Y and /bar/X.Y, where X.Y is the libc version number.
-
-That way, a single 'GUIX_LOCPATH' setting can work even if different libc
-versions coexist on the system.
-
---- a/locale/newlocale.c
-+++ b/locale/newlocale.c
-@@ -30,6 +30,7 @@
- /* Lock for protecting global data. */
- __libc_rwlock_define (extern , __libc_setlocale_lock attribute_hidden)
-
-+extern error_t compute_locale_search_path (char **, size_t *);
-
- /* Use this when we come along an error. */
- #define ERROR_RETURN \
-@@ -48,7 +49,6 @@ __newlocale (int category_mask, const char *locale, __locale_t base)
- __locale_t result_ptr;
- char *locale_path;
- size_t locale_path_len;
-- const char *locpath_var;
- int cnt;
- size_t names_len;
-
-@@ -102,17 +102,8 @@ __newlocale (int category_mask, const char *locale, __locale_t base)
- locale_path = NULL;
- locale_path_len = 0;
-
-- locpath_var = getenv ("LOCPATH");
-- if (locpath_var != NULL && locpath_var[0] != '\0')
-- {
-- if (__argz_create_sep (locpath_var, ':',
-- &locale_path, &locale_path_len) != 0)
-- return NULL;
--
-- if (__argz_add_sep (&locale_path, &locale_path_len,
-- _nl_default_locale_path, ':') != 0)
-- return NULL;
-- }
-+ if (compute_locale_search_path (&locale_path, &locale_path_len) != 0)
-+ return NULL;
-
- /* Get the names for the locales we are interested in. We either
- allow a composite name or a single name. */
-diff --git a/locale/setlocale.c b/locale/setlocale.c
-index ead030d..0c0e314 100644
---- a/locale/setlocale.c
-+++ b/locale/setlocale.c
-@@ -215,12 +215,65 @@ setdata (int category, struct __locale_data *data)
- }
- }
-
-+/* Return in *LOCALE_PATH and *LOCALE_PATH_LEN the locale data search path as
-+ a colon-separated list. Return ENOMEN on error, zero otherwise. */
-+error_t
-+compute_locale_search_path (char **locale_path, size_t *locale_path_len)
-+{
-+ char* guix_locpath_var = getenv ("GUIX_LOCPATH");
-+ char *locpath_var = getenv ("LOCPATH");
-+
-+ if (guix_locpath_var != NULL && guix_locpath_var[0] != '\0')
-+ {
-+ /* Entries in 'GUIX_LOCPATH' take precedence over 'LOCPATH'. These
-+ entries are systematically prefixed with "/X.Y" where "X.Y" is the
-+ libc version. */
-+ if (__argz_create_sep (guix_locpath_var, ':',
-+ locale_path, locale_path_len) != 0
-+ || __argz_suffix_entries (locale_path, locale_path_len,
-+ "/" VERSION) != 0)
-+ goto bail_out;
-+ }
-+
-+ if (locpath_var != NULL && locpath_var[0] != '\0')
-+ {
-+ char *reg_locale_path = NULL;
-+ size_t reg_locale_path_len = 0;
-+
-+ if (__argz_create_sep (locpath_var, ':',
-+ &reg_locale_path, &reg_locale_path_len) != 0)
-+ goto bail_out;
-+
-+ if (__argz_append (locale_path, locale_path_len,
-+ reg_locale_path, reg_locale_path_len) != 0)
-+ goto bail_out;
-+
-+ free (reg_locale_path);
-+ }
-+
-+ if (*locale_path != NULL)
-+ {
-+ /* Append the system default locale directory. */
-+ if (__argz_add_sep (locale_path, locale_path_len,
-+ _nl_default_locale_path, ':') != 0)
-+ goto bail_out;
-+ }
-+
-+ return 0;
-+
-+ bail_out:
-+ free (*locale_path);
-+ *locale_path = NULL;
-+ *locale_path_len = 0;
-+
-+ return ENOMEM;
-+}
-+
- char *
- setlocale (int category, const char *locale)
- {
- char *locale_path;
- size_t locale_path_len;
-- const char *locpath_var;
- char *composite;
-
- /* Sanity check for CATEGORY argument. */
-@@ -251,17 +304,10 @@ setlocale (int category, const char *locale)
- locale_path = NULL;
- locale_path_len = 0;
-
-- locpath_var = getenv ("LOCPATH");
-- if (locpath_var != NULL && locpath_var[0] != '\0')
-+ if (compute_locale_search_path (&locale_path, &locale_path_len) != 0)
- {
-- if (__argz_create_sep (locpath_var, ':',
-- &locale_path, &locale_path_len) != 0
-- || __argz_add_sep (&locale_path, &locale_path_len,
-- _nl_default_locale_path, ':') != 0)
-- {
-- __libc_rwlock_unlock (__libc_setlocale_lock);
-- return NULL;
-- }
-+ __libc_rwlock_unlock (__libc_setlocale_lock);
-+ return NULL;
- }
-
- if (category == LC_ALL)
-diff --git a/string/Makefile b/string/Makefile
-index 8424a61..f925503 100644
---- a/string/Makefile
-+++ b/string/Makefile
-@@ -38,7 +38,7 @@ routines := strcat strchr strcmp strcoll strcpy strcspn \
- swab strfry memfrob memmem rawmemchr strchrnul \
- $(addprefix argz-,append count create ctsep next \
- delete extract insert stringify \
-- addsep replace) \
-+ addsep replace suffix) \
- envz basename \
- strcoll_l strxfrm_l string-inlines memrchr \
- xpg-strerror strerror_l
-diff --git a/string/argz-suffix.c b/string/argz-suffix.c
-new file mode 100644
-index 0000000..505b0f2
---- /dev/null
-+++ b/string/argz-suffix.c
-@@ -0,0 +1,56 @@
-+/* Copyright (C) 2015 Free Software Foundation, Inc.
-+ This file is part of the GNU C Library.
-+ Contributed by Ludovic Courtès <ludo@gnu.org>.
-+
-+ The GNU C Library is free software; you can redistribute it and/or
-+ modify it under the terms of the GNU Lesser General Public
-+ License as published by the Free Software Foundation; either
-+ version 2.1 of the License, or (at your option) any later version.
-+
-+ The GNU C Library is distributed in the hope that it will be useful,
-+ but WITHOUT ANY WARRANTY; without even the implied warranty of
-+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-+ Lesser General Public License for more details.
-+
-+ You should have received a copy of the GNU Lesser General Public
-+ License along with the GNU C Library; if not, see
-+ <http://www.gnu.org/licenses/>. */
-+
-+#include <argz.h>
-+#include <errno.h>
-+#include <stdlib.h>
-+#include <string.h>
-+
-+
-+error_t
-+__argz_suffix_entries (char **argz, size_t *argz_len, const char *suffix)
-+
-+{
-+ size_t suffix_len = strlen (suffix);
-+ size_t count = __argz_count (*argz, *argz_len);
-+ size_t new_argz_len = *argz_len + count * suffix_len;
-+ char *new_argz = malloc (new_argz_len);
-+
-+ if (new_argz)
-+ {
-+ char *p = new_argz, *entry;
-+
-+ for (entry = *argz;
-+ entry != NULL;
-+ entry = argz_next (*argz, *argz_len, entry))
-+ {
-+ p = stpcpy (p, entry);
-+ p = stpcpy (p, suffix);
-+ p++;
-+ }
-+
-+ free (*argz);
-+ *argz = new_argz;
-+ *argz_len = new_argz_len;
-+
-+ return 0;
-+ }
-+ else
-+ return ENOMEM;
-+}
-+weak_alias (__argz_suffix_entries, argz_suffix_entries)
-diff --git a/string/argz.h b/string/argz.h
-index bb62a31..d276a35 100644
---- a/string/argz.h
-+++ b/string/argz.h
-@@ -134,6 +134,16 @@ extern error_t argz_replace (char **__restrict __argz,
- const char *__restrict __str,
- const char *__restrict __with,
- unsigned int *__restrict __replace_count);
-+
-+/* Suffix each entry of ARGZ & ARGZ_LEN with SUFFIX. Return 0 on success,
-+ and ENOMEN if memory cannot be allocated. */
-+extern error_t __argz_suffix_entries (char **__restrict __argz,
-+ size_t *__restrict __argz_len,
-+ const char *__restrict __suffix);
-+extern error_t argz_suffix_entries (char **__restrict __argz,
-+ size_t *__restrict __argz_len,
-+ const char *__restrict __suffix);
-+
-
- /* Returns the next entry in ARGZ & ARGZ_LEN after ENTRY, or NULL if there
- are no more. If entry is NULL, then the first entry is returned. This
diff --git a/contrib/init/bitcoind.service b/contrib/init/bitcoind.service
index 93de353bb4..87da17f955 100644
--- a/contrib/init/bitcoind.service
+++ b/contrib/init/bitcoind.service
@@ -18,10 +18,11 @@ After=network-online.target
Wants=network-online.target
[Service]
-ExecStart=/usr/bin/bitcoind -daemonwait \
- -pid=/run/bitcoind/bitcoind.pid \
+ExecStart=/usr/bin/bitcoind -pid=/run/bitcoind/bitcoind.pid \
-conf=/etc/bitcoin/bitcoin.conf \
- -datadir=/var/lib/bitcoind
+ -datadir=/var/lib/bitcoind \
+ -startupnotify='systemd-notify --ready' \
+ -shutdownnotify='systemd-notify --stopping'
# Make sure the config directory is readable by the service user
PermissionsStartOnly=true
@@ -30,8 +31,10 @@ ExecStartPre=/bin/chgrp bitcoin /etc/bitcoin
# Process management
####################
-Type=forking
+Type=notify
+NotifyAccess=all
PIDFile=/run/bitcoind/bitcoind.pid
+
Restart=on-failure
TimeoutStartSec=infinity
TimeoutStopSec=600
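
With `Type=notify`, systemd no longer tracks a forked PID; it waits for an explicit readiness message, which this unit produces by having bitcoind run `systemd-notify --ready` and `systemd-notify --stopping` through the new `-startupnotify`/`-shutdownnotify` hooks. As a rough illustration of the underlying protocol only (bitcoind does not link libsystemd; this hypothetical sketch uses the sd_notify C API to show what the CLI helper sends):

```cpp
// Hypothetical illustration of the notify protocol behind Type=notify.
// The unit above has the same messages sent by the systemd-notify tool.
#include <systemd/sd-daemon.h>  // build with -lsystemd

#include <cstdio>

int main()
{
    // ... startup work (the point where -startupnotify would run) ...
    if (sd_notify(/*unset_environment=*/0, "READY=1") <= 0) {
        std::fprintf(stderr, "no notify socket; not started by systemd?\n");
    }

    // ... main service loop ...

    // Announce an orderly shutdown before cleanup (-shutdownnotify point).
    sd_notify(0, "STOPPING=1");
    return 0;
}
```
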
diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk
index 522a6b17ef..08d81cbc99 100644
--- a/depends/hosts/darwin.mk
+++ b/depends/hosts/darwin.mk
@@ -63,7 +63,7 @@ $(foreach TOOL,$(cctools_TOOLS),$(eval darwin_$(TOOL) = $$(build_prefix)/bin/$$(
# Explicitly point to our binaries (e.g. cctools) so that they are
# ensured to be found and preferred over other possibilities.
#
-# -stdlib=libc++ -stdlib++-isystem$(OSX_SDK)/usr/include/c++/v1
+# -stdlib++-isystem$(OSX_SDK)/usr/include/c++/v1
#
# Forces clang to use the libc++ headers from our SDK and completely
# forget about the libc++ headers from the standard directories
@@ -107,7 +107,6 @@ darwin_CXX=env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH \
$(clangxx_prog) --target=$(host) -mmacosx-version-min=$(OSX_MIN_VERSION) \
-B$(build_prefix)/bin -mlinker-version=$(LD64_VERSION) \
-isysroot$(OSX_SDK) \
- -stdlib=libc++ \
-stdlib++-isystem$(OSX_SDK)/usr/include/c++/v1 \
-Xclang -internal-externc-isystem -Xclang $(clang_resource_dir)/include \
-Xclang -internal-externc-isystem -Xclang $(OSX_SDK)/usr/include
diff --git a/doc/build-unix.md b/doc/build-unix.md
index 0c761d9043..3633d4f811 100644
--- a/doc/build-unix.md
+++ b/doc/build-unix.md
@@ -184,52 +184,6 @@ export BDB_PREFIX="/path/to/bitcoin/depends/x86_64-pc-linux-gnu"
**Note**: You only need Berkeley DB if the legacy wallet is enabled (see [*Disable-wallet mode*](#disable-wallet-mode)).
-Security
---------
-To help make your Bitcoin Core installation more secure by making certain attacks impossible to
-exploit even if a vulnerability is found, binaries are hardened by default.
-This can be disabled with:
-
-Hardening Flags:
-
- ./configure --enable-hardening
- ./configure --disable-hardening
-
-
-Hardening enables the following features:
-* _Position Independent Executable_: Build position independent code to take advantage of Address Space Layout Randomization
- offered by some kernels. Attackers who can cause execution of code at an arbitrary memory
- location are thwarted if they don't know where anything useful is located.
- The stack and heap are randomly located by default, but this allows the code section to be
- randomly located as well.
-
- On an AMD64 processor where a library was not compiled with -fPIC, this will cause an error
- such as: "relocation R_X86_64_32 against `......' can not be used when making a shared object;"
-
- To test that you have built PIE executable, install scanelf, part of paxutils, and use:
-
- scanelf -e ./bitcoin
-
- The output should contain:
-
- TYPE
- ET_DYN
-
-* _Non-executable Stack_: If the stack is executable then trivial stack-based buffer overflow exploits are possible if
- vulnerable buffers are found. By default, Bitcoin Core should be built with a non-executable stack,
- but if one of the libraries it uses asks for an executable stack or someone makes a mistake
- and uses a compiler extension which requires an executable stack, it will silently build an
- executable without the non-executable stack protection.
-
- To verify that the stack is non-executable after compiling use:
- `scanelf -e ./bitcoin`
-
- The output should contain:
- STK/REL/PTL
- RW- R-- RW-
-
- The STK RW- means that the stack is readable and writeable but not executable.
-
Disable-wallet mode
--------------------
When the intention is to only run a P2P node, without a wallet, Bitcoin Core can
diff --git a/doc/dependencies.md b/doc/dependencies.md
index 4344ed259e..804f796abe 100644
--- a/doc/dependencies.md
+++ b/doc/dependencies.md
@@ -20,7 +20,7 @@ You can find installation instructions in the `build-*.md` file for your platfor
| [Boost](../depends/packages/boost.mk) | [link](https://www.boost.org/users/download/) | [1.81.0](https://github.com/bitcoin/bitcoin/pull/26557) | [1.64.0](https://github.com/bitcoin/bitcoin/pull/22320) | No |
| [libevent](../depends/packages/libevent.mk) | [link](https://github.com/libevent/libevent/releases) | [2.1.12-stable](https://github.com/bitcoin/bitcoin/pull/21991) | [2.1.8](https://github.com/bitcoin/bitcoin/pull/24681) | No |
| glibc | [link](https://www.gnu.org/software/libc/) | N/A | [2.27](https://github.com/bitcoin/bitcoin/pull/27029) | Yes |
-| Linux Kernel | [link](https://www.kernel.org/) | N/A | 3.2.0 | Yes |
+| Linux Kernel | [link](https://www.kernel.org/) | N/A | [3.17.0](https://github.com/bitcoin/bitcoin/pull/27699) | Yes |
## Optional
diff --git a/doc/release-notes-27302.md b/doc/release-notes-27302.md
new file mode 100644
index 0000000000..e67a6c8b06
--- /dev/null
+++ b/doc/release-notes-27302.md
@@ -0,0 +1,4 @@
+Configuration
+---
+
+- `bitcoind` and `bitcoin-qt` will now raise an error on startup if a datadir that is being used contains a bitcoin.conf file that will be ignored, which can happen when a datadir= line is used in a bitcoin.conf file. The error message is just a diagnostic intended to prevent accidental misconfiguration, and it can be disabled to restore the previous behavior of using the datadir while ignoring the bitcoin.conf contained in it.
diff --git a/doc/release-notes/release-notes-25.0.md b/doc/release-notes/release-notes-25.0.md
new file mode 100644
index 0000000000..919cb3b2f3
--- /dev/null
+++ b/doc/release-notes/release-notes-25.0.md
@@ -0,0 +1,340 @@
+25.0 Release Notes
+==================
+
+Bitcoin Core version 25.0 is now available from:
+
+ <https://bitcoincore.org/bin/bitcoin-core-25.0/>
+
+This release includes new features, various bug fixes and performance
+improvements, as well as updated translations.
+
+Please report bugs using the issue tracker at GitHub:
+
+ <https://github.com/bitcoin/bitcoin/issues>
+
+To receive security and update notifications, please subscribe to:
+
+ <https://bitcoincore.org/en/list/announcements/join/>
+
+How to Upgrade
+==============
+
+If you are running an older version, shut it down. Wait until it has completely
+shut down (which might take a few minutes in some cases), then run the
+installer (on Windows) or just copy over `/Applications/Bitcoin-Qt` (on macOS)
+or `bitcoind`/`bitcoin-qt` (on Linux).
+
+Upgrading directly from a version of Bitcoin Core that has reached its EOL is
+possible, but it might take some time if the data directory needs to be migrated. Old
+wallet versions of Bitcoin Core are generally supported.
+
+Compatibility
+==============
+
+Bitcoin Core is supported and extensively tested on operating systems
+using the Linux kernel, macOS 10.15+, and Windows 7 and newer. Bitcoin
+Core should also work on most other Unix-like systems but is not as
+frequently tested on them. It is not recommended to use Bitcoin Core on
+unsupported systems.
+
+Notable changes
+===============
+
+P2P and network changes
+-----------------------
+
+- Transactions of non-witness size 65 bytes and above are now allowed by mempool
+ and relay policy. This is to better reflect the actual afforded protections
+ against CVE-2017-12842 and open up additional use-cases of smaller transaction sizes. (#26265)
+
+New RPCs
+--------
+
+- The scanblocks RPC returns the relevant blockhashes from a set of descriptors by
+ scanning all blockfilters in the given range. It can be used in combination with
+ the getblockheader and rescanblockchain RPCs to achieve fast wallet rescans. Note
+ that this functionality can only be used if a compact block filter index
+ (-blockfilterindex=1) has been constructed by the node. (#23549)
+
+Updated RPCs
+------------
+
+- All JSON-RPC methods accept a new [named
+ parameter](https://github.com/bitcoin/bitcoin/blob/master/doc/JSON-RPC-interface.md#parameter-passing) called `args` that can
+ contain positional parameter values. This is a convenience to allow some
+ parameter values to be passed by name without having to name every value. The
+ python test framework and `bitcoin-cli` tool both take advantage of this, so
+ for example:
+
+```sh
+bitcoin-cli -named createwallet wallet_name=mywallet load_on_startup=1
+```
+
+Can now be shortened to:
+
+```sh
+bitcoin-cli -named createwallet mywallet load_on_startup=1
+```
+
+- The `verifychain` RPC will now return `false` if the checks didn't fail,
+ but couldn't be completed at the desired depth and level. This could be due
+ to missing data while pruning, due to an insufficient dbcache or due to
+ the node being shutdown before the call could finish. (#25574)
+
+- `sendrawtransaction` has a new, optional argument, `maxburnamount` with a default value of `0`.
+ Any transaction containing an unspendable output with a value greater than `maxburnamount` will
+ not be submitted. At present, the outputs deemed unspendable are those with scripts that begin
+ with an `OP_RETURN` code (known as 'datacarriers'), scripts that exceed the maximum script size,
+ and scripts that contain invalid opcodes.
+
+- The `testmempoolaccept` RPC now returns 2 additional results within the "fees" result:
+ "effective-feerate" is the feerate including fees and sizes of transactions validated together if
+ package validation was used, and also includes any modified fees from prioritisetransaction. The
+ "effective-includes" result lists the wtxids of transactions whose modified fees and sizes were used
+ in the effective-feerate (#26646).
+
+- `decodescript` may now infer a Miniscript descriptor under P2WSH context if it is not lacking
+ information. (#27037)
+
+- `finalizepsbt` is now able to finalize a transaction with inputs spending Miniscript-compatible
+ P2WSH scripts. (#24149)
+
+Changes to wallet related RPCs can be found in the Wallet section below.
+
+Build System
+------------
+
+- The `--enable-upnp-default` and `--enable-natpmp-default` options
+ have been removed. If you want to use port mapping, you can
+ configure it using a .conf file, or by passing the relevant
+ options at runtime. (#26896)
+
+Updated settings
+----------------
+
+- If the `-checkblocks` or `-checklevel` options are explicitly provided by the
+user, but the verification checks cannot be completed due to an insufficient
+dbcache, Bitcoin Core will now return an error at startup. (#25574)
+
+- Ports specified in `-port` and `-rpcport` options are now validated at startup.
+ Values that previously worked and were considered valid can now result in errors. (#22087)
+
+- Setting `-blocksonly` will now reduce the maximum mempool memory
+ to 5MB (users may still use `-maxmempool` to override). Previously,
+ the default 300MB would be used, leading to unexpected memory usage
+ for users running with `-blocksonly` expecting it to eliminate
+ mempool memory usage.
+
+ As unused mempool memory is shared with dbcache, this also reduces
+ the dbcache size for users running with `-blocksonly`, potentially
+ impacting performance.
+- Setting `-maxconnections=0` will now disable `-dnsseed`
+ and `-listen` (users may still set them to override).
+
+Changes to GUI or wallet related settings can be found in the GUI or Wallet section below.
+
+New settings
+------------
+
+- The `shutdownnotify` option is used to specify a command to execute synchronously
+before Bitcoin Core has begun its shutdown sequence. (#23395)
+
+
+Wallet
+------
+
+- The `minconf` option, which allows a user to specify the minimum number
+of confirmations a UTXO being spent has, and the `maxconf` option,
+which allows specifying the maximum number of confirmations, have been
+added to the following RPCs in #25375:
+ - `fundrawtransaction`
+ - `send`
+ - `walletcreatefundedpsbt`
+ - `sendall`
+
+- Added a new `next_index` field in the response in `listdescriptors` to
+ have the same format as `importdescriptors` (#26194)
+
+- RPC `listunspent` now has a new argument `include_immature_coinbase`
+ to include coinbase UTXOs that don't meet the minimum spendability
+ depth requirement (which before were silently skipped). (#25730)
+
+- Rescans for descriptor wallets are now significantly faster if compact
+ block filters (BIP158) are available. Since those are not constructed
+ by default, the configuration option "-blockfilterindex=1" has to be
+ provided to take advantage of the optimization. This improves the
+ performance of the RPC calls `rescanblockchain`, `importdescriptors`
+ and `restorewallet`. (#25957)
+
+- RPC `unloadwallet` now fails if a rescan is in progress. (#26618)
+
+- Wallet passphrases may now contain null characters.
+ Prior to this change, only characters up to the first
+ null character were recognized and accepted. (#27068)
+
+- Address Purposes strings are now restricted to the currently known values of "send",
+ "receive", and "refund". Wallets that have unrecognized purpose strings will have
+ loading warnings, and the `listlabels` RPC will raise an error if an unrecognized purpose
+ is requested. (#27217)
+
+- In the `createwallet`, `loadwallet`, `unloadwallet`, and `restorewallet` RPCs, the
+ "warning" string field is deprecated in favor of a "warnings" field that
+ returns a JSON array of strings to better handle multiple warning messages and
+ for consistency with other wallet RPCs. The "warning" field will be fully
+ removed from these RPCs in v26. It can be temporarily re-enabled during the
+ deprecation period by launching bitcoind with the configuration option
+ `-deprecatedrpc=walletwarningfield`. (#27279)
+
+- Descriptor wallets can now spend coins sent to P2WSH Miniscript descriptors. (#24149)
+
+GUI changes
+-----------
+
+- The "Mask values" is a persistent option now. (gui#701)
+- The "Mask values" option affects the "Transaction" view now, in addition to the
+ "Overview" one. (gui#708)
+
+REST
+----
+
+- A new `/rest/deploymentinfo` endpoint has been added for fetching various
+ state info regarding deployments of consensus changes. (#25412)
+
+Binary verification
+----
+
+- The binary verification script has been updated. In previous releases it
+ would verify that the binaries had been signed with a single "release key".
+ In this release and moving forward it will verify that the binaries are
+ signed by a _threshold of trusted keys_. For more details and
+ examples, see:
+ https://github.com/bitcoin/bitcoin/blob/master/contrib/verify-binaries/README.md
+ (#27358)
+
+Low-level changes
+=================
+
+RPC
+---
+
+- The JSON-RPC server now rejects requests where a parameter is specified multiple
+ times with the same name, instead of silently overwriting earlier parameter values
+ with later ones. (#26628)
+- RPC `listsinceblock` now accepts an optional `label` argument
+ to fetch incoming transactions having the specified label. (#25934)
+- Previously `setban`, `addpeeraddress`, `walletcreatefundedpsbt`, methods
+ allowed non-boolean and non-null values to be passed as boolean parameters.
+ Any string, number, array, or object value that was passed would be treated
+ as false. After this change, passing any value except `true`, `false`, or
+ `null` now triggers a JSON value is not of expected type error. (#26213)
+
+Credits
+=======
+
+Thanks to everyone who directly contributed to this release:
+
+- 0xb10c
+- 721217.xyz
+- @RandyMcMillan
+- amadeuszpawlik
+- Amiti Uttarwar
+- Andrew Chow
+- Andrew Toth
+- Anthony Towns
+- Antoine Poinsot
+- Aurèle Oulès
+- Ben Woosley
+- Bitcoin Hodler
+- brunoerg
+- Bushstar
+- Carl Dong
+- Chris Geihsler
+- Cory Fields
+- David Gumberg
+- dergoegge
+- Dhruv Mehta
+- Dimitris Tsapakidis
+- dougEfish
+- Douglas Chimento
+- ekzyis
+- Elichai Turkel
+- Ethan Heilman
+- Fabian Jahr
+- FractalEncrypt
+- furszy
+- Gleb Naumenko
+- glozow
+- Greg Sanders
+- Hennadii Stepanov
+- hernanmarino
+- ishaanam
+- ismaelsadeeq
+- James O'Beirne
+- jdjkelly@gmail.com
+- Jeff Ruane
+- Jeffrey Czyz
+- Jeremy Rubin
+- Jesse Barton
+- João Barbosa
+- JoaoAJMatos
+- John Moffett
+- Jon Atack
+- Jonas Schnelli
+- jonatack
+- Joshua Kelly
+- josibake
+- Juan Pablo Civile
+- kdmukai
+- klementtan
+- Kolby ML
+- kouloumos
+- Kristaps Kaupe
+- laanwj
+- Larry Ruane
+- Leonardo Araujo
+- Leonardo Lazzaro
+- Luke Dashjr
+- MacroFake
+- MarcoFalke
+- Martin Leitner-Ankerl
+- Martin Zumsande
+- Matt Whitlock
+- Matthew Zipkin
+- Michael Ford
+- Miles Liu
+- mruddy
+- Murray Nesbitt
+- muxator
+- omahs
+- pablomartin4btc
+- Pasta
+- Pieter Wuille
+- Pttn
+- Randall Naar
+- Riahiamirreza
+- roconnor-blockstream
+- Russell O'Connor
+- Ryan Ofsky
+- S3RK
+- Sebastian Falbesoner
+- Seibart Nedor
+- sinetek
+- Sjors Provoost
+- Skuli Dulfari
+- SomberNight
+- Stacie Waleyko
+- stickies-v
+- stratospher
+- Suhas Daftuar
+- Suriyaa Sundararuban
+- TheCharlatan
+- Vasil Dimov
+- Vasil Stoyanov
+- virtu
+- w0xlt
+- willcl-ark
+- yancy
+- Yusuf Sahin HAMZA
+
+As well as to everyone that helped with translations on
+[Transifex](https://www.transifex.com/bitcoin/bitcoin/).
\ No newline at end of file
diff --git a/doc/release-process.md b/doc/release-process.md
index cf014c940b..930110922c 100644
--- a/doc/release-process.md
+++ b/doc/release-process.md
@@ -284,7 +284,7 @@ cat "$VERSION"/*/all.SHA256SUMS.asc > SHA256SUMS.asc
- Push the flatpak to flathub, e.g. https://github.com/flathub/org.bitcoincore.bitcoin-qt/pull/2
- - Push the snap, see https://github.com/bitcoin-core/packaging/blob/master/snap/build.md
+ - Push the snap, see https://github.com/bitcoin-core/packaging/blob/main/snap/local/build.md
- This repo
diff --git a/src/addrdb.cpp b/src/addrdb.cpp
index b679ad0602..fcb0f0719b 100644
--- a/src/addrdb.cpp
+++ b/src/addrdb.cpp
@@ -183,10 +183,10 @@ void ReadFromStream(AddrMan& addr, CDataStream& ssPeers)
DeserializeDB(ssPeers, addr, false);
}
-std::optional<bilingual_str> LoadAddrman(const NetGroupManager& netgroupman, const ArgsManager& args, std::unique_ptr<AddrMan>& addrman)
+util::Result<std::unique_ptr<AddrMan>> LoadAddrman(const NetGroupManager& netgroupman, const ArgsManager& args)
{
auto check_addrman = std::clamp<int32_t>(args.GetIntArg("-checkaddrman", DEFAULT_ADDRMAN_CONSISTENCY_CHECKS), 0, 1000000);
- addrman = std::make_unique<AddrMan>(netgroupman, /*deterministic=*/false, /*consistency_check_ratio=*/check_addrman);
+ auto addrman{std::make_unique<AddrMan>(netgroupman, /*deterministic=*/false, /*consistency_check_ratio=*/check_addrman)};
const auto start{SteadyClock::now()};
const auto path_addr{args.GetDataDirNet() / "peers.dat"};
@@ -200,19 +200,17 @@ std::optional<bilingual_str> LoadAddrman(const NetGroupManager& netgroupman, con
DumpPeerAddresses(args, *addrman);
} catch (const InvalidAddrManVersionError&) {
if (!RenameOver(path_addr, (fs::path)path_addr + ".bak")) {
- addrman = nullptr;
- return strprintf(_("Failed to rename invalid peers.dat file. Please move or delete it and try again."));
+ return util::Error{strprintf(_("Failed to rename invalid peers.dat file. Please move or delete it and try again."))};
}
// Addrman can be in an inconsistent state after failure, reset it
addrman = std::make_unique<AddrMan>(netgroupman, /*deterministic=*/false, /*consistency_check_ratio=*/check_addrman);
LogPrintf("Creating new peers.dat because the file version was not compatible (%s). Original backed up to peers.dat.bak\n", fs::quoted(fs::PathToString(path_addr)));
DumpPeerAddresses(args, *addrman);
} catch (const std::exception& e) {
- addrman = nullptr;
- return strprintf(_("Invalid or corrupt peers.dat (%s). If you believe this is a bug, please report it to %s. As a workaround, you can move the file (%s) out of the way (rename, move, or delete) to have a new one created on the next start."),
- e.what(), PACKAGE_BUGREPORT, fs::quoted(fs::PathToString(path_addr)));
+ return util::Error{strprintf(_("Invalid or corrupt peers.dat (%s). If you believe this is a bug, please report it to %s. As a workaround, you can move the file (%s) out of the way (rename, move, or delete) to have a new one created on the next start."),
+ e.what(), PACKAGE_BUGREPORT, fs::quoted(fs::PathToString(path_addr)))};
}
- return std::nullopt;
+ return std::move(addrman); // std::move should be unneccessary but is temporarily needed to work around clang bug (https://github.com/bitcoin/bitcoin/pull/25977#issuecomment-1561270092)
}
void DumpAnchors(const fs::path& anchors_db_path, const std::vector<CAddress>& anchors)
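
With this change LoadAddrman returns a `util::Result` holding the constructed AddrMan instead of writing through an out-parameter and returning an optional error string. A minimal sketch of how a caller could consume the new signature, with illustrative surrounding names (the real call site lives in the node init code, not in this diff):

```cpp
// Sketch of a caller of the new LoadAddrman() signature. The Result is
// falsy on failure and util::ErrorString() extracts the message it carries.
#include <addrdb.h>
#include <addrman.h>
#include <logging.h>
#include <util/result.h>
#include <util/translation.h>

#include <memory>
#include <utility>

bool InitAddrman(const NetGroupManager& netgroupman, const ArgsManager& args,
                 std::unique_ptr<AddrMan>& addrman_out)
{
    util::Result<std::unique_ptr<AddrMan>> addrman{LoadAddrman(netgroupman, args)};
    if (!addrman) {
        LogPrintf("Error: %s\n", util::ErrorString(addrman).original);
        return false;
    }
    addrman_out = std::move(*addrman);
    return true;
}
```
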
diff --git a/src/addrdb.h b/src/addrdb.h
index 08d86d0f01..3da630fff8 100644
--- a/src/addrdb.h
+++ b/src/addrdb.h
@@ -9,6 +9,7 @@
#include <net_types.h> // For banmap_t
#include <univalue.h>
#include <util/fs.h>
+#include <util/result.h>
#include <optional>
#include <vector>
@@ -49,7 +50,7 @@ public:
};
/** Returns an error string on failure */
-std::optional<bilingual_str> LoadAddrman(const NetGroupManager& netgroupman, const ArgsManager& args, std::unique_ptr<AddrMan>& addrman);
+util::Result<std::unique_ptr<AddrMan>> LoadAddrman(const NetGroupManager& netgroupman, const ArgsManager& args);
/**
* Dump the anchor IP address database (anchors.dat)
diff --git a/src/bench/wallet_balance.cpp b/src/bench/wallet_balance.cpp
index 2c77b9b22b..6cf71f4b83 100644
--- a/src/bench/wallet_balance.cpp
+++ b/src/bench/wallet_balance.cpp
@@ -4,6 +4,7 @@
#include <bench/bench.h>
#include <interfaces/chain.h>
+#include <node/chainstate.h>
#include <node/context.h>
#include <test/util/mining.h>
#include <test/util/setup_common.h>
@@ -28,6 +29,9 @@ static void WalletBalance(benchmark::Bench& bench, const bool set_dirty, const b
const auto& ADDRESS_WATCHONLY = ADDRESS_BCRT1_UNSPENDABLE;
+ // Set clock to genesis block, so the descriptors/keys creation time don't interfere with the blocks scanning process.
+ // The reason is 'generatetoaddress', which creates a chain with deterministic timestamps in the past.
+ SetMockTime(test_setup->m_node.chainman->GetParams().GenesisBlock().nTime);
CWallet wallet{test_setup->m_node.chain.get(), "", CreateMockableWalletDatabase()};
{
LOCK(wallet.cs_wallet);
diff --git a/src/bitcoin-chainstate.cpp b/src/bitcoin-chainstate.cpp
index 6ee1ff4594..5678d4a526 100644
--- a/src/bitcoin-chainstate.cpp
+++ b/src/bitcoin-chainstate.cpp
@@ -64,7 +64,7 @@ int main(int argc, char* argv[])
// We can't use a goto here, but we can use an assert since none of the
// things instantiated so far requires running the epilogue to be torn down
// properly
- assert(!kernel::SanityChecks(kernel_context).has_value());
+ assert(kernel::SanityChecks(kernel_context));
// Necessary for CheckInputScripts (eventually called by ProcessNewBlock),
// which will try the script cache first and fall back to actually
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index 6f4453d1fe..539578085b 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -6,17 +6,20 @@
#include <chainparams.h>
#include <chainparamsbase.h>
-#include <chainparamsseeds.h>
#include <common/args.h>
-#include <consensus/merkle.h>
+#include <consensus/params.h>
#include <deploymentinfo.h>
-#include <hash.h> // for signet block challenge hash
#include <logging.h>
-#include <script/interpreter.h>
+#include <tinyformat.h>
#include <util/chaintype.h>
+#include <util/strencodings.h>
#include <util/string.h>
-#include <assert.h>
+#include <cassert>
+#include <cstdint>
+#include <limits>
+#include <stdexcept>
+#include <vector>
void ReadSigNetArgs(const ArgsManager& args, CChainParams::SigNetOptions& options)
{
@@ -26,9 +29,13 @@ void ReadSigNetArgs(const ArgsManager& args, CChainParams::SigNetOptions& option
if (args.IsArgSet("-signetchallenge")) {
const auto signet_challenge = args.GetArgs("-signetchallenge");
if (signet_challenge.size() != 1) {
- throw std::runtime_error(strprintf("%s: -signetchallenge cannot be multiple values.", __func__));
+ throw std::runtime_error("-signetchallenge cannot be multiple values.");
}
- options.challenge.emplace(ParseHex(signet_challenge[0]));
+ const auto val{TryParseHex<uint8_t>(signet_challenge[0])};
+ if (!val) {
+ throw std::runtime_error(strprintf("-signetchallenge must be hex, not '%s'.", signet_challenge[0]));
+ }
+ options.challenge.emplace(*val);
}
}
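
ReadSigNetArgs now validates `-signetchallenge` with TryParseHex, which returns `std::nullopt` for any non-hex input, rather than ParseHex, which silently dropped invalid characters. A small standalone sketch of the same pattern (the helper name here is illustrative):

```cpp
// Standalone sketch of the TryParseHex validation pattern used above.
#include <tinyformat.h>
#include <util/strencodings.h>

#include <cstdint>
#include <stdexcept>
#include <string>
#include <vector>

std::vector<uint8_t> ParseChallengeOrThrow(const std::string& hex)
{
    // TryParseHex<uint8_t> yields std::nullopt if `hex` is not valid hex.
    const auto bytes{TryParseHex<uint8_t>(hex)};
    if (!bytes) {
        throw std::runtime_error(strprintf("-signetchallenge must be hex, not '%s'.", hex));
    }
    return *bytes;
}
```
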
diff --git a/src/chainparams.h b/src/chainparams.h
index 1e8366dcf5..4743e022db 100644
--- a/src/chainparams.h
+++ b/src/chainparams.h
@@ -6,20 +6,9 @@
#ifndef BITCOIN_CHAINPARAMS_H
#define BITCOIN_CHAINPARAMS_H
-#include <kernel/chainparams.h>
+#include <kernel/chainparams.h> // IWYU pragma: export
-#include <consensus/params.h>
-#include <netaddress.h>
-#include <primitives/block.h>
-#include <protocol.h>
-#include <util/chaintype.h>
-#include <util/hash_type.h>
-
-#include <cstdint>
#include <memory>
-#include <string>
-#include <unordered_map>
-#include <vector>
class ArgsManager;
diff --git a/src/common/args.cpp b/src/common/args.cpp
index 93f2005931..c9af2d7f5e 100644
--- a/src/common/args.cpp
+++ b/src/common/args.cpp
@@ -716,7 +716,8 @@ bool CheckDataDirOption(const ArgsManager& args)
fs::path ArgsManager::GetConfigFilePath() const
{
- return GetConfigFile(*this, GetPathArg("-conf", BITCOIN_CONF_FILENAME));
+ LOCK(cs_args);
+ return *Assert(m_config_path);
}
ChainType ArgsManager::GetChainType() const
diff --git a/src/common/args.h b/src/common/args.h
index 537a64fcfd..7569297a74 100644
--- a/src/common/args.h
+++ b/src/common/args.h
@@ -28,7 +28,6 @@ extern const char * const BITCOIN_SETTINGS_FILENAME;
// Return true if -datadir option points to a valid directory or is not specified.
bool CheckDataDirOption(const ArgsManager& args);
-fs::path GetConfigFile(const ArgsManager& args, const fs::path& configuration_file_path);
/**
* Most paths passed as configuration arguments are treated as relative to
@@ -138,6 +137,7 @@ protected:
std::map<OptionsCategory, std::map<std::string, Arg>> m_available_args GUARDED_BY(cs_args);
bool m_accept_any_command GUARDED_BY(cs_args){true};
std::list<SectionInfo> m_config_sections GUARDED_BY(cs_args);
+ std::optional<fs::path> m_config_path GUARDED_BY(cs_args);
mutable fs::path m_cached_blocks_path GUARDED_BY(cs_args);
mutable fs::path m_cached_datadir_path GUARDED_BY(cs_args);
mutable fs::path m_cached_network_datadir_path GUARDED_BY(cs_args);
diff --git a/src/common/config.cpp b/src/common/config.cpp
index 5efb5efb67..e25b4fe2df 100644
--- a/src/common/config.cpp
+++ b/src/common/config.cpp
@@ -27,11 +27,6 @@
#include <utility>
#include <vector>
-fs::path GetConfigFile(const ArgsManager& args, const fs::path& configuration_file_path)
-{
- return AbsPathForConfigVal(args, configuration_file_path, /*net_specific=*/false);
-}
-
static bool GetConfigOptions(std::istream& stream, const std::string& filepath, std::string& error, std::vector<std::pair<std::string, std::string>>& options, std::list<SectionInfo>& sections)
{
std::string str, prefix;
@@ -126,6 +121,7 @@ bool ArgsManager::ReadConfigFiles(std::string& error, bool ignore_invalid_keys)
LOCK(cs_args);
m_settings.ro_config.clear();
m_config_sections.clear();
+ m_config_path = AbsPathForConfigVal(*this, GetPathArg("-conf", BITCOIN_CONF_FILENAME), /*net_specific=*/false);
}
const auto conf_path{GetConfigFilePath()};
@@ -176,7 +172,7 @@ bool ArgsManager::ReadConfigFiles(std::string& error, bool ignore_invalid_keys)
const size_t default_includes = add_includes({});
for (const std::string& conf_file_name : conf_file_names) {
- std::ifstream conf_file_stream{GetConfigFile(*this, fs::PathFromString(conf_file_name))};
+ std::ifstream conf_file_stream{AbsPathForConfigVal(*this, fs::PathFromString(conf_file_name), /*net_specific=*/false)};
if (conf_file_stream.good()) {
if (!ReadConfigStream(conf_file_stream, conf_file_name, error, ignore_invalid_keys)) {
return false;
diff --git a/src/common/init.cpp b/src/common/init.cpp
index 8933d59c27..412d73aec7 100644
--- a/src/common/init.cpp
+++ b/src/common/init.cpp
@@ -5,6 +5,7 @@
#include <chainparams.h>
#include <common/args.h>
#include <common/init.h>
+#include <logging.h>
#include <tinyformat.h>
#include <util/fs.h>
#include <util/translation.h>
@@ -20,6 +21,19 @@ std::optional<ConfigError> InitConfig(ArgsManager& args, SettingsAbortFn setting
if (!CheckDataDirOption(args)) {
return ConfigError{ConfigStatus::FAILED, strprintf(_("Specified data directory \"%s\" does not exist."), args.GetArg("-datadir", ""))};
}
+
+ // Record original datadir and config paths before parsing the config
+ // file. It is possible for the config file to contain a datadir= line
+ // that changes the datadir path after it is parsed. This is useful for
+ // CLI tools to let them use a different data storage location without
+ // needing to pass it every time on the command line. (It is not
+ // possible for the config file to cause another configuration to be
+ // used, though. Specifying a conf= option in the config file causes a
+ // parse error, and specifying a datadir= location containing another
+ // bitcoin.conf file just ignores the other file.)
+ const fs::path orig_datadir_path{args.GetDataDirBase()};
+ const fs::path orig_config_path{AbsPathForConfigVal(args, args.GetPathArg("-conf", BITCOIN_CONF_FILENAME), /*net_specific=*/false)};
+
std::string error;
if (!args.ReadConfigFiles(error, true)) {
return ConfigError{ConfigStatus::FAILED, strprintf(_("Error reading configuration file: %s"), error)};
@@ -48,6 +62,32 @@ std::optional<ConfigError> InitConfig(ArgsManager& args, SettingsAbortFn setting
fs::create_directories(net_path / "wallets");
}
+ // Show an error or warning if there is a bitcoin.conf file in the
+ // datadir that is being ignored.
+ const fs::path base_config_path = base_path / BITCOIN_CONF_FILENAME;
+ if (fs::exists(base_config_path) && !fs::equivalent(orig_config_path, base_config_path)) {
+ const std::string cli_config_path = args.GetArg("-conf", "");
+ const std::string config_source = cli_config_path.empty()
+ ? strprintf("data directory %s", fs::quoted(fs::PathToString(orig_datadir_path)))
+ : strprintf("command line argument %s", fs::quoted("-conf=" + cli_config_path));
+ const std::string error = strprintf(
+ "Data directory %1$s contains a %2$s file which is ignored, because a different configuration file "
+ "%3$s from %4$s is being used instead. Possible ways to address this would be to:\n"
+ "- Delete or rename the %2$s file in data directory %1$s.\n"
+ "- Change datadir= or conf= options to specify one configuration file, not two, and use "
+ "includeconf= to include any other configuration files.\n"
+ "- Set allowignoredconf=1 option to treat this condition as a warning, not an error.",
+ fs::quoted(fs::PathToString(base_path)),
+ fs::quoted(BITCOIN_CONF_FILENAME),
+ fs::quoted(fs::PathToString(orig_config_path)),
+ config_source);
+ if (args.GetBoolArg("-allowignoredconf", false)) {
+ LogPrintf("Warning: %s\n", error);
+ } else {
+ return ConfigError{ConfigStatus::FAILED, Untranslated(error)};
+ }
+ }
+
// Create settings.json if -nosettings was not specified.
if (args.GetSettingsPath()) {
std::vector<std::string> details;
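Side note on the InitConfig change above: the warning/error only fires when the datadir's own bitcoin.conf is a genuinely different file from the one being read, which is why fs::equivalent() is used rather than a plain path comparison. Below is a minimal standalone sketch of that check using std::filesystem directly instead of Bitcoin Core's fs wrapper; the paths are hypothetical and only illustrate how equivalent() treats "same file reached via a different spelling" as not ignored.

// Sketch: decide whether a bitcoin.conf sitting in the datadir is being ignored.
// Standalone C++17 illustration; paths are hypothetical examples.
#include <filesystem>
#include <iostream>

namespace stdfs = std::filesystem;

int main()
{
    const stdfs::path datadir{"/home/user/.bitcoin"};          // hypothetical datadir
    const stdfs::path used_conf{"/etc/bitcoin/bitcoin.conf"};  // hypothetical -conf value
    const stdfs::path datadir_conf = datadir / "bitcoin.conf";

    // Only complain if a config file exists in the datadir and it is not the
    // same file as the one actually read (equivalent() resolves symlinks and
    // relative paths, so a different spelling of the same file is fine).
    if (stdfs::exists(datadir_conf) &&
        !(stdfs::exists(used_conf) && stdfs::equivalent(used_conf, datadir_conf))) {
        std::cout << datadir_conf << " exists but is being ignored\n";
    }
}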
diff --git a/src/init.cpp b/src/init.cpp
index bb14d3dbfd..d9e5e70ee3 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -446,6 +446,7 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
argsman.AddArg("-dbcache=<n>", strprintf("Maximum database cache size <n> MiB (%d to %d, default: %d). In addition, unused mempool memory is shared for this cache (see -maxmempool).", nMinDbCache, nMaxDbCache, nDefaultDbCache), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-includeconf=<file>", "Specify additional configuration file, relative to the -datadir path (only useable from configuration file, not command line)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-allowignoredconf", strprintf("For backwards compatibility, treat an unused %s file in the datadir as a warning, not an error.", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-loadblock=<file>", "Imports blocks from external file on startup", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-maxmempool=<n>", strprintf("Keep the transaction memory pool below <n> megabytes (default: %u)", DEFAULT_MAX_MEMPOOL_SIZE_MB), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-maxorphantx=<n>", strprintf("Keep at most <n> unconnectable transactions in memory (default: %u)", DEFAULT_MAX_ORPHAN_TRANSACTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
@@ -1027,15 +1028,17 @@ bool AppInitParameterInteraction(const ArgsManager& args, bool use_syscall_sandb
.datadir = args.GetDataDirNet(),
.notifications = notifications,
};
- if (const auto error{ApplyArgsManOptions(args, chainman_opts_dummy)}) {
- return InitError(*error);
+ auto chainman_result{ApplyArgsManOptions(args, chainman_opts_dummy)};
+ if (!chainman_result) {
+ return InitError(util::ErrorString(chainman_result));
}
BlockManager::Options blockman_opts_dummy{
.chainparams = chainman_opts_dummy.chainparams,
.blocks_dir = args.GetBlocksDirPath(),
};
- if (const auto error{ApplyArgsManOptions(args, blockman_opts_dummy)}) {
- return InitError(*error);
+ auto blockman_result{ApplyArgsManOptions(args, blockman_opts_dummy)};
+ if (!blockman_result) {
+ return InitError(util::ErrorString(blockman_result));
}
}
@@ -1058,8 +1061,9 @@ static bool LockDataDirectory(bool probeOnly)
bool AppInitSanityChecks(const kernel::Context& kernel)
{
// ********************************************************* Step 4: sanity checks
- if (auto error = kernel::SanityChecks(kernel)) {
- InitError(*error);
+ auto result{kernel::SanityChecks(kernel)};
+ if (!result) {
+ InitError(util::ErrorString(result));
return InitError(strprintf(_("Initialization sanity check failed. %s is shutting down."), PACKAGE_NAME));
}
@@ -1234,9 +1238,9 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// Initialize addrman
assert(!node.addrman);
uiInterface.InitMessage(_("Loading P2P addresses…").translated);
- if (const auto error{LoadAddrman(*node.netgroupman, args, node.addrman)}) {
- return InitError(*error);
- }
+ auto addrman{LoadAddrman(*node.netgroupman, args)};
+ if (!addrman) return InitError(util::ErrorString(addrman));
+ node.addrman = std::move(*addrman);
}
assert(!node.banman);
@@ -1440,13 +1444,13 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
.adjusted_time_callback = GetAdjustedTime,
.notifications = *node.notifications,
};
- Assert(!ApplyArgsManOptions(args, chainman_opts)); // no error can happen, already checked in AppInitParameterInteraction
+ Assert(ApplyArgsManOptions(args, chainman_opts)); // no error can happen, already checked in AppInitParameterInteraction
BlockManager::Options blockman_opts{
.chainparams = chainman_opts.chainparams,
.blocks_dir = args.GetBlocksDirPath(),
};
- Assert(!ApplyArgsManOptions(args, blockman_opts)); // no error can happen, already checked in AppInitParameterInteraction
+ Assert(ApplyArgsManOptions(args, blockman_opts)); // no error can happen, already checked in AppInitParameterInteraction
// cache size calculations
CacheSizes cache_sizes = CalculateCacheSizes(args, g_enabled_filter_types.size());
@@ -1469,8 +1473,9 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
.estimator = node.fee_estimator.get(),
.check_ratio = chainparams.DefaultConsistencyChecks() ? 1 : 0,
};
- if (const auto err{ApplyArgsManOptions(args, chainparams, mempool_opts)}) {
- return InitError(*err);
+ auto result{ApplyArgsManOptions(args, chainparams, mempool_opts)};
+ if (!result) {
+ return InitError(util::ErrorString(result));
}
mempool_opts.check_ratio = std::clamp<int>(mempool_opts.check_ratio, 0, 1'000'000);
@@ -1569,8 +1574,9 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// If reindex-chainstate was specified, delay syncing indexes until ThreadImport has reindexed the chain
if (!fReindexChainState) g_indexes_ready_to_sync = true;
if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
- if (const auto error{WITH_LOCK(cs_main, return CheckLegacyTxindex(*Assert(chainman.m_blockman.m_block_tree_db)))}) {
- return InitError(*error);
+ auto result{WITH_LOCK(cs_main, return CheckLegacyTxindex(*Assert(chainman.m_blockman.m_block_tree_db)))};
+ if (!result) {
+ return InitError(util::ErrorString(result));
}
g_txindex = std::make_unique<TxIndex>(interfaces::MakeChain(node), cache_sizes.tx_index, false, fReindex);
diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h
index a3fa753a98..40bf0b680c 100644
--- a/src/interfaces/chain.h
+++ b/src/interfaces/chain.h
@@ -87,6 +87,9 @@ struct BlockInfo {
unsigned data_pos = 0;
const CBlock* data = nullptr;
const CBlockUndo* undo_data = nullptr;
+ // The maximum time in the chain up to and including this block.
+ // A timestamp that can only move forward.
+ unsigned int chain_time_max{0};
BlockInfo(const uint256& hash LIFETIMEBOUND) : hash(hash) {}
};
diff --git a/src/kernel/chain.cpp b/src/kernel/chain.cpp
index 82e77125d7..1c877866d0 100644
--- a/src/kernel/chain.cpp
+++ b/src/kernel/chain.cpp
@@ -16,6 +16,7 @@ interfaces::BlockInfo MakeBlockInfo(const CBlockIndex* index, const CBlock* data
if (index) {
info.prev_hash = index->pprev ? index->pprev->phashBlock : nullptr;
info.height = index->nHeight;
+ info.chain_time_max = index->GetBlockTimeMax();
LOCK(::cs_main);
info.file_number = index->nFile;
info.data_pos = index->nDataPos;
diff --git a/src/kernel/checks.cpp b/src/kernel/checks.cpp
index 4c303c172c..bf8a2ec74c 100644
--- a/src/kernel/checks.cpp
+++ b/src/kernel/checks.cpp
@@ -13,21 +13,21 @@
namespace kernel {
-std::optional<bilingual_str> SanityChecks(const Context&)
+util::Result<void> SanityChecks(const Context&)
{
if (!ECC_InitSanityCheck()) {
- return Untranslated("Elliptic curve cryptography sanity check failure. Aborting.");
+ return util::Error{Untranslated("Elliptic curve cryptography sanity check failure. Aborting.")};
}
if (!Random_SanityCheck()) {
- return Untranslated("OS cryptographic RNG sanity check failure. Aborting.");
+ return util::Error{Untranslated("OS cryptographic RNG sanity check failure. Aborting.")};
}
if (!ChronoSanityCheck()) {
- return Untranslated("Clock epoch mismatch. Aborting.");
+ return util::Error{Untranslated("Clock epoch mismatch. Aborting.")};
}
- return std::nullopt;
+ return {};
}
}
diff --git a/src/kernel/checks.h b/src/kernel/checks.h
index 3eb14824fb..8cff4e3be4 100644
--- a/src/kernel/checks.h
+++ b/src/kernel/checks.h
@@ -5,7 +5,7 @@
#ifndef BITCOIN_KERNEL_CHECKS_H
#define BITCOIN_KERNEL_CHECKS_H
-#include <optional>
+#include <util/result.h>
struct bilingual_str;
@@ -16,7 +16,7 @@ struct Context;
/**
* Ensure a usable environment with all necessary library support.
*/
-std::optional<bilingual_str> SanityChecks(const Context&);
+util::Result<void> SanityChecks(const Context&);
}
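The std::optional<bilingual_str> to util::Result<void> conversions in this patch all follow one calling pattern: success is an empty result, failure carries a message, callers test the result with operator bool and read the text via util::ErrorString(). The sketch below is a simplified standalone analogue (a plain std::string and a local Result struct stand in for bilingual_str and util::Result) meant only to show the control flow, not the real API:

// Simplified standalone analogue of the util::Result<void> calling pattern.
// "Result" and "ErrorString" are local stand-ins, not Bitcoin Core's types.
#include <iostream>
#include <optional>
#include <string>

struct Result {
    std::optional<std::string> error;                 // empty on success
    explicit operator bool() const { return !error; } // truthy means "ok"
};

std::string ErrorString(const Result& r) { return r.error.value_or(""); }

Result SanityChecks(bool ok)
{
    if (!ok) return {"Elliptic curve cryptography sanity check failure. Aborting."};
    return {}; // success
}

int main()
{
    auto result{SanityChecks(/*ok=*/false)};
    if (!result) {
        std::cerr << "Init error: " << ErrorString(result) << '\n';
        return 1;
    }
    return 0;
}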
diff --git a/src/key_io.cpp b/src/key_io.cpp
index 4659a59544..454a96df5e 100644
--- a/src/key_io.cpp
+++ b/src/key_io.cpp
@@ -124,7 +124,11 @@ CTxDestination DecodeDestination(const std::string& str, const CChainParams& par
data.clear();
const auto dec = bech32::Decode(str);
- if ((dec.encoding == bech32::Encoding::BECH32 || dec.encoding == bech32::Encoding::BECH32M) && dec.data.size() > 0) {
+ if (dec.encoding == bech32::Encoding::BECH32 || dec.encoding == bech32::Encoding::BECH32M) {
+ if (dec.data.empty()) {
+ error_str = "Empty Bech32 data section";
+ return CNoDestination();
+ }
// Bech32 decoding
if (dec.hrp != params.Bech32HRP()) {
error_str = strprintf("Invalid or unsupported prefix for Segwit (Bech32) address (expected %s, got %s).", params.Bech32HRP(), dec.hrp);
@@ -142,6 +146,9 @@ CTxDestination DecodeDestination(const std::string& str, const CChainParams& par
// The rest of the symbols are converted witness program bytes.
data.reserve(((dec.data.size() - 1) * 5) / 8);
if (ConvertBits<5, 8, false>([&](unsigned char c) { data.push_back(c); }, dec.data.begin() + 1, dec.data.end())) {
+
+ std::string_view byte_str{data.size() == 1 ? "byte" : "bytes"};
+
if (version == 0) {
{
WitnessV0KeyHash keyid;
@@ -158,7 +165,7 @@ CTxDestination DecodeDestination(const std::string& str, const CChainParams& par
}
}
- error_str = "Invalid Bech32 v0 address data size";
+ error_str = strprintf("Invalid Bech32 v0 address program size (%d %s), per BIP141", data.size(), byte_str);
return CNoDestination();
}
@@ -175,7 +182,7 @@ CTxDestination DecodeDestination(const std::string& str, const CChainParams& par
}
if (data.size() < 2 || data.size() > BECH32_WITNESS_PROG_MAX_LEN) {
- error_str = "Invalid Bech32 address data size";
+ error_str = strprintf("Invalid Bech32 address program size (%d %s)", data.size(), byte_str);
return CNoDestination();
}
@@ -184,6 +191,9 @@ CTxDestination DecodeDestination(const std::string& str, const CChainParams& par
std::copy(data.begin(), data.end(), unk.program);
unk.length = data.size();
return unk;
+ } else {
+ error_str = strprintf("Invalid padding in Bech32 data section");
+ return CNoDestination();
}
}
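The reworded key_io errors above now report the decoded witness program size. As background, a minimal standalone sketch of the size rules they reference (BIP141: version-0 programs must be 20 or 32 bytes; any witness program must be 2 to 40 bytes, matching BECH32_WITNESS_PROG_MAX_LEN); the real checks live in DecodeDestination(), this is only an illustration:

// Sketch: witness program size rules behind the new error messages.
#include <cstddef>
#include <string>

std::string CheckWitnessProgramSize(int version, std::size_t program_size)
{
    if (version == 0 && program_size != 20 && program_size != 32) {
        return "Invalid Bech32 v0 address program size (" + std::to_string(program_size) +
               (program_size == 1 ? " byte" : " bytes") + "), per BIP141";
    }
    if (program_size < 2 || program_size > 40) { // 40 == BECH32_WITNESS_PROG_MAX_LEN
        return "Invalid Bech32 address program size (" + std::to_string(program_size) +
               (program_size == 1 ? " byte" : " bytes") + ")";
    }
    return "ok";
}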
diff --git a/src/net.h b/src/net.h
index 908b16f35e..83fe0427d4 100644
--- a/src/net.h
+++ b/src/net.h
@@ -200,7 +200,9 @@ public:
int nVersion;
std::string cleanSubVer;
bool fInbound;
+ // We requested high bandwidth connection to peer
bool m_bip152_highbandwidth_to;
+ // Peer requested high bandwidth connection
bool m_bip152_highbandwidth_from;
int m_starting_height;
uint64_t nSendBytes;
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 0e3f7435c8..e723d4657d 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -431,7 +431,6 @@ struct CNodeState {
std::list<QueuedBlock> vBlocksInFlight;
//! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty.
std::chrono::microseconds m_downloading_since{0us};
- int nBlocksInFlight{0};
//! Whether we consider this a preferred download peer.
bool fPreferredDownload{false};
/** Whether this peer wants invs or cmpctblocks (when possible) for block announcements. */
@@ -877,6 +876,9 @@ private:
/** Have we requested this block from a peer */
bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ /** Have we requested this block from an outbound peer */
+ bool IsBlockRequestedFromOutbound(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
/** Remove this block from our tracked requested blocks. Called if:
* - the block has been received from a peer
* - the request for the block has timed out
@@ -899,7 +901,9 @@ private:
*/
void FindNextBlocksToDownload(const Peer& peer, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight GUARDED_BY(cs_main);
+ /* Multimap used to preserve insertion order */
+ typedef std::multimap<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator>> BlockDownloadMap;
+ BlockDownloadMap mapBlocksInFlight GUARDED_BY(cs_main);
/** When our tip was last updated. */
std::atomic<std::chrono::seconds> m_last_tip_update{0s};
@@ -1117,40 +1121,55 @@ std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::micros
bool PeerManagerImpl::IsBlockRequested(const uint256& hash)
{
- return mapBlocksInFlight.find(hash) != mapBlocksInFlight.end();
+ return mapBlocksInFlight.count(hash);
}
-void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer)
+bool PeerManagerImpl::IsBlockRequestedFromOutbound(const uint256& hash)
{
- auto it = mapBlocksInFlight.find(hash);
- if (it == mapBlocksInFlight.end()) {
- // Block was not requested
- return;
+ for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
+ auto [nodeid, block_it] = range.first->second;
+ CNodeState& nodestate = *Assert(State(nodeid));
+ if (!nodestate.m_is_inbound) return true;
}
- auto [node_id, list_it] = it->second;
+ return false;
+}
- if (from_peer && node_id != *from_peer) {
- // Block was requested by another peer
+void PeerManagerImpl::RemoveBlockRequest(const uint256& hash, std::optional<NodeId> from_peer)
+{
+ auto range = mapBlocksInFlight.equal_range(hash);
+ if (range.first == range.second) {
+ // Block was not requested from any peer
return;
}
- CNodeState *state = State(node_id);
- assert(state != nullptr);
+ // We should not have requested too many of this block
+ Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);
- if (state->vBlocksInFlight.begin() == list_it) {
- // First block on the queue was received, update the start download time for the next one
- state->m_downloading_since = std::max(state->m_downloading_since, GetTime<std::chrono::microseconds>());
- }
- state->vBlocksInFlight.erase(list_it);
+ while (range.first != range.second) {
+ auto [node_id, list_it] = range.first->second;
+
+ if (from_peer && *from_peer != node_id) {
+ range.first++;
+ continue;
+ }
- state->nBlocksInFlight--;
- if (state->nBlocksInFlight == 0) {
- // Last validated block on the queue was received.
- m_peers_downloading_from--;
+ CNodeState& state = *Assert(State(node_id));
+
+ if (state.vBlocksInFlight.begin() == list_it) {
+ // First block on the queue was received, update the start download time for the next one
+ state.m_downloading_since = std::max(state.m_downloading_since, GetTime<std::chrono::microseconds>());
+ }
+ state.vBlocksInFlight.erase(list_it);
+
+ if (state.vBlocksInFlight.empty()) {
+ // Last validated block on the queue for this peer was received.
+ m_peers_downloading_from--;
+ }
+ state.m_stalling_since = 0us;
+
+ range.first = mapBlocksInFlight.erase(range.first);
}
- state->m_stalling_since = 0us;
- mapBlocksInFlight.erase(it);
}
bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit)
@@ -1160,27 +1179,29 @@ bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, st
CNodeState *state = State(nodeid);
assert(state != nullptr);
+ Assume(mapBlocksInFlight.count(hash) <= MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK);
+
// Short-circuit most stuff in case it is from the same node
- std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash);
- if (itInFlight != mapBlocksInFlight.end() && itInFlight->second.first == nodeid) {
- if (pit) {
- *pit = &itInFlight->second.second;
+ for (auto range = mapBlocksInFlight.equal_range(hash); range.first != range.second; range.first++) {
+ if (range.first->second.first == nodeid) {
+ if (pit) {
+ *pit = &range.first->second.second;
+ }
+ return false;
}
- return false;
}
- // Make sure it's not listed somewhere already.
- RemoveBlockRequest(hash, std::nullopt);
+ // Make sure it's not being fetched already from same peer.
+ RemoveBlockRequest(hash, nodeid);
std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(),
{&block, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)});
- state->nBlocksInFlight++;
- if (state->nBlocksInFlight == 1) {
+ if (state->vBlocksInFlight.size() == 1) {
// We're starting a block download (batch) from this peer.
state->m_downloading_since = GetTime<std::chrono::microseconds>();
m_peers_downloading_from++;
}
- itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first;
+ auto itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it)));
if (pit) {
*pit = &itInFlight->second.second;
}
@@ -1383,7 +1404,7 @@ void PeerManagerImpl::FindNextBlocksToDownload(const Peer& peer, unsigned int co
}
} else if (waitingfor == -1) {
// This is the first already-in-flight block.
- waitingfor = mapBlocksInFlight[pindex->GetBlockHash()].first;
+ waitingfor = mapBlocksInFlight.lower_bound(pindex->GetBlockHash())->second.first;
}
}
}
@@ -1513,13 +1534,21 @@ void PeerManagerImpl::FinalizeNode(const CNode& node)
nSyncStarted--;
for (const QueuedBlock& entry : state->vBlocksInFlight) {
- mapBlocksInFlight.erase(entry.pindex->GetBlockHash());
+ auto range = mapBlocksInFlight.equal_range(entry.pindex->GetBlockHash());
+ while (range.first != range.second) {
+ auto [node_id, list_it] = range.first->second;
+ if (node_id != nodeid) {
+ range.first++;
+ } else {
+ range.first = mapBlocksInFlight.erase(range.first);
+ }
+ }
}
m_orphanage.EraseForPeer(nodeid);
m_txrequest.DisconnectedPeer(nodeid);
if (m_txreconciliation) m_txreconciliation->ForgetPeer(nodeid);
m_num_preferred_download_peers -= state->fPreferredDownload;
- m_peers_downloading_from -= (state->nBlocksInFlight != 0);
+ m_peers_downloading_from -= (!state->vBlocksInFlight.empty());
assert(m_peers_downloading_from >= 0);
m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect;
assert(m_outbound_peers_with_protect_from_disconnect >= 0);
@@ -1760,11 +1789,10 @@ std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBl
LOCK(cs_main);
- // Mark block as in-flight unless it already is (for this peer).
- // If the peer does not send us a block, vBlocksInFlight remains non-empty,
- // causing us to timeout and disconnect.
- // If a block was already in-flight for a different peer, its BLOCKTXN
- // response will be dropped.
+ // Forget about all prior requests
+ RemoveBlockRequest(block_index.GetBlockHash(), std::nullopt);
+
+ // Mark block as in-flight
if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer";
// Construct message to request the block
@@ -2680,7 +2708,7 @@ void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, c
std::vector<CInv> vGetData;
// Download as much as possible, from earliest to latest.
for (const CBlockIndex *pindex : reverse_iterate(vToFetch)) {
- if (nodestate->nBlocksInFlight >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ if (nodestate->vBlocksInFlight.size() >= MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
// Can't download any more from this peer
break;
}
@@ -4274,15 +4302,27 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
nodestate->m_last_block_announcement = GetTime();
}
- std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator blockInFlightIt = mapBlocksInFlight.find(pindex->GetBlockHash());
- bool fAlreadyInFlight = blockInFlightIt != mapBlocksInFlight.end();
-
if (pindex->nStatus & BLOCK_HAVE_DATA) // Nothing to do here
return;
+ auto range_flight = mapBlocksInFlight.equal_range(pindex->GetBlockHash());
+ size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
+ bool requested_block_from_this_peer{false};
+
+ // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
+ bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());
+
+ while (range_flight.first != range_flight.second) {
+ if (range_flight.first->second.first == pfrom.GetId()) {
+ requested_block_from_this_peer = true;
+ break;
+ }
+ range_flight.first++;
+ }
+
if (pindex->nChainWork <= m_chainman.ActiveChain().Tip()->nChainWork || // We know something better
pindex->nTx != 0) { // We had this block at some point, but pruned it
- if (fAlreadyInFlight) {
+ if (requested_block_from_this_peer) {
// We requested this block for some reason, but our mempool will probably be useless
// so we just grab the block via normal getdata
std::vector<CInv> vInv(1);
@@ -4293,15 +4333,15 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
// If we're not close to tip yet, give up and let parallel block fetch work its magic
- if (!fAlreadyInFlight && !CanDirectFetch()) {
+ if (!already_in_flight && !CanDirectFetch()) {
return;
}
// We want to be a bit conservative just to be extra careful about DoS
// possibilities in compact block processing...
if (pindex->nHeight <= m_chainman.ActiveChain().Height() + 2) {
- if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
- (fAlreadyInFlight && blockInFlightIt->second.first == pfrom.GetId())) {
+ if ((already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK && nodestate->vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) ||
+ requested_block_from_this_peer) {
std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr;
if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) {
if (!(*queuedBlockIt)->partialBlock)
@@ -4320,10 +4360,15 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
Misbehaving(*peer, 100, "invalid compact block");
return;
} else if (status == READ_STATUS_FAILED) {
- // Duplicate txindexes, the block is now in-flight, so just request it
- std::vector<CInv> vInv(1);
- vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
+ if (first_in_flight) {
+ // Duplicate txindexes, the block is now in-flight, so just request it
+ std::vector<CInv> vInv(1);
+ vInv[0] = CInv(MSG_BLOCK | GetFetchFlags(*peer), blockhash);
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, vInv));
+ } else {
+ // Give up for this peer and wait for other peer(s)
+ RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
+ }
return;
}
@@ -4338,9 +4383,24 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
txn.blockhash = blockhash;
blockTxnMsg << txn;
fProcessBLOCKTXN = true;
- } else {
+ } else if (first_in_flight) {
+ // We will try to round-trip any compact blocks we get on failure,
+ // as long as it's first...
req.blockhash = pindex->GetBlockHash();
m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
+ } else if (pfrom.m_bip152_highbandwidth_to &&
+ (!pfrom.IsInboundConn() ||
+ IsBlockRequestedFromOutbound(blockhash) ||
+ already_in_flight < MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK - 1)) {
+ // ... or it's a hb relay peer and:
+ // - peer is outbound, or
+ // - we already have an outbound attempt in flight (so we'll take what we can get), or
+ // - it's not the final parallel download slot (which we may reserve for first outbound)
+ req.blockhash = pindex->GetBlockHash();
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETBLOCKTXN, req));
+ } else {
+ // Give up for this peer and wait for other peer(s)
+ RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId());
}
} else {
// This block is either already in flight from a different
@@ -4361,7 +4421,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
}
} else {
- if (fAlreadyInFlight) {
+ if (requested_block_from_this_peer) {
// We requested this block, but it's far into the future, so our
// mempool will probably be useless - request the block normally
std::vector<CInv> vInv(1);
@@ -4433,24 +4493,44 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
{
LOCK(cs_main);
- std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator it = mapBlocksInFlight.find(resp.blockhash);
- if (it == mapBlocksInFlight.end() || !it->second.second->partialBlock ||
- it->second.first != pfrom.GetId()) {
+ auto range_flight = mapBlocksInFlight.equal_range(resp.blockhash);
+ size_t already_in_flight = std::distance(range_flight.first, range_flight.second);
+ bool requested_block_from_this_peer{false};
+
+ // Multimap ensures ordering of outstanding requests. It's either empty or first in line.
+ bool first_in_flight = already_in_flight == 0 || (range_flight.first->second.first == pfrom.GetId());
+
+ while (range_flight.first != range_flight.second) {
+ auto [node_id, block_it] = range_flight.first->second;
+ if (node_id == pfrom.GetId() && block_it->partialBlock) {
+ requested_block_from_this_peer = true;
+ break;
+ }
+ range_flight.first++;
+ }
+
+ if (!requested_block_from_this_peer) {
LogPrint(BCLog::NET, "Peer %d sent us block transactions for block we weren't expecting\n", pfrom.GetId());
return;
}
- PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock;
+ PartiallyDownloadedBlock& partialBlock = *range_flight.first->second.second->partialBlock;
ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn);
if (status == READ_STATUS_INVALID) {
RemoveBlockRequest(resp.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
Misbehaving(*peer, 100, "invalid compact block/non-matching block transactions");
return;
} else if (status == READ_STATUS_FAILED) {
- // Might have collided, fall back to getdata now :(
- std::vector<CInv> invs;
- invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(*peer), resp.blockhash));
- m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
+ if (first_in_flight) {
+ // Might have collided, fall back to getdata now :(
+ std::vector<CInv> invs;
+ invs.push_back(CInv(MSG_BLOCK | GetFetchFlags(*peer), resp.blockhash));
+ m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETDATA, invs));
+ } else {
+ RemoveBlockRequest(resp.blockhash, pfrom.GetId());
+ LogPrint(BCLog::NET, "Peer %d sent us a compact block but it failed to reconstruct, waiting on first download to complete\n", pfrom.GetId());
+ return;
+ }
} else {
// Block is either okay, or possibly we received
// READ_STATUS_CHECKBLOCK_FAILED.
@@ -5043,14 +5123,14 @@ void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now)
// valid headers chain with at least as much work as our tip.
CNodeState *node_state = State(pnode->GetId());
if (node_state == nullptr ||
- (now - pnode->m_connected >= MINIMUM_CONNECT_TIME && node_state->nBlocksInFlight == 0)) {
+ (now - pnode->m_connected >= MINIMUM_CONNECT_TIME && node_state->vBlocksInFlight.empty())) {
pnode->fDisconnect = true;
LogPrint(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n",
pnode->GetId(), count_seconds(pnode->m_last_block_time));
return true;
} else {
LogPrint(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
- pnode->GetId(), count_seconds(pnode->m_connected), node_state->nBlocksInFlight);
+ pnode->GetId(), count_seconds(pnode->m_connected), node_state->vBlocksInFlight.size());
}
return false;
});
@@ -5090,13 +5170,13 @@ void PeerManagerImpl::EvictExtraOutboundPeers(std::chrono::seconds now)
// Also don't disconnect any peer we're trying to download a
// block from.
CNodeState &state = *State(pnode->GetId());
- if (now - pnode->m_connected > MINIMUM_CONNECT_TIME && state.nBlocksInFlight == 0) {
+ if (now - pnode->m_connected > MINIMUM_CONNECT_TIME && state.vBlocksInFlight.empty()) {
LogPrint(BCLog::NET, "disconnecting extra outbound peer=%d (last block announcement received at time %d)\n", pnode->GetId(), oldest_block_announcement);
pnode->fDisconnect = true;
return true;
} else {
LogPrint(BCLog::NET, "keeping outbound peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
- pnode->GetId(), count_seconds(pnode->m_connected), state.nBlocksInFlight);
+ pnode->GetId(), count_seconds(pnode->m_connected), state.vBlocksInFlight.size());
return false;
}
});
@@ -5751,7 +5831,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// Stalling only triggers when the block download window cannot move. During normal steady state,
// the download window should be much larger than the to-be-downloaded set of blocks, so disconnection
// should only happen during initial block download.
- LogPrintf("Peer=%d is stalling block download, disconnecting\n", pto->GetId());
+ LogPrintf("Peer=%d%s is stalling block download, disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
pto->fDisconnect = true;
// Increase timeout for the next peer so that we don't disconnect multiple peers if our own
// bandwidth is insufficient.
@@ -5770,7 +5850,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
QueuedBlock &queuedBlock = state.vBlocksInFlight.front();
int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1;
if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) {
- LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->GetId());
+ LogPrintf("Timeout downloading block %s from peer=%d%s, disconnecting\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
pto->fDisconnect = true;
return true;
}
@@ -5786,11 +5866,11 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// disconnect our sync peer for stalling; we have bigger
// problems if we can't get any outbound peers.
if (!pto->HasPermission(NetPermissionFlags::NoBan)) {
- LogPrintf("Timeout downloading headers from peer=%d, disconnecting\n", pto->GetId());
+ LogPrintf("Timeout downloading headers from peer=%d%s, disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
pto->fDisconnect = true;
return true;
} else {
- LogPrintf("Timeout downloading headers from noban peer=%d, not disconnecting\n", pto->GetId());
+ LogPrintf("Timeout downloading headers from noban peer=%d%s, not disconnecting\n", pto->GetId(), fLogIPs ? strprintf(" peeraddr=%s", pto->addr.ToStringAddrPort()) : "");
// Reset the headers sync state so that we have a
// chance to try downloading from a different peer.
// Note: this will also result in at least one more
@@ -5816,10 +5896,10 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// Message: getdata (blocks)
//
std::vector<CInv> vGetData;
- if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.ActiveChainstate().IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
+ if (CanServeBlocks(*peer) && ((sync_blocks_and_headers_from_peer && !IsLimitedPeer(*peer)) || !m_chainman.ActiveChainstate().IsInitialBlockDownload()) && state.vBlocksInFlight.size() < MAX_BLOCKS_IN_TRANSIT_PER_PEER) {
std::vector<const CBlockIndex*> vToDownload;
NodeId staller = -1;
- FindNextBlocksToDownload(*peer, MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller);
+ FindNextBlocksToDownload(*peer, MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.vBlocksInFlight.size(), vToDownload, staller);
for (const CBlockIndex *pindex : vToDownload) {
uint32_t nFetchFlags = GetFetchFlags(*peer);
vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash()));
@@ -5827,7 +5907,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(),
pindex->nHeight, pto->GetId());
}
- if (state.nBlocksInFlight == 0 && staller != -1) {
+ if (state.vBlocksInFlight.empty() && staller != -1) {
if (State(staller)->m_stalling_since == 0us) {
State(staller)->m_stalling_since = current_time;
LogPrint(BCLog::NET, "Stall started peer=%d\n", staller);
diff --git a/src/net_processing.h b/src/net_processing.h
index af9a02139b..deebb24c94 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -22,6 +22,8 @@ static const bool DEFAULT_PEERBLOOMFILTERS = false;
static const bool DEFAULT_PEERBLOCKFILTERS = false;
/** Threshold for marking a node to be discouraged, e.g. disconnected and added to the discouragement filter. */
static const int DISCOURAGEMENT_THRESHOLD{100};
+/** Maximum number of outstanding CMPCTBLOCK requests for the same block. */
+static const unsigned int MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK = 3;
struct CNodeStateStats {
int nSyncHeight = -1;
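Since up to MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK parallel requests for the same block are now allowed, mapBlocksInFlight becomes a std::multimap: equal_range() returns the entries for one key in insertion order (equal keys are inserted at the end of their range), which is what makes the "first in flight" peer well defined. A small standalone sketch of that property, with hypothetical node ids:

// Sketch: std::multimap keeps equal keys in insertion order, so the first
// entry in equal_range() is the first peer we requested the block from.
#include <iostream>
#include <iterator>
#include <map>
#include <string>

int main()
{
    std::multimap<std::string, int> blocks_in_flight; // block hash -> node id (hypothetical)

    blocks_in_flight.emplace("blockhash_abc", 7);  // first request
    blocks_in_flight.emplace("blockhash_abc", 3);  // second, parallel request
    blocks_in_flight.emplace("blockhash_abc", 12); // third request

    auto range = blocks_in_flight.equal_range("blockhash_abc");
    std::cout << "requests in flight: " << std::distance(range.first, range.second) << '\n';
    std::cout << "first in flight: peer " << range.first->second << '\n'; // prints 7

    for (auto it = range.first; it != range.second; ++it) {
        std::cout << "  peer " << it->second << '\n'; // 7, 3, 12 in insertion order
    }
}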
diff --git a/src/node/blockmanager_args.cpp b/src/node/blockmanager_args.cpp
index 23b0bd37ab..4b296db1b0 100644
--- a/src/node/blockmanager_args.cpp
+++ b/src/node/blockmanager_args.cpp
@@ -7,26 +7,26 @@
#include <common/args.h>
#include <node/blockstorage.h>
#include <tinyformat.h>
+#include <util/result.h>
#include <util/translation.h>
#include <validation.h>
#include <cstdint>
-#include <optional>
namespace node {
-std::optional<bilingual_str> ApplyArgsManOptions(const ArgsManager& args, BlockManager::Options& opts)
+util::Result<void> ApplyArgsManOptions(const ArgsManager& args, BlockManager::Options& opts)
{
// block pruning; get the amount of disk space (in MiB) to allot for block & undo files
int64_t nPruneArg{args.GetIntArg("-prune", opts.prune_target)};
if (nPruneArg < 0) {
- return _("Prune cannot be configured with a negative value.");
+ return util::Error{_("Prune cannot be configured with a negative value.")};
}
uint64_t nPruneTarget{uint64_t(nPruneArg) * 1024 * 1024};
if (nPruneArg == 1) { // manual pruning: -prune=1
nPruneTarget = BlockManager::PRUNE_TARGET_MANUAL;
} else if (nPruneTarget) {
if (nPruneTarget < MIN_DISK_SPACE_FOR_BLOCK_FILES) {
- return strprintf(_("Prune configured below the minimum of %d MiB. Please use a higher number."), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024);
+ return util::Error{strprintf(_("Prune configured below the minimum of %d MiB. Please use a higher number."), MIN_DISK_SPACE_FOR_BLOCK_FILES / 1024 / 1024)};
}
}
opts.prune_target = nPruneTarget;
@@ -34,6 +34,6 @@ std::optional<bilingual_str> ApplyArgsManOptions(const ArgsManager& args, BlockM
if (auto value{args.GetBoolArg("-fastprune")}) opts.fast_prune = *value;
if (auto value{args.GetBoolArg("-stopafterblockimport")}) opts.stop_after_block_import = *value;
- return std::nullopt;
+ return {};
}
} // namespace node
diff --git a/src/node/blockmanager_args.h b/src/node/blockmanager_args.h
index e657c6bb45..84540b1922 100644
--- a/src/node/blockmanager_args.h
+++ b/src/node/blockmanager_args.h
@@ -7,14 +7,12 @@
#define BITCOIN_NODE_BLOCKMANAGER_ARGS_H
#include <node/blockstorage.h>
-
-#include <optional>
+#include <util/result.h>
class ArgsManager;
-struct bilingual_str;
namespace node {
-std::optional<bilingual_str> ApplyArgsManOptions(const ArgsManager& args, BlockManager::Options& opts);
+util::Result<void> ApplyArgsManOptions(const ArgsManager& args, BlockManager::Options& opts);
} // namespace node
#endif // BITCOIN_NODE_BLOCKMANAGER_ARGS_H
diff --git a/src/node/chainstatemanager_args.cpp b/src/node/chainstatemanager_args.cpp
index b97344c9aa..87d9238c18 100644
--- a/src/node/chainstatemanager_args.cpp
+++ b/src/node/chainstatemanager_args.cpp
@@ -11,16 +11,16 @@
#include <node/database_args.h>
#include <tinyformat.h>
#include <uint256.h>
+#include <util/result.h>
#include <util/strencodings.h>
#include <util/translation.h>
#include <validation.h>
#include <chrono>
-#include <optional>
#include <string>
namespace node {
-std::optional<bilingual_str> ApplyArgsManOptions(const ArgsManager& args, ChainstateManager::Options& opts)
+util::Result<void> ApplyArgsManOptions(const ArgsManager& args, ChainstateManager::Options& opts)
{
if (auto value{args.GetBoolArg("-checkblockindex")}) opts.check_block_index = *value;
@@ -28,7 +28,7 @@ std::optional<bilingual_str> ApplyArgsManOptions(const ArgsManager& args, Chains
if (auto value{args.GetArg("-minimumchainwork")}) {
if (!IsHexNumber(*value)) {
- return strprintf(Untranslated("Invalid non-hex (%s) minimum chain work value specified"), *value);
+ return util::Error{strprintf(Untranslated("Invalid non-hex (%s) minimum chain work value specified"), *value)};
}
opts.minimum_chain_work = UintToArith256(uint256S(*value));
}
@@ -41,6 +41,6 @@ std::optional<bilingual_str> ApplyArgsManOptions(const ArgsManager& args, Chains
ReadDatabaseArgs(args, opts.coins_db);
ReadCoinsViewArgs(args, opts.coins_view);
- return std::nullopt;
+ return {};
}
} // namespace node
diff --git a/src/node/chainstatemanager_args.h b/src/node/chainstatemanager_args.h
index 6c46b998f2..82eb037368 100644
--- a/src/node/chainstatemanager_args.h
+++ b/src/node/chainstatemanager_args.h
@@ -5,15 +5,13 @@
#ifndef BITCOIN_NODE_CHAINSTATEMANAGER_ARGS_H
#define BITCOIN_NODE_CHAINSTATEMANAGER_ARGS_H
+#include <util/result.h>
#include <validation.h>
-#include <optional>
-
class ArgsManager;
-struct bilingual_str;
namespace node {
-std::optional<bilingual_str> ApplyArgsManOptions(const ArgsManager& args, ChainstateManager::Options& opts);
+util::Result<void> ApplyArgsManOptions(const ArgsManager& args, ChainstateManager::Options& opts);
} // namespace node
#endif // BITCOIN_NODE_CHAINSTATEMANAGER_ARGS_H
diff --git a/src/node/mempool_args.cpp b/src/node/mempool_args.cpp
index 294111a58a..5381902263 100644
--- a/src/node/mempool_args.cpp
+++ b/src/node/mempool_args.cpp
@@ -38,7 +38,7 @@ void ApplyArgsManOptions(const ArgsManager& argsman, MemPoolLimits& mempool_limi
}
}
-std::optional<bilingual_str> ApplyArgsManOptions(const ArgsManager& argsman, const CChainParams& chainparams, MemPoolOptions& mempool_opts)
+util::Result<void> ApplyArgsManOptions(const ArgsManager& argsman, const CChainParams& chainparams, MemPoolOptions& mempool_opts)
{
mempool_opts.check_ratio = argsman.GetIntArg("-checkmempool", mempool_opts.check_ratio);
@@ -52,7 +52,7 @@ std::optional<bilingual_str> ApplyArgsManOptions(const ArgsManager& argsman, con
if (std::optional<CAmount> inc_relay_fee = ParseMoney(argsman.GetArg("-incrementalrelayfee", ""))) {
mempool_opts.incremental_relay_feerate = CFeeRate{inc_relay_fee.value()};
} else {
- return AmountErrMsg("incrementalrelayfee", argsman.GetArg("-incrementalrelayfee", ""));
+ return util::Error{AmountErrMsg("incrementalrelayfee", argsman.GetArg("-incrementalrelayfee", ""))};
}
}
@@ -61,7 +61,7 @@ std::optional<bilingual_str> ApplyArgsManOptions(const ArgsManager& argsman, con
// High fee check is done afterward in CWallet::Create()
mempool_opts.min_relay_feerate = CFeeRate{min_relay_feerate.value()};
} else {
- return AmountErrMsg("minrelaytxfee", argsman.GetArg("-minrelaytxfee", ""));
+ return util::Error{AmountErrMsg("minrelaytxfee", argsman.GetArg("-minrelaytxfee", ""))};
}
} else if (mempool_opts.incremental_relay_feerate > mempool_opts.min_relay_feerate) {
// Allow only setting incremental fee to control both
@@ -75,7 +75,7 @@ std::optional<bilingual_str> ApplyArgsManOptions(const ArgsManager& argsman, con
if (std::optional<CAmount> parsed = ParseMoney(argsman.GetArg("-dustrelayfee", ""))) {
mempool_opts.dust_relay_feerate = CFeeRate{parsed.value()};
} else {
- return AmountErrMsg("dustrelayfee", argsman.GetArg("-dustrelayfee", ""));
+ return util::Error{AmountErrMsg("dustrelayfee", argsman.GetArg("-dustrelayfee", ""))};
}
}
@@ -89,12 +89,12 @@ std::optional<bilingual_str> ApplyArgsManOptions(const ArgsManager& argsman, con
mempool_opts.require_standard = !argsman.GetBoolArg("-acceptnonstdtxn", !chainparams.RequireStandard());
if (!chainparams.IsTestChain() && !mempool_opts.require_standard) {
- return strprintf(Untranslated("acceptnonstdtxn is not currently supported for %s chain"), chainparams.GetChainTypeString());
+ return util::Error{strprintf(Untranslated("acceptnonstdtxn is not currently supported for %s chain"), chainparams.GetChainTypeString())};
}
mempool_opts.full_rbf = argsman.GetBoolArg("-mempoolfullrbf", mempool_opts.full_rbf);
ApplyArgsManOptions(argsman, mempool_opts.limits);
- return std::nullopt;
+ return {};
}
diff --git a/src/node/mempool_args.h b/src/node/mempool_args.h
index 52d8b4f265..630fee6421 100644
--- a/src/node/mempool_args.h
+++ b/src/node/mempool_args.h
@@ -5,7 +5,7 @@
#ifndef BITCOIN_NODE_MEMPOOL_ARGS_H
#define BITCOIN_NODE_MEMPOOL_ARGS_H
-#include <optional>
+#include <util/result.h>
class ArgsManager;
class CChainParams;
@@ -21,7 +21,7 @@ struct MemPoolOptions;
* @param[in] argsman The ArgsManager in which to check set options.
* @param[in,out] mempool_opts The MemPoolOptions to modify according to \p argsman.
*/
-[[nodiscard]] std::optional<bilingual_str> ApplyArgsManOptions(const ArgsManager& argsman, const CChainParams& chainparams, kernel::MemPoolOptions& mempool_opts);
+[[nodiscard]] util::Result<void> ApplyArgsManOptions(const ArgsManager& argsman, const CChainParams& chainparams, kernel::MemPoolOptions& mempool_opts);
#endif // BITCOIN_NODE_MEMPOOL_ARGS_H
diff --git a/src/qt/test/test_main.cpp b/src/qt/test/test_main.cpp
index eaadbd7f7a..e45fc1ced8 100644
--- a/src/qt/test/test_main.cpp
+++ b/src/qt/test/test_main.cpp
@@ -71,6 +71,9 @@ int main(int argc, char* argv[])
gArgs.ForceSetArg("-upnp", "0");
gArgs.ForceSetArg("-natpmp", "0");
+ std::string error;
+ if (!gArgs.ReadConfigFiles(error, true)) QWARN(error.c_str());
+
// Prefer the "minimal" platform for the test instead of the normal default
// platform ("xcb", "windows", or "cocoa") so tests can't unintentionally
// interfere with any background GUIs and don't require extra resources.
diff --git a/src/random.cpp b/src/random.cpp
index f4c51574cc..54500e6cc6 100644
--- a/src/random.cpp
+++ b/src/random.cpp
@@ -28,14 +28,10 @@
#include <sys/time.h>
#endif
-#ifdef HAVE_SYS_GETRANDOM
-#include <sys/syscall.h>
-#include <linux/random.h>
-#endif
-#if defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX)
-#include <unistd.h>
+#if defined(HAVE_GETRANDOM) || (defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX))
#include <sys/random.h>
#endif
+
#ifdef HAVE_SYSCTL_ARND
#include <sys/sysctl.h>
#endif
@@ -252,7 +248,7 @@ static void Strengthen(const unsigned char (&seed)[32], SteadyClock::duration du
/** Fallback: get 32 bytes of system entropy from /dev/urandom. The most
* compatible way to get cryptographic randomness on UNIX-ish platforms.
*/
-static void GetDevURandom(unsigned char *ent32)
+[[maybe_unused]] static void GetDevURandom(unsigned char *ent32)
{
int f = open("/dev/urandom", O_RDONLY);
if (f == -1) {
@@ -285,23 +281,14 @@ void GetOSRand(unsigned char *ent32)
RandFailure();
}
CryptReleaseContext(hProvider, 0);
-#elif defined(HAVE_SYS_GETRANDOM)
+#elif defined(HAVE_GETRANDOM)
/* Linux. From the getrandom(2) man page:
* "If the urandom source has been initialized, reads of up to 256 bytes
* will always return as many bytes as requested and will not be
* interrupted by signals."
*/
- int rv = syscall(SYS_getrandom, ent32, NUM_OS_RANDOM_BYTES, 0);
- if (rv != NUM_OS_RANDOM_BYTES) {
- if (rv < 0 && errno == ENOSYS) {
- /* Fallback for kernel <3.17: the return value will be -1 and errno
- * ENOSYS if the syscall is not available, in that case fall back
- * to /dev/urandom.
- */
- GetDevURandom(ent32);
- } else {
- RandFailure();
- }
+ if (getrandom(ent32, NUM_OS_RANDOM_BYTES, 0) != NUM_OS_RANDOM_BYTES) {
+ RandFailure();
}
#elif defined(__OpenBSD__)
/* OpenBSD. From the arc4random(3) man page:
@@ -311,16 +298,10 @@ void GetOSRand(unsigned char *ent32)
The function call is always successful.
*/
arc4random_buf(ent32, NUM_OS_RANDOM_BYTES);
- // Silence a compiler warning about unused function.
- (void)GetDevURandom;
#elif defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX)
- /* getentropy() is available on macOS 10.12 and later.
- */
if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) {
RandFailure();
}
- // Silence a compiler warning about unused function.
- (void)GetDevURandom;
#elif defined(HAVE_SYSCTL_ARND)
/* FreeBSD, NetBSD and similar. It is possible for the call to return less
* bytes than requested, so need to read in a loop.
@@ -334,8 +315,6 @@ void GetOSRand(unsigned char *ent32)
}
have += len;
} while (have < NUM_OS_RANDOM_BYTES);
- // Silence a compiler warning about unused function.
- (void)GetDevURandom;
#else
/* Fall back to /dev/urandom if there is no specific method implemented to
* get system entropy for this OS.
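The random.cpp hunk above replaces the raw syscall(SYS_getrandom, ...) call with the libc getrandom() wrapper from <sys/random.h> and drops the ENOSYS fallback to /dev/urandom. A minimal sketch of the wrapper's use under those assumptions (Linux with glibc >= 2.25 or musl; error handling reduced to a message):

// Sketch: filling a buffer from the kernel CSPRNG via the getrandom() wrapper.
// Assumes Linux with glibc >= 2.25 (or musl); not a portability shim.
#include <sys/random.h>
#include <sys/types.h>

#include <array>
#include <cstdio>
#include <cstdlib>

int main()
{
    std::array<unsigned char, 32> ent32{};
    // With flags == 0 this blocks until the entropy pool is initialized; reads
    // of up to 256 bytes then return in full and are not interrupted by signals.
    if (getrandom(ent32.data(), ent32.size(), 0) != static_cast<ssize_t>(ent32.size())) {
        std::perror("getrandom");
        return EXIT_FAILURE;
    }
    std::printf("got %zu random bytes\n", ent32.size());
    return EXIT_SUCCESS;
}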
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 88370b5925..ee3237638e 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -428,7 +428,7 @@ static RPCHelpMan getblockfrompeer()
"getblockfrompeer",
"Attempt to fetch block from a given peer.\n\n"
"We must have the header for this block, e.g. using submitheader.\n"
- "Subsequent calls for the same block and a new peer will cause the response from the previous peer to be ignored.\n"
+ "Subsequent calls for the same block may cause the response from the previous peer to be ignored.\n"
"Peers generally ignore requests for a stale block that they never fully verified, or one that is more than a month old.\n"
"When a peer does not respond with a block, we will disconnect.\n"
"Note: The block could be re-pruned as soon as it is received.\n\n"
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index f3c19003ff..d08e2d55d1 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -133,6 +133,9 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "walletprocesspsbt", 1, "sign" },
{ "walletprocesspsbt", 3, "bip32derivs" },
{ "walletprocesspsbt", 4, "finalize" },
+ { "descriptorprocesspsbt", 1, "descriptors"},
+ { "descriptorprocesspsbt", 3, "bip32derivs" },
+ { "descriptorprocesspsbt", 4, "finalize" },
{ "createpsbt", 0, "inputs" },
{ "createpsbt", 1, "outputs" },
{ "createpsbt", 2, "locktime" },
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 464be66046..eb0200ccf5 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -170,8 +170,9 @@ static std::vector<RPCArg> CreateTxDoc()
};
}
-// Update PSBT with information from the mempool, the UTXO set, the txindex, and the provided descriptors
-PartiallySignedTransaction ProcessPSBT(const std::string& psbt_string, const std::any& context, const HidingSigningProvider& provider)
+// Update PSBT with information from the mempool, the UTXO set, the txindex, and the provided descriptors.
+// Optionally, sign the inputs that we can using information from the descriptors.
+PartiallySignedTransaction ProcessPSBT(const std::string& psbt_string, const std::any& context, const HidingSigningProvider& provider, int sighash_type, bool finalize)
{
// Unserialize the transactions
PartiallySignedTransaction psbtx;
@@ -240,9 +241,10 @@ PartiallySignedTransaction ProcessPSBT(const std::string& psbt_string, const std
}
// Update script/keypath information using descriptor data.
- // Note that SignPSBTInput does a lot more than just constructing ECDSA signatures
- // we don't actually care about those here, in fact.
- SignPSBTInput(provider, psbtx, /*index=*/i, &txdata, /*sighash=*/1);
+ // Note that SignPSBTInput does a lot more than just constructing ECDSA signatures.
+ // We only actually care about those if our signing provider doesn't hide private
+ // information, as is the case with `descriptorprocesspsbt`
+ SignPSBTInput(provider, psbtx, /*index=*/i, &txdata, sighash_type, /*out_sigdata=*/nullptr, finalize);
}
// Update script/keypath information using descriptor data.
@@ -1695,7 +1697,9 @@ static RPCHelpMan utxoupdatepsbt()
const PartiallySignedTransaction& psbtx = ProcessPSBT(
request.params[0].get_str(),
request.context,
- HidingSigningProvider(&provider, /*hide_secret=*/true, /*hide_origin=*/false));
+ HidingSigningProvider(&provider, /*hide_secret=*/true, /*hide_origin=*/false),
+ /*sighash_type=*/SIGHASH_ALL,
+ /*finalize=*/false);
CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
ssTx << psbtx;
@@ -1914,6 +1918,82 @@ static RPCHelpMan analyzepsbt()
};
}
+RPCHelpMan descriptorprocesspsbt()
+{
+ return RPCHelpMan{"descriptorprocesspsbt",
+ "\nUpdate all segwit inputs in a PSBT with information from output descriptors, the UTXO set or the mempool. \n"
+ "Then, sign the inputs we are able to with information from the output descriptors. ",
+ {
+ {"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "The transaction base64 string"},
+ {"descriptors", RPCArg::Type::ARR, RPCArg::Optional::NO, "An array of either strings or objects", {
+ {"", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "An output descriptor"},
+ {"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "An object with an output descriptor and extra information", {
+ {"desc", RPCArg::Type::STR, RPCArg::Optional::NO, "An output descriptor"},
+ {"range", RPCArg::Type::RANGE, RPCArg::Default{1000}, "Up to what index HD chains should be explored (either end or [begin,end])"},
+ }},
+ }},
+ {"sighashtype", RPCArg::Type::STR, RPCArg::Default{"DEFAULT for Taproot, ALL otherwise"}, "The signature hash type to sign with if not specified by the PSBT. Must be one of\n"
+ " \"DEFAULT\"\n"
+ " \"ALL\"\n"
+ " \"NONE\"\n"
+ " \"SINGLE\"\n"
+ " \"ALL|ANYONECANPAY\"\n"
+ " \"NONE|ANYONECANPAY\"\n"
+ " \"SINGLE|ANYONECANPAY\""},
+ {"bip32derivs", RPCArg::Type::BOOL, RPCArg::Default{true}, "Include BIP 32 derivation paths for public keys if we know them"},
+ {"finalize", RPCArg::Type::BOOL, RPCArg::Default{true}, "Also finalize inputs if possible"},
+ },
+ RPCResult{
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::STR, "psbt", "The base64-encoded partially signed transaction"},
+ {RPCResult::Type::BOOL, "complete", "If the transaction has a complete set of signatures"},
+ }
+ },
+ RPCExamples{
+ HelpExampleCli("descriptorprocesspsbt", "\"psbt\" \"[\\\"descriptor1\\\", \\\"descriptor2\\\"]\"") +
+ HelpExampleCli("descriptorprocesspsbt", "\"psbt\" \"[{\\\"desc\\\":\\\"mydescriptor\\\", \\\"range\\\":21}]\"")
+ },
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
+ // Add descriptor information to a signing provider
+ FlatSigningProvider provider;
+
+ auto descs = request.params[1].get_array();
+ for (size_t i = 0; i < descs.size(); ++i) {
+ EvalDescriptorStringOrObject(descs[i], provider, /*expand_priv=*/true);
+ }
+
+ int sighash_type = ParseSighashString(request.params[2]);
+ bool bip32derivs = request.params[3].isNull() ? true : request.params[3].get_bool();
+ bool finalize = request.params[4].isNull() ? true : request.params[4].get_bool();
+
+ const PartiallySignedTransaction& psbtx = ProcessPSBT(
+ request.params[0].get_str(),
+ request.context,
+ HidingSigningProvider(&provider, /*hide_secret=*/false, !bip32derivs),
+ sighash_type,
+ finalize);
+
+ // Check whether or not all of the inputs are now signed
+ bool complete = true;
+ for (const auto& input : psbtx.inputs) {
+ complete &= PSBTInputSigned(input);
+ }
+
+ CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
+ ssTx << psbtx;
+
+ UniValue result(UniValue::VOBJ);
+
+ result.pushKV("psbt", EncodeBase64(ssTx));
+ result.pushKV("complete", complete);
+
+ return result;
+},
+ };
+}
+
void RegisterRawTransactionRPCCommands(CRPCTable& t)
{
static const CRPCCommand commands[]{
@@ -1929,6 +2009,7 @@ void RegisterRawTransactionRPCCommands(CRPCTable& t)
{"rawtransactions", &createpsbt},
{"rawtransactions", &converttopsbt},
{"rawtransactions", &utxoupdatepsbt},
+ {"rawtransactions", &descriptorprocesspsbt},
{"rawtransactions", &joinpsbts},
{"rawtransactions", &analyzepsbt},
};
diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp
index 81489d7cec..d1ff3f3871 100644
--- a/src/rpc/util.cpp
+++ b/src/rpc/util.cpp
@@ -1126,7 +1126,7 @@ std::pair<int64_t, int64_t> ParseDescriptorRange(const UniValue& value)
return {low, high};
}
-std::vector<CScript> EvalDescriptorStringOrObject(const UniValue& scanobject, FlatSigningProvider& provider)
+std::vector<CScript> EvalDescriptorStringOrObject(const UniValue& scanobject, FlatSigningProvider& provider, const bool expand_priv)
{
std::string desc_str;
std::pair<int64_t, int64_t> range = {0, 1000};
@@ -1159,6 +1159,9 @@ std::vector<CScript> EvalDescriptorStringOrObject(const UniValue& scanobject, Fl
if (!desc->Expand(i, provider, scripts, provider)) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("Cannot derive script without private keys: '%s'", desc_str));
}
+ if (expand_priv) {
+ desc->ExpandPrivate(/*pos=*/i, provider, /*out=*/provider);
+ }
std::move(scripts.begin(), scripts.end(), std::back_inserter(ret));
}
return ret;
diff --git a/src/rpc/util.h b/src/rpc/util.h
index bb5c30a2f4..3ff02582a6 100644
--- a/src/rpc/util.h
+++ b/src/rpc/util.h
@@ -110,7 +110,7 @@ UniValue JSONRPCTransactionError(TransactionError terr, const std::string& err_s
std::pair<int64_t, int64_t> ParseDescriptorRange(const UniValue& value);
/** Evaluate a descriptor given as a string, or as a {"desc":...,"range":...} object, with default range of 1000. */
-std::vector<CScript> EvalDescriptorStringOrObject(const UniValue& scanobject, FlatSigningProvider& provider);
+std::vector<CScript> EvalDescriptorStringOrObject(const UniValue& scanobject, FlatSigningProvider& provider, const bool expand_priv = false);
/** Returns, given services flags, a list of humanly readable (known) network services */
UniValue GetServicesNames(ServiceFlags services);
diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp
index 0bbfb206d5..44ba8bc254 100644
--- a/src/test/fuzz/fuzz.cpp
+++ b/src/test/fuzz/fuzz.cpp
@@ -14,14 +14,19 @@
#include <csignal>
#include <cstdint>
+#include <cstdio>
+#include <cstdlib>
+#include <cstring>
#include <exception>
#include <fstream>
#include <functional>
+#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <unistd.h>
+#include <utility>
#include <vector>
const std::function<void(const std::string&)> G_TEST_LOG_FUN{};
@@ -77,13 +82,13 @@ void initialize()
return WrappedGetAddrInfo(name, false);
};
- bool should_abort{false};
+ bool should_exit{false};
if (std::getenv("PRINT_ALL_FUZZ_TARGETS_AND_ABORT")) {
for (const auto& t : FuzzTargets()) {
if (std::get<2>(t.second)) continue;
std::cout << t.first << std::endl;
}
- should_abort = true;
+ should_exit = true;
}
if (const char* out_path = std::getenv("WRITE_ALL_FUZZ_TARGETS_AND_ABORT")) {
std::cout << "Writing all fuzz target names to '" << out_path << "'." << std::endl;
@@ -92,13 +97,23 @@ void initialize()
if (std::get<2>(t.second)) continue;
out_stream << t.first << std::endl;
}
- should_abort = true;
+ should_exit = true;
+ }
+ if (should_exit) {
+ std::exit(EXIT_SUCCESS);
+ }
+ if (const auto* env_fuzz{std::getenv("FUZZ")}) {
+ // To allow for easier fuzz executable binary modification,
+ static std::string g_copy{env_fuzz}; // create copy to avoid compiler optimizations, and
+ g_fuzz_target = g_copy.c_str(); // strip string after the first null-char.
+ } else {
+ std::cerr << "Must select fuzz target with the FUZZ env var." << std::endl;
+ std::cerr << "Hint: Set the PRINT_ALL_FUZZ_TARGETS_AND_ABORT=1 env var to see all compiled targets." << std::endl;
+ std::exit(EXIT_FAILURE);
}
- Assert(!should_abort);
- g_fuzz_target = Assert(std::getenv("FUZZ"));
const auto it = FuzzTargets().find(g_fuzz_target);
if (it == FuzzTargets().end()) {
- std::cerr << "No fuzzer for " << g_fuzz_target << "." << std::endl;
+ std::cerr << "No fuzz target compiled for " << g_fuzz_target << "." << std::endl;
std::exit(EXIT_FAILURE);
}
Assert(!g_test_one_input);
diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp
index 70dd1e17c5..744ff4701d 100644
--- a/src/test/fuzz/process_message.cpp
+++ b/src/test/fuzz/process_message.cpp
@@ -2,15 +2,16 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <banman.h>
-#include <chainparams.h>
#include <consensus/consensus.h>
#include <net.h>
#include <net_processing.h>
+#include <primitives/transaction.h>
#include <protocol.h>
-#include <scheduler.h>
#include <script/script.h>
+#include <serialize.h>
+#include <span.h>
#include <streams.h>
+#include <sync.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
@@ -20,42 +21,32 @@
#include <test/util/setup_common.h>
#include <test/util/validation.h>
#include <util/chaintype.h>
+#include <util/check.h>
+#include <util/time.h>
+#include <validation.h>
#include <validationinterface.h>
#include <version.h>
+
#include <atomic>
-#include <cassert>
-#include <chrono>
-#include <cstdint>
-#include <iosfwd>
+#include <cstdlib>
#include <iostream>
#include <memory>
#include <string>
+#include <string_view>
+#include <vector>
namespace {
const TestingSetup* g_setup;
+std::string_view LIMIT_TO_MESSAGE_TYPE{};
} // namespace
-size_t& GetNumMsgTypes()
-{
- static size_t g_num_msg_types{0};
- return g_num_msg_types;
-}
-#define FUZZ_TARGET_MSG(msg_type) \
- struct msg_type##_Count_Before_Main { \
- msg_type##_Count_Before_Main() \
- { \
- ++GetNumMsgTypes(); \
- } \
- } const static g_##msg_type##_count_before_main; \
- FUZZ_TARGET_INIT(process_message_##msg_type, initialize_process_message) \
- { \
- fuzz_target(buffer, #msg_type); \
- }
-
void initialize_process_message()
{
- Assert(GetNumMsgTypes() == getAllNetMessageTypes().size()); // If this fails, add or remove the message type below
+ if (const auto val{std::getenv("LIMIT_TO_MESSAGE_TYPE")}) {
+ LIMIT_TO_MESSAGE_TYPE = val;
+ Assert(std::count(getAllNetMessageTypes().begin(), getAllNetMessageTypes().end(), LIMIT_TO_MESSAGE_TYPE)); // Unknown message type passed
+ }
static const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(
/*chain_type=*/ChainType::REGTEST,
@@ -67,7 +58,7 @@ void initialize_process_message()
SyncWithValidationInterfaceQueue();
}
-void fuzz_target(FuzzBufferType buffer, const std::string& LIMIT_TO_MESSAGE_TYPE)
+FUZZ_TARGET_INIT(process_message, initialize_process_message)
{
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
@@ -101,40 +92,3 @@ void fuzz_target(FuzzBufferType buffer, const std::string& LIMIT_TO_MESSAGE_TYPE
SyncWithValidationInterfaceQueue();
g_setup->m_node.connman->StopNodes();
}
-
-FUZZ_TARGET_INIT(process_message, initialize_process_message) { fuzz_target(buffer, ""); }
-FUZZ_TARGET_MSG(addr);
-FUZZ_TARGET_MSG(addrv2);
-FUZZ_TARGET_MSG(block);
-FUZZ_TARGET_MSG(blocktxn);
-FUZZ_TARGET_MSG(cfcheckpt);
-FUZZ_TARGET_MSG(cfheaders);
-FUZZ_TARGET_MSG(cfilter);
-FUZZ_TARGET_MSG(cmpctblock);
-FUZZ_TARGET_MSG(feefilter);
-FUZZ_TARGET_MSG(filteradd);
-FUZZ_TARGET_MSG(filterclear);
-FUZZ_TARGET_MSG(filterload);
-FUZZ_TARGET_MSG(getaddr);
-FUZZ_TARGET_MSG(getblocks);
-FUZZ_TARGET_MSG(getblocktxn);
-FUZZ_TARGET_MSG(getcfcheckpt);
-FUZZ_TARGET_MSG(getcfheaders);
-FUZZ_TARGET_MSG(getcfilters);
-FUZZ_TARGET_MSG(getdata);
-FUZZ_TARGET_MSG(getheaders);
-FUZZ_TARGET_MSG(headers);
-FUZZ_TARGET_MSG(inv);
-FUZZ_TARGET_MSG(mempool);
-FUZZ_TARGET_MSG(merkleblock);
-FUZZ_TARGET_MSG(notfound);
-FUZZ_TARGET_MSG(ping);
-FUZZ_TARGET_MSG(pong);
-FUZZ_TARGET_MSG(sendaddrv2);
-FUZZ_TARGET_MSG(sendcmpct);
-FUZZ_TARGET_MSG(sendheaders);
-FUZZ_TARGET_MSG(sendtxrcncl);
-FUZZ_TARGET_MSG(tx);
-FUZZ_TARGET_MSG(verack);
-FUZZ_TARGET_MSG(version);
-FUZZ_TARGET_MSG(wtxidrelay);
diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp
index 33ffcc4cdd..6424f756a0 100644
--- a/src/test/fuzz/rpc.cpp
+++ b/src/test/fuzz/rpc.cpp
@@ -98,6 +98,7 @@ const std::vector<std::string> RPC_COMMANDS_SAFE_FOR_FUZZING{
"decoderawtransaction",
"decodescript",
"deriveaddresses",
+ "descriptorprocesspsbt",
"disconnectnode",
"echo",
"echojson",
diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h
index bd5a81be45..b7429df02c 100644
--- a/src/test/util/setup_common.h
+++ b/src/test/util/setup_common.h
@@ -8,7 +8,7 @@
#include <common/args.h>
#include <key.h>
#include <node/caches.h>
-#include <node/context.h>
+#include <node/context.h> // IWYU pragma: export
#include <primitives/transaction.h>
#include <pubkey.h>
#include <random.h>
diff --git a/src/test/util/txmempool.cpp b/src/test/util/txmempool.cpp
index 1873cf5ec8..4797d9c310 100644
--- a/src/test/util/txmempool.cpp
+++ b/src/test/util/txmempool.cpp
@@ -22,8 +22,8 @@ CTxMemPool::Options MemPoolOptionsForTest(const NodeContext& node)
// chainparams.DefaultConsistencyChecks for tests
.check_ratio = 1,
};
- const auto err{ApplyArgsManOptions(*node.args, ::Params(), mempool_opts)};
- Assert(!err);
+ const auto result{ApplyArgsManOptions(*node.args, ::Params(), mempool_opts)};
+ Assert(result);
return mempool_opts;
}
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 15351a4355..b2095bd4a3 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -31,22 +31,22 @@ static constexpr uint8_t DB_COINS{'c'};
static constexpr uint8_t DB_TXINDEX_BLOCK{'T'};
// uint8_t DB_TXINDEX{'t'}
-std::optional<bilingual_str> CheckLegacyTxindex(CBlockTreeDB& block_tree_db)
+util::Result<void> CheckLegacyTxindex(CBlockTreeDB& block_tree_db)
{
CBlockLocator ignored{};
if (block_tree_db.Read(DB_TXINDEX_BLOCK, ignored)) {
- return _("The -txindex upgrade started by a previous version cannot be completed. Restart with the previous version or run a full -reindex.");
+ return util::Error{_("The -txindex upgrade started by a previous version cannot be completed. Restart with the previous version or run a full -reindex.")};
}
bool txindex_legacy_flag{false};
block_tree_db.ReadFlag("txindex", txindex_legacy_flag);
if (txindex_legacy_flag) {
// Disable legacy txindex and warn once about occupied disk space
if (!block_tree_db.WriteFlag("txindex", false)) {
- return Untranslated("Failed to write block index db flag 'txindex'='0'");
+ return util::Error{Untranslated("Failed to write block index db flag 'txindex'='0'")};
}
- return _("The block index db contains a legacy 'txindex'. To clear the occupied disk space, run a full -reindex, otherwise ignore this error. This error message will not be displayed again.");
+ return util::Error{_("The block index db contains a legacy 'txindex'. To clear the occupied disk space, run a full -reindex, otherwise ignore this error. This error message will not be displayed again.")};
}
- return std::nullopt;
+ return {};
}
bool CCoinsViewDB::NeedsUpgrade()
diff --git a/src/txdb.h b/src/txdb.h
index 63c7152097..c92733a22c 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -11,6 +11,7 @@
#include <kernel/cs_main.h>
#include <sync.h>
#include <util/fs.h>
+#include <util/result.h>
#include <memory>
#include <optional>
@@ -98,6 +99,6 @@ public:
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
};
-std::optional<bilingual_str> CheckLegacyTxindex(CBlockTreeDB& block_tree_db);
+util::Result<void> CheckLegacyTxindex(CBlockTreeDB& block_tree_db);
#endif // BITCOIN_TXDB_H
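With the return type changed from std::optional<bilingual_str> to util::Result<void>, callers now test for success rather than for the presence of an error string. The actual call site is outside this diff; a minimal sketch of the new pattern, assuming the usual InitError() handling around it:

    if (const auto res{CheckLegacyTxindex(*block_tree_db)}; !res) {
        return InitError(util::ErrorString(res)); // failure: report the stored bilingual_str
    }
    // on success the Result<void> holds no value and startup continues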
diff --git a/src/util/fs.h b/src/util/fs.h
index 886a30394e..8f79f6cba6 100644
--- a/src/util/fs.h
+++ b/src/util/fs.h
@@ -8,7 +8,7 @@
#include <tinyformat.h>
#include <cstdio>
-#include <filesystem>
+#include <filesystem> // IWYU pragma: export
#include <functional>
#include <iomanip>
#include <ios>
diff --git a/src/util/result.h b/src/util/result.h
index 972b1aada0..b99995c7e5 100644
--- a/src/util/result.h
+++ b/src/util/result.h
@@ -31,16 +31,19 @@ struct Error {
//! `std::optional<T>` can be updated to return `util::Result<T>` and return
//! error strings usually just replacing `return std::nullopt;` with `return
//! util::Error{error_string};`.
-template <class T>
+template <class M>
class Result
{
private:
+ using T = std::conditional_t<std::is_same_v<M, void>, std::monostate, M>;
+
std::variant<bilingual_str, T> m_variant;
template <typename FT>
friend bilingual_str ErrorString(const Result<FT>& result);
public:
+ Result() : m_variant{std::in_place_index_t<1>{}, std::monostate{}} {} // constructor for void
Result(T obj) : m_variant{std::in_place_index_t<1>{}, std::move(obj)} {}
Result(Error error) : m_variant{std::in_place_index_t<0>{}, std::move(error.message)} {}
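The added default constructor is what makes util::Result<void> possible: a void result stores std::monostate on success and a bilingual_str on failure. A minimal sketch of defining and consuming such a function (the function name below is hypothetical):

    #include <util/result.h>
    #include <util/translation.h>

    util::Result<void> WriteFlagOrFail(bool ok)
    {
        if (!ok) return util::Error{Untranslated("failed to write flag")};
        return {}; // success: default-constructed Result<void>
    }

    void Caller()
    {
        if (const auto res{WriteFlagOrFail(/*ok=*/true)}; !res) {
            // util::ErrorString(res) returns the stored error message
        }
    }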
diff --git a/src/util/strencodings.h b/src/util/strencodings.h
index 05e7b957c4..d792562735 100644
--- a/src/util/strencodings.h
+++ b/src/util/strencodings.h
@@ -17,8 +17,8 @@
#include <cstdint>
#include <limits>
#include <optional>
-#include <string>
-#include <string_view>
+#include <string> // IWYU pragma: export
+#include <string_view> // IWYU pragma: export
#include <system_error>
#include <type_traits>
#include <vector>
diff --git a/src/util/string.h b/src/util/string.h
index fb93d2a80e..8b69d6ccae 100644
--- a/src/util/string.h
+++ b/src/util/string.h
@@ -12,8 +12,8 @@
#include <cstring>
#include <locale>
#include <sstream>
-#include <string>
-#include <string_view>
+#include <string> // IWYU pragma: export
+#include <string_view> // IWYU pragma: export
#include <vector>
void ReplaceAll(std::string& in_out, const std::string& search, const std::string& substitute);
diff --git a/src/validation.cpp b/src/validation.cpp
index 86c5c4d161..5fd2d05447 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -2730,7 +2730,6 @@ bool Chainstate::DisconnectTip(BlockValidationState& state, DisconnectedBlockTra
return true;
}
-static SteadyClock::duration time_read_from_disk_total{};
static SteadyClock::duration time_connect_total{};
static SteadyClock::duration time_flush{};
static SteadyClock::duration time_chainstate{};
@@ -2804,12 +2803,11 @@ bool Chainstate::ConnectTip(BlockValidationState& state, CBlockIndex* pindexNew,
const CBlock& blockConnecting = *pthisBlock;
// Apply the block atomically to the chain state.
const auto time_2{SteadyClock::now()};
- time_read_from_disk_total += time_2 - time_1;
SteadyClock::time_point time_3;
- LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms [%.2fs (%.2fms/blk)]\n",
- Ticks<MillisecondsDouble>(time_2 - time_1),
- Ticks<SecondsDouble>(time_read_from_disk_total),
- Ticks<MillisecondsDouble>(time_read_from_disk_total) / num_blocks_total);
+ // When adding aggregate statistics in the future, keep in mind that
+ // num_blocks_total may be zero until the ConnectBlock() call below.
+ LogPrint(BCLog::BENCH, " - Load block from disk: %.2fms\n",
+ Ticks<MillisecondsDouble>(time_2 - time_1));
{
CCoinsViewCache view(&CoinsTip());
bool rv = ConnectBlock(blockConnecting, state, pindexNew, view);
diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp
index 953fdb63b6..1c8d65c583 100644
--- a/src/wallet/scriptpubkeyman.cpp
+++ b/src/wallet/scriptpubkeyman.cpp
@@ -710,6 +710,8 @@ void LegacyScriptPubKeyMan::UpdateTimeFirstKey(int64_t nCreateTime)
} else if (!nTimeFirstKey || nCreateTime < nTimeFirstKey) {
nTimeFirstKey = nCreateTime;
}
+
+ NotifyFirstKeyTimeChanged(this, nTimeFirstKey);
}
bool LegacyScriptPubKeyMan::LoadKey(const CKey& key, const CPubKey &pubkey)
@@ -2736,6 +2738,8 @@ void DescriptorScriptPubKeyMan::UpdateWalletDescriptor(WalletDescriptor& descrip
m_map_script_pub_keys.clear();
m_max_cached_index = -1;
m_wallet_descriptor = descriptor;
+
+ NotifyFirstKeyTimeChanged(this, m_wallet_descriptor.creation_time);
}
bool DescriptorScriptPubKeyMan::CanUpdateToWalletDescriptor(const WalletDescriptor& descriptor, std::string& error)
diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h
index 22b67c88e9..bf35c776ae 100644
--- a/src/wallet/scriptpubkeyman.h
+++ b/src/wallet/scriptpubkeyman.h
@@ -5,6 +5,7 @@
#ifndef BITCOIN_WALLET_SCRIPTPUBKEYMAN_H
#define BITCOIN_WALLET_SCRIPTPUBKEYMAN_H
+#include <logging.h>
#include <psbt.h>
#include <script/descriptor.h>
#include <script/signingprovider.h>
@@ -27,6 +28,8 @@ enum class OutputType;
struct bilingual_str;
namespace wallet {
+struct MigrationData;
+
// Wallet storage things that ScriptPubKeyMans need in order to be able to store things to the wallet database.
// It provides access to things that are part of the entire wallet and not specific to a ScriptPubKeyMan such as
// wallet flags, wallet version, encryption keys, encryption status, and the database itself. This allows a
@@ -256,6 +259,9 @@ public:
/** Keypool has new keys */
boost::signals2::signal<void ()> NotifyCanGetAddressesChanged;
+
+ /** Birth time changed */
+ boost::signals2::signal<void (const ScriptPubKeyMan* spkm, int64_t new_birth_time)> NotifyFirstKeyTimeChanged;
};
/** OutputTypes supported by the LegacyScriptPubKeyMan */
@@ -658,6 +664,18 @@ public:
void UpgradeDescriptorCache();
};
+
+/** struct containing information needed for migrating legacy wallets to descriptor wallets */
+struct MigrationData
+{
+ CExtKey master_key;
+ std::vector<std::pair<std::string, int64_t>> watch_descs;
+ std::vector<std::pair<std::string, int64_t>> solvable_descs;
+ std::vector<std::unique_ptr<DescriptorScriptPubKeyMan>> desc_spkms;
+ std::shared_ptr<CWallet> watchonly_wallet{nullptr};
+ std::shared_ptr<CWallet> solvable_wallet{nullptr};
+};
+
} // namespace wallet
#endif // BITCOIN_WALLET_SCRIPTPUBKEYMAN_H
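NotifyFirstKeyTimeChanged follows the same boost::signals2 pattern as the existing ScriptPubKeyMan signals: the wallet connects a handler once, and the key manager fires the signal whenever its earliest key time changes. A self-contained sketch of that pattern, using illustrative class and variable names rather than the real wallet types:

    #include <boost/signals2/signal.hpp>
    #include <cstdint>
    #include <limits>

    struct KeyMan {
        boost::signals2::signal<void(const KeyMan*, int64_t)> NotifyFirstKeyTimeChanged;
        void LoadKeyWithTime(int64_t t) { NotifyFirstKeyTimeChanged(this, t); } // fire on load
    };

    int main()
    {
        KeyMan km;
        int64_t birth_time{std::numeric_limits<int64_t>::max()};
        km.NotifyFirstKeyTimeChanged.connect([&](const KeyMan*, int64_t t) {
            if (t < birth_time) birth_time = t; // same rule as CWallet::FirstKeyTimeChanged
        });
        km.LoadKeyWithTime(1600000000); // birth_time is now 1600000000
    }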
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index b3eed8abc7..63ace5c47f 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -42,6 +42,7 @@
#include <wallet/context.h>
#include <wallet/external_signer_scriptpubkeyman.h>
#include <wallet/fees.h>
+#include <wallet/scriptpubkeyman.h>
#include <univalue.h>
@@ -1266,11 +1267,6 @@ bool CWallet::AbandonTransaction(const uint256& hashTx)
{
LOCK(cs_wallet);
- WalletBatch batch(GetDatabase());
-
- std::set<uint256> todo;
- std::set<uint256> done;
-
// Can't mark abandoned if confirmed or in mempool
auto it = mapWallet.find(hashTx);
assert(it != mapWallet.end());
@@ -1279,44 +1275,25 @@ bool CWallet::AbandonTransaction(const uint256& hashTx)
return false;
}
- todo.insert(hashTx);
-
- while (!todo.empty()) {
- uint256 now = *todo.begin();
- todo.erase(now);
- done.insert(now);
- auto it = mapWallet.find(now);
- assert(it != mapWallet.end());
- CWalletTx& wtx = it->second;
- int currentconfirm = GetTxDepthInMainChain(wtx);
- // If the orig tx was not in block, none of its spends can be
- assert(currentconfirm <= 0);
- // if (currentconfirm < 0) {Tx and spends are already conflicted, no need to abandon}
- if (currentconfirm == 0 && !wtx.isAbandoned()) {
- // If the orig tx was not in block/mempool, none of its spends can be in mempool
- assert(!wtx.InMempool());
+ auto try_updating_state = [](CWalletTx& wtx) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) {
+ // If the orig tx was not in block/mempool, none of its spends can be.
+ assert(!wtx.isConfirmed());
+ assert(!wtx.InMempool());
+ // If already conflicted or abandoned, no need to set abandoned
+ if (!wtx.isConflicted() && !wtx.isAbandoned()) {
wtx.m_state = TxStateInactive{/*abandoned=*/true};
- wtx.MarkDirty();
- batch.WriteTx(wtx);
- NotifyTransactionChanged(wtx.GetHash(), CT_UPDATED);
- // Iterate over all its outputs, and mark transactions in the wallet that spend them abandoned too.
- // States are not permanent, so these transactions can become unabandoned if they are re-added to the
- // mempool, or confirmed in a block, or conflicted.
- // Note: If the reorged coinbase is re-added to the main chain, the descendants that have not had their
- // states change will remain abandoned and will require manual broadcast if the user wants them.
- for (unsigned int i = 0; i < wtx.tx->vout.size(); ++i) {
- std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range = mapTxSpends.equal_range(COutPoint(now, i));
- for (TxSpends::const_iterator iter = range.first; iter != range.second; ++iter) {
- if (!done.count(iter->second)) {
- todo.insert(iter->second);
- }
- }
- }
- // If a transaction changes 'conflicted' state, that changes the balance
- // available of the outputs it spends. So force those to be recomputed
- MarkInputsDirty(wtx.tx);
+ return TxUpdate::NOTIFY_CHANGED;
}
- }
+ return TxUpdate::UNCHANGED;
+ };
+
+ // Iterate over all its outputs, and mark transactions in the wallet that spend them abandoned too.
+ // States are not permanent, so these transactions can become unabandoned if they are re-added to the
+ // mempool, or confirmed in a block, or conflicted.
+ // Note: If the reorged coinbase is re-added to the main chain, the descendants that have not had their
+ // states change will remain abandoned and will require manual broadcast if the user wants them.
+
+ RecursiveUpdateTxState(hashTx, try_updating_state);
return true;
}
@@ -1333,13 +1310,29 @@ void CWallet::MarkConflicted(const uint256& hashBlock, int conflicting_height, c
if (conflictconfirms >= 0)
return;
+ auto try_updating_state = [&](CWalletTx& wtx) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) {
+ if (conflictconfirms < GetTxDepthInMainChain(wtx)) {
+ // Block is 'more conflicted' than current confirm; update.
+ // Mark transaction as conflicted with this block.
+ wtx.m_state = TxStateConflicted{hashBlock, conflicting_height};
+ return TxUpdate::CHANGED;
+ }
+ return TxUpdate::UNCHANGED;
+ };
+
+ // Iterate over all its outputs, and mark transactions in the wallet that spend them conflicted too.
+ RecursiveUpdateTxState(hashTx, try_updating_state);
+
+}
+
+void CWallet::RecursiveUpdateTxState(const uint256& tx_hash, const TryUpdatingStateFn& try_updating_state) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) {
// Do not flush the wallet here for performance reasons
WalletBatch batch(GetDatabase(), false);
std::set<uint256> todo;
std::set<uint256> done;
- todo.insert(hashTx);
+ todo.insert(tx_hash);
while (!todo.empty()) {
uint256 now = *todo.begin();
@@ -1348,14 +1341,12 @@ void CWallet::MarkConflicted(const uint256& hashBlock, int conflicting_height, c
auto it = mapWallet.find(now);
assert(it != mapWallet.end());
CWalletTx& wtx = it->second;
- int currentconfirm = GetTxDepthInMainChain(wtx);
- if (conflictconfirms < currentconfirm) {
- // Block is 'more conflicted' than current confirm; update.
- // Mark transaction as conflicted with this block.
- wtx.m_state = TxStateConflicted{hashBlock, conflicting_height};
+
+ TxUpdate update_state = try_updating_state(wtx);
+ if (update_state != TxUpdate::UNCHANGED) {
wtx.MarkDirty();
batch.WriteTx(wtx);
- // Iterate over all its outputs, and mark transactions in the wallet that spend them conflicted too
+ // Iterate over all its outputs, and update those tx states as well (if applicable)
for (unsigned int i = 0; i < wtx.tx->vout.size(); ++i) {
std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range = mapTxSpends.equal_range(COutPoint(now, i));
for (TxSpends::const_iterator iter = range.first; iter != range.second; ++iter) {
@@ -1364,7 +1355,12 @@ void CWallet::MarkConflicted(const uint256& hashBlock, int conflicting_height, c
}
}
}
- // If a transaction changes 'conflicted' state, that changes the balance
+
+ if (update_state == TxUpdate::NOTIFY_CHANGED) {
+ NotifyTransactionChanged(wtx.GetHash(), CT_UPDATED);
+ }
+
+ // If a transaction changes its tx state, that usually changes the balance
// available of the outputs it spends. So force those to be recomputed
MarkInputsDirty(wtx.tx);
}
@@ -1436,6 +1432,12 @@ void CWallet::blockConnected(const interfaces::BlockInfo& block)
m_last_block_processed_height = block.height;
m_last_block_processed = block.hash;
+
+ // No need to scan block if it was created before the wallet birthday.
+ // Uses chain max time and twice the grace period to adjust time for block time variability.
+ if (block.chain_time_max < m_birth_time.load() - (TIMESTAMP_WINDOW * 2)) return;
+
+ // Scan block
for (size_t index = 0; index < block.data->vtx.size(); index++) {
SyncTransaction(block.data->vtx[index], TxStateConfirmed{block.hash, block.height, static_cast<int>(index)});
transactionRemovedFromMempool(block.data->vtx[index], MemPoolRemovalReason::BLOCK);
@@ -1453,8 +1455,36 @@ void CWallet::blockDisconnected(const interfaces::BlockInfo& block)
// future with a stickier abandoned state or even removing abandontransaction call.
m_last_block_processed_height = block.height - 1;
m_last_block_processed = *Assert(block.prev_hash);
+
+ int disconnect_height = block.height;
+
for (const CTransactionRef& ptx : Assert(block.data)->vtx) {
SyncTransaction(ptx, TxStateInactive{});
+
+ for (const CTxIn& tx_in : ptx->vin) {
+ // No other wallet transactions conflicted with this transaction
+ if (mapTxSpends.count(tx_in.prevout) < 1) continue;
+
+ std::pair<TxSpends::const_iterator, TxSpends::const_iterator> range = mapTxSpends.equal_range(tx_in.prevout);
+
+ // For all of the spends that conflict with this transaction
+ for (TxSpends::const_iterator _it = range.first; _it != range.second; ++_it) {
+ CWalletTx& wtx = mapWallet.find(_it->second)->second;
+
+ if (!wtx.isConflicted()) continue;
+
+ auto try_updating_state = [&](CWalletTx& tx) {
+ if (!tx.isConflicted()) return TxUpdate::UNCHANGED;
+ if (tx.state<TxStateConflicted>()->conflicting_block_height >= disconnect_height) {
+ tx.m_state = TxStateInactive{};
+ return TxUpdate::CHANGED;
+ }
+ return TxUpdate::UNCHANGED;
+ };
+
+ RecursiveUpdateTxState(wtx.tx->GetHash(), try_updating_state);
+ }
+ }
}
}
@@ -1779,6 +1809,14 @@ bool CWallet::ImportScriptPubKeys(const std::string& label, const std::set<CScri
return true;
}
+void CWallet::FirstKeyTimeChanged(const ScriptPubKeyMan* spkm, int64_t new_birth_time)
+{
+ int64_t birthtime = m_birth_time.load();
+ if (new_birth_time < birthtime) {
+ m_birth_time = new_birth_time;
+ }
+}
+
/**
* Scan active chain for relevant transactions after importing keys. This should
* be called whenever new keys are added to the wallet, with the oldest key
@@ -3107,6 +3145,14 @@ std::shared_ptr<CWallet> CWallet::Create(WalletContext& context, const std::stri
// Try to top up keypool. No-op if the wallet is locked.
walletInstance->TopUpKeyPool();
+ // Cache the first key time
+ std::optional<int64_t> time_first_key;
+ for (auto spk_man : walletInstance->GetAllScriptPubKeyMans()) {
+ int64_t time = spk_man->GetTimeFirstKey();
+ if (!time_first_key || time < *time_first_key) time_first_key = time;
+ }
+ if (time_first_key) walletInstance->m_birth_time = *time_first_key;
+
if (chain && !AttachChain(walletInstance, *chain, rescan_required, error, warnings)) {
return nullptr;
}
@@ -3182,11 +3228,7 @@ bool CWallet::AttachChain(const std::shared_ptr<CWallet>& walletInstance, interf
{
// No need to read and scan block if block was created before
// our wallet birthday (as adjusted for block time variability)
- std::optional<int64_t> time_first_key;
- for (auto spk_man : walletInstance->GetAllScriptPubKeyMans()) {
- int64_t time = spk_man->GetTimeFirstKey();
- if (!time_first_key || time < *time_first_key) time_first_key = time;
- }
+ std::optional<int64_t> time_first_key = walletInstance->m_birth_time.load();
if (time_first_key) {
FoundBlock found = FoundBlock().height(rescan_height);
chain.findFirstBlockWithTimeAndHeight(*time_first_key - TIMESTAMP_WINDOW, rescan_height, found);
@@ -3498,6 +3540,14 @@ LegacyScriptPubKeyMan* CWallet::GetOrCreateLegacyScriptPubKeyMan()
return GetLegacyScriptPubKeyMan();
}
+void CWallet::AddScriptPubKeyMan(const uint256& id, std::unique_ptr<ScriptPubKeyMan> spkm_man)
+{
+ const auto& spkm = m_spk_managers[id] = std::move(spkm_man);
+
+ // Update birth time if needed
+ FirstKeyTimeChanged(spkm.get(), spkm->GetTimeFirstKey());
+}
+
void CWallet::SetupLegacyScriptPubKeyMan()
{
if (!m_internal_spk_managers.empty() || !m_external_spk_managers.empty() || !m_spk_managers.empty() || IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)) {
@@ -3509,7 +3559,8 @@ void CWallet::SetupLegacyScriptPubKeyMan()
m_internal_spk_managers[type] = spk_manager.get();
m_external_spk_managers[type] = spk_manager.get();
}
- m_spk_managers[spk_manager->GetID()] = std::move(spk_manager);
+ uint256 id = spk_manager->GetID();
+ AddScriptPubKeyMan(id, std::move(spk_manager));
}
const CKeyingMaterial& CWallet::GetEncryptionKey() const
@@ -3527,6 +3578,7 @@ void CWallet::ConnectScriptPubKeyManNotifiers()
for (const auto& spk_man : GetActiveScriptPubKeyMans()) {
spk_man->NotifyWatchonlyChanged.connect(NotifyWatchonlyChanged);
spk_man->NotifyCanGetAddressesChanged.connect(NotifyCanGetAddressesChanged);
+ spk_man->NotifyFirstKeyTimeChanged.connect(std::bind(&CWallet::FirstKeyTimeChanged, this, std::placeholders::_1, std::placeholders::_2));
}
}
@@ -3534,10 +3586,10 @@ void CWallet::LoadDescriptorScriptPubKeyMan(uint256 id, WalletDescriptor& desc)
{
if (IsWalletFlagSet(WALLET_FLAG_EXTERNAL_SIGNER)) {
auto spk_manager = std::unique_ptr<ScriptPubKeyMan>(new ExternalSignerScriptPubKeyMan(*this, desc, m_keypool_size));
- m_spk_managers[id] = std::move(spk_manager);
+ AddScriptPubKeyMan(id, std::move(spk_manager));
} else {
auto spk_manager = std::unique_ptr<ScriptPubKeyMan>(new DescriptorScriptPubKeyMan(*this, desc, m_keypool_size));
- m_spk_managers[id] = std::move(spk_manager);
+ AddScriptPubKeyMan(id, std::move(spk_manager));
}
}
@@ -3558,7 +3610,7 @@ void CWallet::SetupDescriptorScriptPubKeyMans(const CExtKey& master_key)
}
spk_manager->SetupDescriptorGeneration(master_key, t, internal);
uint256 id = spk_manager->GetID();
- m_spk_managers[id] = std::move(spk_manager);
+ AddScriptPubKeyMan(id, std::move(spk_manager));
AddActiveScriptPubKeyMan(id, t, internal);
}
}
@@ -3606,7 +3658,7 @@ void CWallet::SetupDescriptorScriptPubKeyMans()
auto spk_manager = std::unique_ptr<ExternalSignerScriptPubKeyMan>(new ExternalSignerScriptPubKeyMan(*this, m_keypool_size));
spk_manager->SetupDescriptor(std::move(desc));
uint256 id = spk_manager->GetID();
- m_spk_managers[id] = std::move(spk_manager);
+ AddScriptPubKeyMan(id, std::move(spk_manager));
AddActiveScriptPubKeyMan(id, t, internal);
}
}
@@ -3723,7 +3775,8 @@ ScriptPubKeyMan* CWallet::AddWalletDescriptor(WalletDescriptor& desc, const Flat
spk_man = new_spk_man.get();
// Save the descriptor to memory
- m_spk_managers[new_spk_man->GetID()] = std::move(new_spk_man);
+ uint256 id = new_spk_man->GetID();
+ AddScriptPubKeyMan(id, std::move(new_spk_man));
}
// Add the private keys to the descriptor
@@ -3866,7 +3919,8 @@ bool CWallet::ApplyMigrationData(MigrationData& data, bilingual_str& error)
error = _("Error: Duplicate descriptors created during migration. Your wallet may be corrupted.");
return false;
}
- m_spk_managers[desc_spkm->GetID()] = std::move(desc_spkm);
+ uint256 id = desc_spkm->GetID();
+ AddScriptPubKeyMan(id, std::move(desc_spkm));
}
// Remove the LegacyScriptPubKeyMan from disk
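The refactored helper takes a TryUpdatingStateFn callback and applies it to the given transaction and every in-wallet descendant: TxUpdate::UNCHANGED leaves a transaction alone (and stops descending through it), TxUpdate::CHANGED writes the new state to the wallet database, and TxUpdate::NOTIFY_CHANGED additionally fires NotifyTransactionChanged. A minimal caller sketch mirroring the lambdas above (the ShouldAbandon predicate is a placeholder, not wallet logic):

    RecursiveUpdateTxState(start_txid, [&](CWalletTx& wtx) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) {
        if (!ShouldAbandon(wtx)) return TxUpdate::UNCHANGED; // leave this tx and stop descending from it
        wtx.m_state = TxStateInactive{/*abandoned=*/true};   // new state is persisted by the helper
        return TxUpdate::NOTIFY_CHANGED;                      // also emit NotifyTransactionChanged
    });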
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index 79f4c43456..7bf7264568 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -299,6 +299,10 @@ private:
// Local time that the tip block was received. Used to schedule wallet rebroadcasts.
std::atomic<int64_t> m_best_block_time {0};
+ // First created key time. Used to skip blocks prior to this time.
+ // 'std::numeric_limits<int64_t>::max()' if wallet is blank.
+ std::atomic<int64_t> m_birth_time{std::numeric_limits<int64_t>::max()};
+
/**
* Used to keep track of spent outpoints, and
* detect and report conflicts (double-spends or
@@ -330,6 +334,13 @@ private:
/** Mark a transaction (and its in-wallet descendants) as conflicting with a particular block. */
void MarkConflicted(const uint256& hashBlock, int conflicting_height, const uint256& hashTx);
+ enum class TxUpdate { UNCHANGED, CHANGED, NOTIFY_CHANGED };
+
+ using TryUpdatingStateFn = std::function<TxUpdate(CWalletTx& wtx)>;
+
+ /** Mark a transaction (and its in-wallet descendants) as a particular tx state. */
+ void RecursiveUpdateTxState(const uint256& tx_hash, const TryUpdatingStateFn& try_updating_state) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+
/** Mark a transaction's inputs dirty, thus forcing the outputs to be recomputed */
void MarkInputsDirty(const CTransactionRef& tx) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
@@ -380,6 +391,10 @@ private:
// ScriptPubKeyMan::GetID. In many cases it will be the hash of an internal structure
std::map<uint256, std::unique_ptr<ScriptPubKeyMan>> m_spk_managers;
+ // Appends spk managers into the main 'm_spk_managers'.
+ // Must be the only method adding data to it.
+ void AddScriptPubKeyMan(const uint256& id, std::unique_ptr<ScriptPubKeyMan> spkm_man);
+
/**
* Catch wallet up to current chain, scanning new blocks, updating the best
* block locator and m_last_block_processed, and registering for
@@ -641,6 +656,9 @@ public:
bool ImportPubKeys(const std::vector<CKeyID>& ordered_pubkeys, const std::map<CKeyID, CPubKey>& pubkey_map, const std::map<CKeyID, std::pair<CPubKey, KeyOriginInfo>>& key_origins, const bool add_keypool, const bool internal, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
bool ImportScriptPubKeys(const std::string& label, const std::set<CScript>& script_pub_keys, const bool have_solving_data, const bool apply_label, const int64_t timestamp) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet);
+ /** Updates wallet birth time if 'new_birth_time' is below it */
+ void FirstKeyTimeChanged(const ScriptPubKeyMan* spkm, int64_t new_birth_time);
+
CFeeRate m_pay_tx_fee{DEFAULT_PAY_TX_FEE};
unsigned int m_confirm_target{DEFAULT_TX_CONFIRM_TARGET};
/** Allow Coin Selection to pick unconfirmed UTXOs that were sent from our own wallet if it
diff --git a/src/wallet/walletutil.h b/src/wallet/walletutil.h
index f639078de5..3f0c1228e3 100644
--- a/src/wallet/walletutil.h
+++ b/src/wallet/walletutil.h
@@ -104,20 +104,6 @@ public:
WalletDescriptor() {}
WalletDescriptor(std::shared_ptr<Descriptor> descriptor, uint64_t creation_time, int32_t range_start, int32_t range_end, int32_t next_index) : descriptor(descriptor), creation_time(creation_time), range_start(range_start), range_end(range_end), next_index(next_index) {}
};
-
-class CWallet;
-class DescriptorScriptPubKeyMan;
-
-/** struct containing information needed for migrating legacy wallets to descriptor wallets */
-struct MigrationData
-{
- CExtKey master_key;
- std::vector<std::pair<std::string, int64_t>> watch_descs;
- std::vector<std::pair<std::string, int64_t>> solvable_descs;
- std::vector<std::unique_ptr<DescriptorScriptPubKeyMan>> desc_spkms;
- std::shared_ptr<CWallet> watchonly_wallet{nullptr};
- std::shared_ptr<CWallet> solvable_wallet{nullptr};
-};
} // namespace wallet
#endif // BITCOIN_WALLET_WALLETUTIL_H
diff --git a/test/README.md b/test/README.md
index 0eddb72e1f..fee8828d34 100644
--- a/test/README.md
+++ b/test/README.md
@@ -331,6 +331,7 @@ Use the `-v` option for verbose output.
| Lint test | Dependency |
|-----------|:----------:|
| [`lint-python.py`](lint/lint-python.py) | [flake8](https://gitlab.com/pycqa/flake8)
+| [`lint-python.py`](lint/lint-python.py) | [lief](https://github.com/lief-project/LIEF)
| [`lint-python.py`](lint/lint-python.py) | [mypy](https://github.com/python/mypy)
| [`lint-python.py`](lint/lint-python.py) | [pyzmq](https://github.com/zeromq/pyzmq)
| [`lint-python-dead-code.py`](lint/lint-python-dead-code.py) | [vulture](https://github.com/jendrikseipp/vulture)
diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py
index f9730b48c5..2257605870 100755
--- a/test/functional/feature_config_args.py
+++ b/test/functional/feature_config_args.py
@@ -5,9 +5,14 @@
"""Test various command line arguments and configuration file parameters."""
import os
+import pathlib
+import re
+import sys
+import tempfile
import time
from test_framework.test_framework import BitcoinTestFramework
+from test_framework.test_node import ErrorMatch
from test_framework import util
@@ -74,7 +79,7 @@ class ConfArgsTest(BitcoinTestFramework):
util.write_config(main_conf_file_path, n=0, chain='', extra_config=f'includeconf={inc_conf_file_path}\n')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('acceptnonstdtxn=1\n')
- self.nodes[0].assert_start_raises_init_error(extra_args=[f"-conf={main_conf_file_path}"], expected_msg='Error: acceptnonstdtxn is not currently supported for main chain')
+ self.nodes[0].assert_start_raises_init_error(extra_args=[f"-conf={main_conf_file_path}", "-allowignoredconf"], expected_msg='Error: acceptnonstdtxn is not currently supported for main chain')
with open(inc_conf_file_path, 'w', encoding='utf-8') as conf:
conf.write('nono\n')
@@ -108,6 +113,41 @@ class ConfArgsTest(BitcoinTestFramework):
with open(inc_conf_file2_path, 'w', encoding='utf-8') as conf:
conf.write('') # clear
+ def test_config_file_log(self):
+ # Disable this test for windows currently because trying to override
+ # the default datadir through the environment does not seem to work.
+ if sys.platform == "win32":
+ return
+
+ self.log.info('Test that the correct configuration file path is logged when the configuration file changes the datadir')
+
+ # Create a temporary directory that will be treated as the default data
+ # directory by bitcoind.
+ env, default_datadir = util.get_temp_default_datadir(pathlib.Path(self.options.tmpdir, "test_config_file_log"))
+ default_datadir.mkdir(parents=True)
+
+ # Write a bitcoin.conf file in the default data directory containing a
+ # datadir= line pointing at the node datadir.
+ node = self.nodes[0]
+ conf_text = pathlib.Path(node.bitcoinconf).read_text()
+ conf_path = default_datadir / "bitcoin.conf"
+ conf_path.write_text(f"datadir={node.datadir}\n{conf_text}")
+
+ # Drop the node -datadir= argument during this test, because if it is
+ # specified it would take precedence over the datadir setting in the
+ # config file.
+ node_args = node.args
+ node.args = [arg for arg in node.args if not arg.startswith("-datadir=")]
+
+ # Check that correct configuration file path is actually logged
+ # (conf_path, not node.bitcoinconf)
+ with self.nodes[0].assert_debug_log(expected_msgs=[f"Config file: {conf_path}"]):
+ self.start_node(0, ["-allowignoredconf"], env=env)
+ self.stop_node(0)
+
+ # Restore node arguments after the test
+ node.args = node_args
+
def test_invalid_command_line_options(self):
self.nodes[0].assert_start_raises_init_error(
expected_msg='Error: Error parsing command line arguments: Can not set -proxy with no value. Please specify value with -proxy=value.',
@@ -282,6 +322,55 @@ class ConfArgsTest(BitcoinTestFramework):
unexpected_msgs=seednode_ignored):
self.restart_node(0, extra_args=[connect_arg, '-seednode=fakeaddress2'])
+ def test_ignored_conf(self):
+ self.log.info('Test error is triggered when the datadir in use contains a bitcoin.conf file that would be ignored '
+ 'because a conflicting -conf file argument is passed.')
+ node = self.nodes[0]
+ with tempfile.NamedTemporaryFile(dir=self.options.tmpdir, mode="wt", delete=False) as temp_conf:
+ temp_conf.write(f"datadir={node.datadir}\n")
+ node.assert_start_raises_init_error([f"-conf={temp_conf.name}"], re.escape(
+ f'Error: Data directory "{node.datadir}" contains a "bitcoin.conf" file which is ignored, because a '
+ f'different configuration file "{temp_conf.name}" from command line argument "-conf={temp_conf.name}" '
+ f'is being used instead.') + r"[\s\S]*", match=ErrorMatch.FULL_REGEX)
+
+ # Test that passing a redundant -conf command line argument pointing to
+ # the same bitcoin.conf that would be loaded anyway does not trigger an
+ # error.
+ self.start_node(0, [f'-conf={node.datadir}/bitcoin.conf'])
+ self.stop_node(0)
+
+ def test_ignored_default_conf(self):
+ # Disable this test for windows currently because trying to override
+ # the default datadir through the environment does not seem to work.
+ if sys.platform == "win32":
+ return
+
+ self.log.info('Test error is triggered when bitcoin.conf in the default data directory sets another datadir '
+ 'and it contains a different bitcoin.conf file that would be ignored')
+
+ # Create a temporary directory that will be treated as the default data
+ # directory by bitcoind.
+ env, default_datadir = util.get_temp_default_datadir(pathlib.Path(self.options.tmpdir, "home"))
+ default_datadir.mkdir(parents=True)
+
+ # Write a bitcoin.conf file in the default data directory containing a
+ # datadir= line pointing at the node datadir. This will trigger a
+ # startup error because the node datadir contains a different
+ # bitcoin.conf that would be ignored.
+ node = self.nodes[0]
+ (default_datadir / "bitcoin.conf").write_text(f"datadir={node.datadir}\n")
+
+ # Drop the node -datadir= argument during this test, because if it is
+ # specified it would take precedence over the datadir setting in the
+ # config file.
+ node_args = node.args
+ node.args = [arg for arg in node.args if not arg.startswith("-datadir=")]
+ node.assert_start_raises_init_error([], re.escape(
+ f'Error: Data directory "{node.datadir}" contains a "bitcoin.conf" file which is ignored, because a '
+ f'different configuration file "{default_datadir}/bitcoin.conf" from data directory "{default_datadir}" '
+ f'is being used instead.') + r"[\s\S]*", env=env, match=ErrorMatch.FULL_REGEX)
+ node.args = node_args
+
def run_test(self):
self.test_log_buffer()
self.test_args_log()
@@ -290,7 +379,10 @@ class ConfArgsTest(BitcoinTestFramework):
self.test_connect_with_seednode()
self.test_config_file_parser()
+ self.test_config_file_log()
self.test_invalid_command_line_options()
+ self.test_ignored_conf()
+ self.test_ignored_default_conf()
# Remove the -datadir argument so it doesn't override the config file
self.nodes[0].args = [arg for arg in self.nodes[0].args if not arg.startswith("-datadir")]
diff --git a/test/functional/feature_signet.py b/test/functional/feature_signet.py
index b41fe378af..a90a2a8e5e 100755
--- a/test/functional/feature_signet.py
+++ b/test/functional/feature_signet.py
@@ -76,6 +76,9 @@ class SignetBasicTest(BitcoinTestFramework):
self.log.info("test that signet logs the network magic on node start")
with self.nodes[0].assert_debug_log(["Signet derived magic (message start)"]):
self.restart_node(0)
+ self.stop_node(0)
+ self.nodes[0].assert_start_raises_init_error(extra_args=["-signetchallenge=abc"], expected_msg="Error: -signetchallenge must be hex, not 'abc'.")
+ self.nodes[0].assert_start_raises_init_error(extra_args=["-signetchallenge=abc"] * 2, expected_msg="Error: -signetchallenge cannot be multiple values.")
if __name__ == '__main__':
diff --git a/test/functional/interface_usdt_mempool.py b/test/functional/interface_usdt_mempool.py
index ec2f9e4e77..542849d558 100755
--- a/test/functional/interface_usdt_mempool.py
+++ b/test/functional/interface_usdt_mempool.py
@@ -173,6 +173,7 @@ class MempoolTracepointTest(BitcoinTestFramework):
self.log.info("Ensuring mempool:added event was handled successfully...")
assert_equal(EXPECTED_ADDED_EVENTS, handled_added_events)
+ self.generate(self.wallet, 1)
def removed_test(self):
"""Expire a transaction from the mempool and make sure the tracepoint returns
@@ -223,6 +224,7 @@ class MempoolTracepointTest(BitcoinTestFramework):
self.log.info("Ensuring mempool:removed event was handled successfully...")
assert_equal(EXPECTED_REMOVED_EVENTS, handled_removed_events)
+ self.generate(self.wallet, 1)
def replaced_test(self):
"""Replace one and two transactions in the mempool and make sure the tracepoint
@@ -280,6 +282,7 @@ class MempoolTracepointTest(BitcoinTestFramework):
self.log.info("Ensuring mempool:replaced event was handled successfully...")
assert_equal(EXPECTED_REPLACED_EVENTS, handled_replaced_events)
+ self.generate(self.wallet, 1)
def rejected_test(self):
"""Create an invalid transaction and make sure the tracepoint returns
@@ -321,6 +324,7 @@ class MempoolTracepointTest(BitcoinTestFramework):
self.log.info("Ensuring mempool:rejected event was handled successfully...")
assert_equal(EXPECTED_REJECTED_EVENTS, handled_rejected_events)
+ self.generate(self.wallet, 1)
def run_test(self):
"""Tests the mempool:added, mempool:removed, mempool:replaced,
diff --git a/test/functional/mempool_compatibility.py b/test/functional/mempool_compatibility.py
index a7bdc49695..7337802aea 100755
--- a/test/functional/mempool_compatibility.py
+++ b/test/functional/mempool_compatibility.py
@@ -47,12 +47,12 @@ class MempoolCompatibilityTest(BitcoinTestFramework):
# unbroadcasted_tx won't pass old_node's `MemPoolAccept::PreChecks`.
self.connect_nodes(0, 1)
self.sync_blocks()
- self.stop_node(1)
self.log.info("Add a transaction to mempool on old node and shutdown")
old_tx_hash = new_wallet.send_self_transfer(from_node=old_node)["txid"]
assert old_tx_hash in old_node.getrawmempool()
self.stop_node(0)
+ self.stop_node(1)
self.log.info("Move mempool.dat from old to new node")
old_node_mempool = os.path.join(old_node.datadir, self.chain, 'mempool.dat')
diff --git a/test/functional/mempool_package_limits.py b/test/functional/mempool_package_limits.py
index 6143ae83de..81451bf2a5 100755
--- a/test/functional/mempool_package_limits.py
+++ b/test/functional/mempool_package_limits.py
@@ -46,8 +46,7 @@ class MempoolPackageLimitsTest(BitcoinTestFramework):
def run_test(self):
self.wallet = MiniWallet(self.nodes[0])
# Add enough mature utxos to the wallet so that all txs spend confirmed coins.
- self.generate(self.wallet, 35)
- self.generate(self.nodes[0], COINBASE_MATURITY)
+ self.generate(self.wallet, COINBASE_MATURITY + 35)
self.test_chain_limits()
self.test_desc_count_limits()
diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py
index 0387282862..95f7939412 100755
--- a/test/functional/mempool_packages.py
+++ b/test/functional/mempool_packages.py
@@ -7,7 +7,6 @@
from decimal import Decimal
from test_framework.messages import (
- COIN,
DEFAULT_ANCESTOR_LIMIT,
DEFAULT_DESCENDANT_LIMIT,
)
@@ -26,9 +25,6 @@ assert CUSTOM_DESCENDANT_LIMIT >= CUSTOM_ANCESTOR_LIMIT
class MempoolPackagesTest(BitcoinTestFramework):
- def add_options(self, parser):
- self.add_wallet_options(parser)
-
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [
@@ -47,10 +43,6 @@ class MempoolPackagesTest(BitcoinTestFramework):
self.wallet = MiniWallet(self.nodes[0])
self.wallet.rescan_utxos()
- if self.is_specified_wallet_compiled():
- self.nodes[0].createwallet("watch_wallet", disable_private_keys=True)
- self.nodes[0].importaddress(self.wallet.get_address())
-
peer_inv_store = self.nodes[0].add_p2p_connection(P2PTxInvStore()) # keep track of invs
# DEFAULT_ANCESTOR_LIMIT transactions off a confirmed tx should be fine
@@ -63,13 +55,6 @@ class MempoolPackagesTest(BitcoinTestFramework):
ancestor_vsize += t["tx"].get_vsize()
ancestor_fees += t["fee"]
self.wallet.sendrawtransaction(from_node=self.nodes[0], tx_hex=t["hex"])
- # Check that listunspent ancestor{count, size, fees} yield the correct results
- if self.is_specified_wallet_compiled():
- wallet_unspent = self.nodes[0].listunspent(minconf=0)
- this_unspent = next(utxo_info for utxo_info in wallet_unspent if utxo_info["txid"] == t["txid"])
- assert_equal(this_unspent['ancestorcount'], i + 1)
- assert_equal(this_unspent['ancestorsize'], ancestor_vsize)
- assert_equal(this_unspent['ancestorfees'], ancestor_fees * COIN)
# Wait until mempool transactions have passed initial broadcast (sent inv and received getdata)
# Otherwise, getrawmempool may be inconsistent with getmempoolentry if unbroadcast changes in between
diff --git a/test/functional/mempool_persist.py b/test/functional/mempool_persist.py
index f818801136..8f74d9de20 100755
--- a/test/functional/mempool_persist.py
+++ b/test/functional/mempool_persist.py
@@ -191,6 +191,7 @@ class MempoolPersistTest(BitcoinTestFramework):
def test_persist_unbroadcast(self):
node0 = self.nodes[0]
self.start_node(0)
+ self.start_node(2)
# clear out mempool
self.generate(node0, 1, sync_fun=self.no_op)
diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py
index 23eeea50bc..d6c06fdeed 100755
--- a/test/functional/p2p_compactblocks.py
+++ b/test/functional/p2p_compactblocks.py
@@ -105,6 +105,10 @@ class TestP2PConn(P2PInterface):
self.last_message.pop("headers", None)
self.last_message.pop("cmpctblock", None)
+ def clear_getblocktxn(self):
+ with p2p_lock:
+ self.last_message.pop("getblocktxn", None)
+
def get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
@@ -745,7 +749,7 @@ class CompactBlocksTest(BitcoinTestFramework):
peer.get_headers(locator=[int(tip, 16)], hashstop=0)
peer.send_and_ping(msg_sendcmpct(announce=True, version=2))
- def test_compactblock_reconstruction_multiple_peers(self, stalling_peer, delivery_peer):
+ def test_compactblock_reconstruction_stalling_peer(self, stalling_peer, delivery_peer):
node = self.nodes[0]
assert len(self.utxos)
@@ -823,12 +827,85 @@ class CompactBlocksTest(BitcoinTestFramework):
hb_test_node.send_and_ping(msg_sendcmpct(announce=False, version=2))
assert_highbandwidth_states(self.nodes[0], hb_to=True, hb_from=False)
+ def test_compactblock_reconstruction_parallel_reconstruction(self, stalling_peer, delivery_peer, inbound_peer, outbound_peer):
+ """ All p2p connections are inbound except outbound_peer. We test that ultimate parallel slot
+ can only be taken by an outbound node unless prior attempts were done by an outbound
+ """
+ node = self.nodes[0]
+ assert len(self.utxos)
+
+ def announce_cmpct_block(node, peer, txn_count):
+ utxo = self.utxos.pop(0)
+ block = self.build_block_with_transactions(node, utxo, txn_count)
+
+ cmpct_block = HeaderAndShortIDs()
+ cmpct_block.initialize_from_block(block)
+ msg = msg_cmpctblock(cmpct_block.to_p2p())
+ peer.send_and_ping(msg)
+ with p2p_lock:
+ assert "getblocktxn" in peer.last_message
+ return block, cmpct_block
+
+ for name, peer in [("delivery", delivery_peer), ("inbound", inbound_peer), ("outbound", outbound_peer)]:
+ self.log.info(f"Setting {name} as high bandwidth peer")
+ block, cmpct_block = announce_cmpct_block(node, peer, 1)
+ msg = msg_blocktxn()
+ msg.block_transactions.blockhash = block.sha256
+ msg.block_transactions.transactions = block.vtx[1:]
+ peer.send_and_ping(msg)
+ assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+ peer.clear_getblocktxn()
+
+ # Test the simple parallel download case...
+ for num_missing in [1, 5, 20]:
+
+ # Remaining low-bandwidth peer is stalling_peer, who announces first
+ assert_equal([peer['bip152_hb_to'] for peer in node.getpeerinfo()], [False, True, True, True])
+
+ block, cmpct_block = announce_cmpct_block(node, stalling_peer, num_missing)
+
+ delivery_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
+ with p2p_lock:
+ # The second peer to announce should still get a getblocktxn
+ assert "getblocktxn" in delivery_peer.last_message
+ assert int(node.getbestblockhash(), 16) != block.sha256
+
+ inbound_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
+ with p2p_lock:
+ # The third inbound peer to announce should *not* get a getblocktxn
+ assert "getblocktxn" not in inbound_peer.last_message
+ assert int(node.getbestblockhash(), 16) != block.sha256
+
+ outbound_peer.send_and_ping(msg_cmpctblock(cmpct_block.to_p2p()))
+ with p2p_lock:
+ # The third peer to announce should get a getblocktxn if outbound
+ assert "getblocktxn" in outbound_peer.last_message
+ assert int(node.getbestblockhash(), 16) != block.sha256
+
+ # Second peer completes the compact block first
+ msg = msg_blocktxn()
+ msg.block_transactions.blockhash = block.sha256
+ msg.block_transactions.transactions = block.vtx[1:]
+ delivery_peer.send_and_ping(msg)
+ assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+
+ # Nothing bad should happen if we get a late fill from the first peer...
+ stalling_peer.send_and_ping(msg)
+ self.utxos.append([block.vtx[-1].sha256, 0, block.vtx[-1].vout[0].nValue])
+
+ delivery_peer.clear_getblocktxn()
+ inbound_peer.clear_getblocktxn()
+ outbound_peer.clear_getblocktxn()
+
+
def run_test(self):
self.wallet = MiniWallet(self.nodes[0])
# Setup the p2p connections
self.segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn())
self.additional_segwit_node = self.nodes[0].add_p2p_connection(TestP2PConn())
+ self.onemore_inbound_node = self.nodes[0].add_p2p_connection(TestP2PConn())
+ self.outbound_node = self.nodes[0].add_outbound_p2p_connection(TestP2PConn(), p2p_idx=3, connection_type="outbound-full-relay")
# We will need UTXOs to construct transactions in later tests.
self.make_utxos()
@@ -838,6 +915,8 @@ class CompactBlocksTest(BitcoinTestFramework):
self.log.info("Testing SENDCMPCT p2p message... ")
self.test_sendcmpct(self.segwit_node)
self.test_sendcmpct(self.additional_segwit_node)
+ self.test_sendcmpct(self.onemore_inbound_node)
+ self.test_sendcmpct(self.outbound_node)
self.log.info("Testing compactblock construction...")
self.test_compactblock_construction(self.segwit_node)
@@ -860,8 +939,11 @@ class CompactBlocksTest(BitcoinTestFramework):
self.log.info("Testing handling of incorrect blocktxn responses...")
self.test_incorrect_blocktxn_response(self.segwit_node)
- self.log.info("Testing reconstructing compact blocks from all peers...")
- self.test_compactblock_reconstruction_multiple_peers(self.segwit_node, self.additional_segwit_node)
+ self.log.info("Testing reconstructing compact blocks with a stalling peer...")
+ self.test_compactblock_reconstruction_stalling_peer(self.segwit_node, self.additional_segwit_node)
+
+ self.log.info("Testing reconstructing compact blocks from multiple peers...")
+ self.test_compactblock_reconstruction_parallel_reconstruction(stalling_peer=self.segwit_node, inbound_peer=self.onemore_inbound_node, delivery_peer=self.additional_segwit_node, outbound_peer=self.outbound_node)
# Test that if we submitblock to node1, we'll get a compact block
# announcement to all peers.
diff --git a/test/functional/rpc_invalid_address_message.py b/test/functional/rpc_invalid_address_message.py
index fd282a9bc1..6759b69dd1 100755
--- a/test/functional/rpc_invalid_address_message.py
+++ b/test/functional/rpc_invalid_address_message.py
@@ -63,12 +63,12 @@ class InvalidAddressErrorMessageTest(BitcoinTestFramework):
def test_validateaddress(self):
# Invalid Bech32
- self.check_invalid(BECH32_INVALID_SIZE, 'Invalid Bech32 address data size')
+ self.check_invalid(BECH32_INVALID_SIZE, "Invalid Bech32 address program size (41 bytes)")
self.check_invalid(BECH32_INVALID_PREFIX, 'Invalid or unsupported Segwit (Bech32) or Base58 encoding.')
self.check_invalid(BECH32_INVALID_BECH32, 'Version 1+ witness address must use Bech32m checksum')
self.check_invalid(BECH32_INVALID_BECH32M, 'Version 0 witness address must use Bech32 checksum')
self.check_invalid(BECH32_INVALID_VERSION, 'Invalid Bech32 address witness version')
- self.check_invalid(BECH32_INVALID_V0_SIZE, 'Invalid Bech32 v0 address data size')
+ self.check_invalid(BECH32_INVALID_V0_SIZE, "Invalid Bech32 v0 address program size (21 bytes), per BIP141")
self.check_invalid(BECH32_TOO_LONG, 'Bech32 string too long', list(range(90, 108)))
self.check_invalid(BECH32_ONE_ERROR, 'Invalid Bech32 checksum', [9])
self.check_invalid(BECH32_TWO_ERRORS, 'Invalid Bech32 checksum', [22, 43])
@@ -105,7 +105,7 @@ class InvalidAddressErrorMessageTest(BitcoinTestFramework):
def test_getaddressinfo(self):
node = self.nodes[0]
- assert_raises_rpc_error(-5, "Invalid Bech32 address data size", node.getaddressinfo, BECH32_INVALID_SIZE)
+ assert_raises_rpc_error(-5, "Invalid Bech32 address program size (41 bytes)", node.getaddressinfo, BECH32_INVALID_SIZE)
assert_raises_rpc_error(-5, "Invalid or unsupported Segwit (Bech32) or Base58 encoding.", node.getaddressinfo, BECH32_INVALID_PREFIX)
assert_raises_rpc_error(-5, "Invalid or unsupported Base58-encoded address.", node.getaddressinfo, BASE58_INVALID_PREFIX)
assert_raises_rpc_error(-5, "Invalid or unsupported Segwit (Bech32) or Base58 encoding.", node.getaddressinfo, INVALID_ADDRESS)
diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py
index 49cdfd0e66..7c653cc315 100755
--- a/test/functional/rpc_psbt.py
+++ b/test/functional/rpc_psbt.py
@@ -42,7 +42,10 @@ from test_framework.util import (
find_vout_for_address,
random_bytes,
)
-from test_framework.wallet_util import bytes_to_wif
+from test_framework.wallet_util import (
+ bytes_to_wif,
+ get_generate_key
+)
import json
import os
@@ -942,6 +945,48 @@ class PSBTTest(BitcoinTestFramework):
self.log.info("Test we don't crash when making a 0-value funded transaction at 0 fee without forcing an input selection")
assert_raises_rpc_error(-4, "Transaction requires one destination of non-0 value, a non-0 feerate, or a pre-selected input", self.nodes[0].walletcreatefundedpsbt, [], [{"data": "deadbeef"}], 0, {"fee_rate": "0"})
+ self.log.info("Test descriptorprocesspsbt updates and signs a psbt with descriptors")
+
+ self.generate(self.nodes[2], 1)
+
+ # Disable the wallet for node 2 since `descriptorprocesspsbt` does not use the wallet
+ self.restart_node(2, extra_args=["-disablewallet"])
+ self.connect_nodes(0, 2)
+ self.connect_nodes(1, 2)
+
+ key_info = get_generate_key()
+ key = key_info.privkey
+ address = key_info.p2wpkh_addr
+
+ descriptor = descsum_create(f"wpkh({key})")
+
+ txid = self.nodes[0].sendtoaddress(address, 1)
+ self.sync_all()
+ vout = find_output(self.nodes[0], txid, 1)
+
+ psbt = self.nodes[2].createpsbt([{"txid": txid, "vout": vout}], {self.nodes[0].getnewaddress(): 0.99999})
+ decoded = self.nodes[2].decodepsbt(psbt)
+ test_psbt_input_keys(decoded['inputs'][0], [])
+
+ # Test that even if the wrong descriptor is given, `witness_utxo` and `non_witness_utxo`
+ # are still added to the psbt
+ alt_descriptor = descsum_create(f"wpkh({get_generate_key().privkey})")
+ alt_psbt = self.nodes[2].descriptorprocesspsbt(psbt=psbt, descriptors=[alt_descriptor], sighashtype="ALL")["psbt"]
+ decoded = self.nodes[2].decodepsbt(alt_psbt)
+ test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'non_witness_utxo'])
+
+ # Test that the psbt is not finalized and does not have bip32_derivs unless specified
+ psbt = self.nodes[2].descriptorprocesspsbt(psbt=psbt, descriptors=[descriptor], sighashtype="ALL", bip32derivs=True, finalize=False)["psbt"]
+ decoded = self.nodes[2].decodepsbt(psbt)
+ test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'non_witness_utxo', 'partial_signatures', 'bip32_derivs'])
+
+ psbt = self.nodes[2].descriptorprocesspsbt(psbt=psbt, descriptors=[descriptor], sighashtype="ALL", bip32derivs=False, finalize=True)["psbt"]
+ decoded = self.nodes[2].decodepsbt(psbt)
+ test_psbt_input_keys(decoded['inputs'][0], ['witness_utxo', 'non_witness_utxo', 'final_scriptwitness'])
+
+ # Broadcast transaction
+ rawtx = self.nodes[2].finalizepsbt(psbt)["hex"]
+ self.nodes[2].sendrawtransaction(rawtx)
if __name__ == '__main__':
PSBTTest().main()
diff --git a/test/functional/rpc_validateaddress.py b/test/functional/rpc_validateaddress.py
new file mode 100755
index 0000000000..d87ba098c3
--- /dev/null
+++ b/test/functional/rpc_validateaddress.py
@@ -0,0 +1,203 @@
+#!/usr/bin/env python3
+# Copyright (c) 2023 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test validateaddress for main chain"""
+
+from test_framework.test_framework import BitcoinTestFramework
+
+from test_framework.util import assert_equal
+
+INVALID_DATA = [
+ # BIP 173
+ (
+ "tc1qw508d6qejxtdg4y5r3zarvary0c5xw7kg3g4ty",
+ "Invalid or unsupported Segwit (Bech32) or Base58 encoding.", # Invalid hrp
+ [],
+ ),
+ ("bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t5", "Invalid Bech32 checksum", [41]),
+ (
+ "BC13W508D6QEJXTDG4Y5R3ZARVARY0C5XW7KN40WF2",
+ "Version 1+ witness address must use Bech32m checksum",
+ [],
+ ),
+ (
+ "bc1rw5uspcuh",
+ "Version 1+ witness address must use Bech32m checksum", # Invalid program length
+ [],
+ ),
+ (
+ "bc10w508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7kw5rljs90",
+ "Version 1+ witness address must use Bech32m checksum", # Invalid program length
+ [],
+ ),
+ (
+ "BC1QR508D6QEJXTDG4Y5R3ZARVARYV98GJ9P",
+ "Invalid Bech32 v0 address program size (16 bytes), per BIP141",
+ [],
+ ),
+ (
+ "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sL5k7",
+ "Invalid or unsupported Segwit (Bech32) or Base58 encoding.", # tb1, Mixed case
+ [],
+ ),
+ (
+ "BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3t4",
+ "Invalid character or mixed case", # bc1, Mixed case, not in BIP 173 test vectors
+ [40],
+ ),
+ (
+ "bc1zw508d6qejxtdg4y5r3zarvaryvqyzf3du",
+ "Version 1+ witness address must use Bech32m checksum", # Wrong padding
+ [],
+ ),
+ (
+ "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3pjxtptv",
+ "Invalid or unsupported Segwit (Bech32) or Base58 encoding.", # tb1, Non-zero padding in 8-to-5 conversion
+ [],
+ ),
+ ("bc1gmk9yu", "Empty Bech32 data section", []),
+ # BIP 350
+ (
+ "tc1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vq5zuyut",
+ "Invalid or unsupported Segwit (Bech32) or Base58 encoding.", # Invalid human-readable part
+ [],
+ ),
+ (
+ "bc1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqh2y7hd",
+ "Version 1+ witness address must use Bech32m checksum", # Invalid checksum (Bech32 instead of Bech32m)
+ [],
+ ),
+ (
+ "tb1z0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqglt7rf",
+ "Invalid or unsupported Segwit (Bech32) or Base58 encoding.", # tb1, Invalid checksum (Bech32 instead of Bech32m)
+ [],
+ ),
+ (
+ "BC1S0XLXVLHEMJA6C4DQV22UAPCTQUPFHLXM9H8Z3K2E72Q4K9HCZ7VQ54WELL",
+ "Version 1+ witness address must use Bech32m checksum", # Invalid checksum (Bech32 instead of Bech32m)
+ [],
+ ),
+ (
+ "bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kemeawh",
+ "Version 0 witness address must use Bech32 checksum", # Invalid checksum (Bech32m instead of Bech32)
+ [],
+ ),
+ (
+ "tb1q0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vq24jc47",
+ "Invalid or unsupported Segwit (Bech32) or Base58 encoding.", # tb1, Invalid checksum (Bech32m instead of Bech32)
+ [],
+ ),
+ (
+ "bc1p38j9r5y49hruaue7wxjce0updqjuyyx0kh56v8s25huc6995vvpql3jow4",
+ "Invalid Base 32 character", # Invalid character in checksum
+ [59],
+ ),
+ (
+ "BC130XLXVLHEMJA6C4DQV22UAPCTQUPFHLXM9H8Z3K2E72Q4K9HCZ7VQ7ZWS8R",
+ "Invalid Bech32 address witness version",
+ [],
+ ),
+ ("bc1pw5dgrnzv", "Invalid Bech32 address program size (1 byte)", []),
+ (
+ "bc1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7v8n0nx0muaewav253zgeav",
+ "Invalid Bech32 address program size (41 bytes)",
+ [],
+ ),
+ (
+ "BC1QR508D6QEJXTDG4Y5R3ZARVARYV98GJ9P",
+ "Invalid Bech32 v0 address program size (16 bytes), per BIP141",
+ [],
+ ),
+ (
+ "tb1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vq47Zagq",
+ "Invalid or unsupported Segwit (Bech32) or Base58 encoding.", # tb1, Mixed case
+ [],
+ ),
+ (
+ "bc1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7v07qwwzcrf",
+ "Invalid padding in Bech32 data section", # zero padding of more than 4 bits
+ [],
+ ),
+ (
+ "tb1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vpggkg4j",
+ "Invalid or unsupported Segwit (Bech32) or Base58 encoding.", # tb1, Non-zero padding in 8-to-5 conversion
+ [],
+ ),
+ ("bc1gmk9yu", "Empty Bech32 data section", []),
+]
+VALID_DATA = [
+ # BIP 350
+ (
+ "BC1QW508D6QEJXTDG4Y5R3ZARVARY0C5XW7KV8F3T4",
+ "0014751e76e8199196d454941c45d1b3a323f1433bd6",
+ ),
+ # (
+ # "tb1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3q0sl5k7",
+ # "00201863143c14c5166804bd19203356da136c985678cd4d27a1b8c6329604903262",
+ # ),
+ (
+ "bc1qrp33g0q5c5txsp9arysrx4k6zdkfs4nce4xj0gdcccefvpysxf3qccfmv3",
+ "00201863143c14c5166804bd19203356da136c985678cd4d27a1b8c6329604903262",
+ ),
+ (
+ "bc1pw508d6qejxtdg4y5r3zarvary0c5xw7kw508d6qejxtdg4y5r3zarvary0c5xw7kt5nd6y",
+ "5128751e76e8199196d454941c45d1b3a323f1433bd6751e76e8199196d454941c45d1b3a323f1433bd6",
+ ),
+ ("BC1SW50QGDZ25J", "6002751e"),
+ ("bc1zw508d6qejxtdg4y5r3zarvaryvaxxpcs", "5210751e76e8199196d454941c45d1b3a323"),
+ # (
+ # "tb1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesrxh6hy",
+ # "0020000000c4a5cad46221b2a187905e5266362b99d5e91c6ce24d165dab93e86433",
+ # ),
+ (
+ "bc1qqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvses5wp4dt",
+ "0020000000c4a5cad46221b2a187905e5266362b99d5e91c6ce24d165dab93e86433",
+ ),
+ # (
+ # "tb1pqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvsesf3hn0c",
+ # "5120000000c4a5cad46221b2a187905e5266362b99d5e91c6ce24d165dab93e86433",
+ # ),
+ (
+ "bc1pqqqqp399et2xygdj5xreqhjjvcmzhxw4aywxecjdzew6hylgvses7epu4h",
+ "5120000000c4a5cad46221b2a187905e5266362b99d5e91c6ce24d165dab93e86433",
+ ),
+ (
+ "bc1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqzk5jj0",
+ "512079be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f81798",
+ ),
+]
+
+
+class ValidateAddressMainTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.chain = "" # main
+ self.num_nodes = 1
+ self.extra_args = [["-prune=899"]] * self.num_nodes
+
+ def check_valid(self, addr, spk):
+ info = self.nodes[0].validateaddress(addr)
+ assert_equal(info["isvalid"], True)
+ assert_equal(info["scriptPubKey"], spk)
+ assert "error" not in info
+ assert "error_locations" not in info
+
+ def check_invalid(self, addr, error_str, error_locations):
+ res = self.nodes[0].validateaddress(addr)
+ assert_equal(res["isvalid"], False)
+ assert_equal(res["error"], error_str)
+ assert_equal(res["error_locations"], error_locations)
+
+ def test_validateaddress(self):
+ for (addr, error, locs) in INVALID_DATA:
+ self.check_invalid(addr, error, locs)
+ for (addr, spk) in VALID_DATA:
+ self.check_valid(addr, spk)
+
+ def run_test(self):
+ self.test_validateaddress()
+
+
+if __name__ == "__main__":
+ ValidateAddressMainTest().main()
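The error_locations values in the INVALID_DATA table above are 0-based character indexes into the address string. A minimal, self-contained sketch (vector and index taken from the table above) of what such an index points at:

addr = "bc1qw508d6qejxtdg4y5r3zarvary0c5xw7kv8f3t5"  # "Invalid Bech32 checksum" vector from above
error_locations = [41]                                 # as listed in INVALID_DATA for this vector
# The reported index points at the character the decoder flags; here it is the final one.
assert addr[error_locations[0]] == "5"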
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 56abe5f26a..51bd697e81 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -190,7 +190,7 @@ class TestNode():
assert self.rpc_connected and self.rpc is not None, self._node_msg("Error: no RPC connection")
return getattr(RPCOverloadWrapper(self.rpc, descriptors=self.descriptors), name)
- def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, **kwargs):
+ def start(self, extra_args=None, *, cwd=None, stdout=None, stderr=None, env=None, **kwargs):
"""Start the node."""
if extra_args is None:
extra_args = self.extra_args
@@ -213,6 +213,8 @@ class TestNode():
# add environment variable LIBC_FATAL_STDERR_=1 so that libc errors are written to stderr and not the terminal
subp_env = dict(os.environ, LIBC_FATAL_STDERR_="1")
+ if env is not None:
+ subp_env.update(env)
self.process = subprocess.Popen(self.args + extra_args, env=subp_env, stdout=stdout, stderr=stderr, cwd=cwd, **kwargs)
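The new env keyword lets a test inject extra environment variables into the bitcoind subprocess; as shown above they are merged on top of the framework's defaults. A hedged usage sketch from inside a BitcoinTestFramework test (the chosen variable and value are illustrative):

self.stop_node(0)
self.nodes[0].start(env={"LC_ALL": "C"})  # illustrative; merged into the bitcoind subprocess environment
self.nodes[0].wait_for_rpc_connection()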
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 2c227922c5..d3b3e4d536 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -12,13 +12,15 @@ import inspect
import json
import logging
import os
+import pathlib
import random
import re
+import sys
import time
from . import coverage
from .authproxy import AuthServiceProxy, JSONRPCException
-from typing import Callable, Optional
+from typing import Callable, Optional, Tuple
logger = logging.getLogger("TestFramework.utils")
@@ -419,6 +421,22 @@ def get_datadir_path(dirname, n):
return os.path.join(dirname, "node" + str(n))
+def get_temp_default_datadir(temp_dir: pathlib.Path) -> Tuple[dict, pathlib.Path]:
+ """Return os-specific environment variables that can be set to make the
+ GetDefaultDataDir() function return a datadir path under the provided
+ temp_dir, as well as the complete path it would return."""
+ if sys.platform == "win32":
+ env = dict(APPDATA=str(temp_dir))
+ datadir = temp_dir / "Bitcoin"
+ else:
+ env = dict(HOME=str(temp_dir))
+ if sys.platform == "darwin":
+ datadir = temp_dir / "Library/Application Support/Bitcoin"
+ else:
+ datadir = temp_dir / ".bitcoin"
+ return env, datadir
+
+
def append_config(datadir, options):
with open(os.path.join(datadir, "bitcoin.conf"), 'a', encoding='utf8') as f:
for option in options:
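get_temp_default_datadir() is designed to pair with the env parameter added to TestNode.start() above: it returns the environment override plus the default datadir bitcoind would derive from it. A hedged sketch (the temp path is illustrative):

import pathlib
from test_framework.util import get_temp_default_datadir

env, datadir = get_temp_default_datadir(pathlib.Path("/tmp/example"))  # illustrative temp dir
# On Linux: env == {'HOME': '/tmp/example'} and datadir == /tmp/example/.bitcoin
# On Windows APPDATA is set instead; on macOS the datadir sits under Library/Application Support.
print(env, datadir)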
diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py
index 64606b818b..1d546e12bd 100644
--- a/test/functional/test_framework/wallet.py
+++ b/test/functional/test_framework/wallet.py
@@ -218,10 +218,12 @@ class MiniWallet:
txid: get the first utxo we find from a specific transaction
"""
self._utxos = sorted(self._utxos, key=lambda k: (k['value'], -k['height'])) # Put the largest utxo last
+ blocks_height = self._test_node.getblockchaininfo()['blocks']
+ mature_coins = list(filter(lambda utxo: not utxo['coinbase'] or COINBASE_MATURITY - 1 <= blocks_height - utxo['height'], self._utxos))
if txid:
utxo_filter: Any = filter(lambda utxo: txid == utxo['txid'], self._utxos)
else:
- utxo_filter = reversed(self._utxos) # By default the largest utxo
+ utxo_filter = reversed(mature_coins) # By default the largest utxo
if vout is not None:
utxo_filter = filter(lambda utxo: vout == utxo['vout'], utxo_filter)
index = self._utxos.index(next(utxo_filter))
@@ -233,7 +235,8 @@ class MiniWallet:
def get_utxos(self, *, include_immature_coinbase=False, mark_as_spent=True):
"""Returns the list of all utxos and optionally mark them as spent"""
if not include_immature_coinbase:
- utxo_filter = filter(lambda utxo: not utxo['coinbase'] or COINBASE_MATURITY <= utxo['confirmations'], self._utxos)
+ blocks_height = self._test_node.getblockchaininfo()['blocks']
+ utxo_filter = filter(lambda utxo: not utxo['coinbase'] or COINBASE_MATURITY - 1 <= blocks_height - utxo['height'], self._utxos)
else:
utxo_filter = self._utxos
utxos = deepcopy(list(utxo_filter))
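The new maturity filter keys off block heights rather than the cached 'confirmations' field. A self-contained restatement of the check (COINBASE_MATURITY value as defined in test_framework.blocktools):

COINBASE_MATURITY = 100  # value from test_framework.blocktools

def is_spendable(utxo, tip_height):
    # Mirrors the filter above: non-coinbase outputs are always spendable, while a
    # coinbase output created at utxo['height'] needs the tip to be at least
    # COINBASE_MATURITY - 1 blocks past it (i.e. 100 confirmations in total).
    return not utxo["coinbase"] or COINBASE_MATURITY - 1 <= tip_height - utxo["height"]

# Example: a coinbase mined at height 1 becomes spendable once the tip reaches 100.
assert not is_spendable({"coinbase": True, "height": 1}, tip_height=99)
assert is_spendable({"coinbase": True, "height": 1}, tip_height=100)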
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 77e4dab6e4..5895e1de84 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -171,6 +171,7 @@ BASE_SCRIPTS = [
'wallet_fast_rescan.py --descriptors',
'interface_zmq.py',
'rpc_invalid_address_message.py',
+ 'rpc_validateaddress.py',
'interface_bitcoin_cli.py --legacy-wallet',
'interface_bitcoin_cli.py --descriptors',
'feature_bind_extra.py',
@@ -195,6 +196,8 @@ BASE_SCRIPTS = [
'wallet_watchonly.py --legacy-wallet',
'wallet_watchonly.py --usecli --legacy-wallet',
'wallet_reorgsrestore.py',
+ 'wallet_conflicts.py --legacy-wallet',
+ 'wallet_conflicts.py --descriptors',
'interface_http.py',
'interface_rpc.py',
'interface_usdt_coinselection.py',
@@ -528,6 +531,12 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=
# Test Framework Tests
print("Running Unit Tests for Test Framework Modules")
+
+ tests_dir = src_dir + '/test/functional/'
+ # This allows `test_runner.py` to work from an out-of-source build directory using a symlink,
+ # a hard link or a copy on any platform. See https://github.com/bitcoin/bitcoin/pull/27561.
+ sys.path.append(tests_dir)
+
test_framework_tests = unittest.TestSuite()
for module in TEST_FRAMEWORK_MODULES:
test_framework_tests.addTest(unittest.TestLoader().loadTestsFromName("test_framework.{}".format(module)))
@@ -536,8 +545,6 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=
logging.debug("Early exiting after failure in TestFramework unit tests")
sys.exit(False)
- tests_dir = src_dir + '/test/functional/'
-
flags = ['--cachedir={}'.format(cache_dir)] + args
if enable_coverage:
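Moving the sys.path.append() above the unit-test loop lets the test_framework modules resolve by name even when test_runner.py is invoked from an out-of-source build. A hedged sketch of the loading pattern used there (path and module name are illustrative):

import sys
import unittest

sys.path.append("/path/to/bitcoin/test/functional")  # illustrative; mirrors tests_dir above
suite = unittest.TestSuite()
suite.addTest(unittest.TestLoader().loadTestsFromName("test_framework.address"))  # module name illustrative
unittest.TextTestRunner().run(suite)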
diff --git a/test/functional/wallet_abandonconflict.py b/test/functional/wallet_abandonconflict.py
index 934f44588d..2691507773 100755
--- a/test/functional/wallet_abandonconflict.py
+++ b/test/functional/wallet_abandonconflict.py
@@ -226,20 +226,16 @@ class AbandonConflictTest(BitcoinTestFramework):
assert_equal(double_spend["walletconflicts"], [txAB1])
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
+ assert_equal(alice.gettransaction(txAB1)["confirmations"], -1)
newbalance = alice.getbalance()
assert_equal(newbalance, balance + Decimal("20"))
balance = newbalance
- # There is currently a minor bug around this and so this test doesn't work. See Issue #7315
- # Invalidate the block with the double spend and B's 10 BTC output should no longer be available
- # Don't think C's should either
+ # Invalidate the block with the double spend. B & C's 10 BTC outputs should no longer be available
self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
+ assert_equal(alice.gettransaction(txAB1)["confirmations"], 0)
newbalance = alice.getbalance()
- #assert_equal(newbalance, balance - Decimal("10"))
- self.log.info("If balance has not declined after invalidateblock then out of mempool wallet tx which is no longer")
- self.log.info("conflicted has not resumed causing its inputs to be seen as spent. See Issue #7315")
- assert_equal(balance, newbalance)
-
+ assert_equal(newbalance, balance - Decimal("20"))
if __name__ == '__main__':
AbandonConflictTest().main()
diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py
index e23ec7bc01..a1b805c09e 100755
--- a/test/functional/wallet_basic.py
+++ b/test/functional/wallet_basic.py
@@ -8,6 +8,10 @@ from itertools import product
from test_framework.blocktools import COINBASE_MATURITY
from test_framework.descriptors import descsum_create
+from test_framework.messages import (
+ COIN,
+ DEFAULT_ANCESTOR_LIMIT,
+)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_array_result,
@@ -17,6 +21,7 @@ from test_framework.util import (
find_vout_for_address,
)
from test_framework.wallet_util import test_address
+from test_framework.wallet import MiniWallet
NOT_A_NUMBER_OR_STRING = "Amount is not a number or string"
OUT_OF_RANGE = "Amount out of range"
@@ -784,6 +789,34 @@ class WalletTest(BitcoinTestFramework):
zeroconf_wallet.sendtoaddress(zeroconf_wallet.getnewaddress(), Decimal('0.5'))
+ self.test_chain_listunspent()
+
+ def test_chain_listunspent(self):
+ if not self.options.descriptors:
+ return
+ self.wallet = MiniWallet(self.nodes[0])
+ self.nodes[0].get_wallet_rpc(self.default_wallet_name).sendtoaddress(self.wallet.get_address(), "5")
+ self.generate(self.wallet, 1, sync_fun=self.no_op)
+ self.nodes[0].createwallet("watch_wallet", disable_private_keys=True)
+ watch_wallet = self.nodes[0].get_wallet_rpc("watch_wallet")
+ watch_wallet.importaddress(self.wallet.get_address())
+
+ # DEFAULT_ANCESTOR_LIMIT transactions chained off a confirmed tx should be fine
+ chain = self.wallet.create_self_transfer_chain(chain_length=DEFAULT_ANCESTOR_LIMIT)
+ ancestor_vsize = 0
+ ancestor_fees = Decimal(0)
+
+ for i, t in enumerate(chain):
+ ancestor_vsize += t["tx"].get_vsize()
+ ancestor_fees += t["fee"]
+ self.wallet.sendrawtransaction(from_node=self.nodes[0], tx_hex=t["hex"])
+ # Check that listunspent ancestor{count, size, fees} yield the correct results
+ wallet_unspent = watch_wallet.listunspent(minconf=0)
+ this_unspent = next(utxo_info for utxo_info in wallet_unspent if utxo_info["txid"] == t["txid"])
+ assert_equal(this_unspent['ancestorcount'], i + 1)
+ assert_equal(this_unspent['ancestorsize'], ancestor_vsize)
+ assert_equal(this_unspent['ancestorfees'], ancestor_fees * COIN)
+
if __name__ == '__main__':
WalletTest().main()
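One detail the new assertions rely on: listunspent reports 'ancestorfees' in satoshis, while the wallet's per-transaction fees are Decimal BTC amounts, hence the multiplication by COIN. A hedged arithmetic sketch with illustrative fee values:

from decimal import Decimal

COIN = 100000000  # satoshis per BTC, as in test_framework.messages

fees_btc = [Decimal("0.00001000"), Decimal("0.00001200"), Decimal("0.00000800")]  # 3-tx chain, illustrative
ancestor_fees = sum(fees_btc)                 # 0.00003000 BTC
assert int(ancestor_fees * COIN) == 3000      # value listunspent would report in 'ancestorfees'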
diff --git a/test/functional/wallet_conflicts.py b/test/functional/wallet_conflicts.py
new file mode 100755
index 0000000000..802b718cd5
--- /dev/null
+++ b/test/functional/wallet_conflicts.py
@@ -0,0 +1,127 @@
+#!/usr/bin/env python3
+# Copyright (c) 2023 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+"""
+Test that the wallet correctly tracks transactions that have been conflicted by blocks, particularly during reorgs.
+"""
+
+from decimal import Decimal
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+)
+
+class TxConflicts(BitcoinTestFramework):
+ def add_options(self, parser):
+ self.add_wallet_options(parser)
+
+ def set_test_params(self):
+ self.num_nodes = 3
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_no_wallet()
+
+ def get_utxo_of_value(self, from_tx_id, search_value):
+ return next(tx_out["vout"] for tx_out in self.nodes[0].gettransaction(from_tx_id)["details"] if tx_out["amount"] == Decimal(f"{search_value}"))
+
+ def run_test(self):
+ self.log.info("Send tx from which to conflict outputs later")
+ txid_conflict_from_1 = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
+ txid_conflict_from_2 = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10"))
+ self.generate(self.nodes[0], 1)
+ self.sync_blocks()
+
+ self.log.info("Disconnect nodes to broadcast conflicts on their respective chains")
+ self.disconnect_nodes(0, 1)
+ self.disconnect_nodes(2, 1)
+
+ self.log.info("Create transactions that conflict with each other")
+ output_A = self.get_utxo_of_value(from_tx_id=txid_conflict_from_1, search_value=10)
+ output_B = self.get_utxo_of_value(from_tx_id=txid_conflict_from_2, search_value=10)
+
+ # First create a transaction that consumes both A and B outputs.
+ #
+ # | tx1 | -----> | | | |
+ # | AB_parent_tx | ----> | Child_Tx |
+ # | tx2 | -----> | | | |
+ #
+ inputs_tx_AB_parent = [{"txid": txid_conflict_from_1, "vout": output_A}, {"txid": txid_conflict_from_2, "vout": output_B}]
+ tx_AB_parent = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs_tx_AB_parent, {self.nodes[0].getnewaddress(): Decimal("19.99998")}))
+
+ # Second, create two transactions: one consuming output_A and another consuming output_B
+ #
+ # | tx1 | -----> | Tx_A_1 |
+ # ----------------
+ # | tx2 | -----> | Tx_B_1 |
+ #
+ inputs_tx_A_1 = [{"txid": txid_conflict_from_1, "vout": output_A}]
+ inputs_tx_B_1 = [{"txid": txid_conflict_from_2, "vout": output_B}]
+ tx_A_1 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs_tx_A_1, {self.nodes[0].getnewaddress(): Decimal("9.99998")}))
+ tx_B_1 = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs_tx_B_1, {self.nodes[0].getnewaddress(): Decimal("9.99998")}))
+
+ self.log.info("Broadcast conflicted transaction")
+ txid_AB_parent = self.nodes[0].sendrawtransaction(tx_AB_parent["hex"])
+ self.generate(self.nodes[0], 1, sync_fun=self.no_op)
+
+ # Now that 'AB_parent_tx' was broadcast, build 'Child_Tx'
+ output_c = self.get_utxo_of_value(from_tx_id=txid_AB_parent, search_value=19.99998)
+ inputs_tx_C_child = [({"txid": txid_AB_parent, "vout": output_c})]
+
+ tx_C_child = self.nodes[0].signrawtransactionwithwallet(self.nodes[0].createrawtransaction(inputs_tx_C_child, {self.nodes[0].getnewaddress() : Decimal("19.99996")}))
+ tx_C_child_txid = self.nodes[0].sendrawtransaction(tx_C_child["hex"])
+ self.generate(self.nodes[0], 1, sync_fun=self.no_op)
+
+ self.log.info("Broadcast conflicting tx to node 1 and generate a longer chain")
+ conflicting_txid_A = self.nodes[1].sendrawtransaction(tx_A_1["hex"])
+ self.generate(self.nodes[1], 4, sync_fun=self.no_op)
+ conflicting_txid_B = self.nodes[1].sendrawtransaction(tx_B_1["hex"])
+ self.generate(self.nodes[1], 4, sync_fun=self.no_op)
+
+ self.log.info("Connect nodes 0 and 1, trigger reorg and ensure that the tx is effectively conflicted")
+ self.connect_nodes(0, 1)
+ self.sync_blocks([self.nodes[0], self.nodes[1]])
+ conflicted_AB_tx = self.nodes[0].gettransaction(txid_AB_parent)
+ tx_C_child = self.nodes[0].gettransaction(tx_C_child_txid)
+ conflicted_A_tx = self.nodes[0].gettransaction(conflicting_txid_A)
+
+ self.log.info("Verify, after the reorg, that Tx_A was accepted, and tx_AB and its Child_Tx are conflicting now")
+ # Tx A was accepted, Tx AB was not.
+ assert conflicted_AB_tx["confirmations"] < 0
+ assert conflicted_A_tx["confirmations"] > 0
+
+ # A conflicted tx reports negative confirmations whose magnitude equals the confirmations of the conflicting tx
+ assert_equal(-conflicted_AB_tx["confirmations"], conflicted_A_tx["confirmations"])
+ # Child should inherit conflicted state from parent
+ assert_equal(-tx_C_child["confirmations"], conflicted_A_tx["confirmations"])
+ # Check the confirmations of the conflicting transactions
+ assert_equal(conflicted_A_tx["confirmations"], 8)
+ assert_equal(self.nodes[0].gettransaction(conflicting_txid_B)["confirmations"], 4)
+
+ self.log.info("Now generate a longer chain that does not contain any tx")
+ # Node2 chain without conflicts
+ self.generate(self.nodes[2], 15, sync_fun=self.no_op)
+
+ # Connect node0 and node2 and wait for the reorg
+ self.connect_nodes(0, 2)
+ self.sync_blocks()
+ conflicted = self.nodes[0].gettransaction(txid_AB_parent)
+ tx_C_child = self.nodes[0].gettransaction(tx_C_child_txid)
+
+ self.log.info("Test that formerly conflicted transaction are inactive after reorg")
+ # Former conflicted tx should be unconfirmed as it hasn't yet been rebroadcast
+ assert_equal(conflicted["confirmations"], 0)
+ # Former conflicted child tx should be unconfirmed as it hasn't been rebroadcast
+ assert_equal(tx_C_child["confirmations"], 0)
+ # Rebroadcast the formerly conflicted tx and check that it confirms smoothly
+ self.nodes[2].sendrawtransaction(conflicted["hex"])
+ self.generate(self.nodes[2], 1)
+ self.sync_blocks()
+ former_conflicted = self.nodes[0].gettransaction(txid_AB_parent)
+ assert_equal(former_conflicted["confirmations"], 1)
+ assert_equal(former_conflicted["blockheight"], 217)
+
+if __name__ == '__main__':
+ TxConflicts().main()
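The checks above lean on gettransaction's sign convention for 'confirmations'. A hedged, self-contained restatement of that convention:

def describe_confirmations(confirmations):
    # Convention the test asserts on: negative means the tx is conflicted and the
    # magnitude is the depth of the conflicting tx, zero means in the wallet but
    # unconfirmed, positive means confirmed at that depth.
    if confirmations < 0:
        return f"conflicted; conflicting tx has {-confirmations} confirmations"
    if confirmations == 0:
        return "unconfirmed"
    return f"confirmed with {confirmations} confirmations"

# Matches the checks above: tx_AB_parent ends at -8, then returns to 0 after the
# final reorg, while the winning tx_A_1 sits at 8.
assert describe_confirmations(-8).startswith("conflicted")
assert describe_confirmations(0) == "unconfirmed"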
diff --git a/test/lint/lint-python.py b/test/lint/lint-python.py
index 9de13e44e2..539d0acb5d 100755
--- a/test/lint/lint-python.py
+++ b/test/lint/lint-python.py
@@ -13,7 +13,7 @@ import pkg_resources
import subprocess
import sys
-DEPS = ['flake8', 'mypy', 'pyzmq']
+DEPS = ['flake8', 'lief', 'mypy', 'pyzmq']
MYPY_CACHE_DIR = f"{os.getenv('BASE_ROOT_DIR', '')}/test/.mypy_cache"
# All .py files, except those in src/ (to exclude subtrees there)
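A hedged sketch of the kind of dependency probe the DEPS list feeds (the exact check in lint-python.py may differ; this only illustrates why 'lief' now has to be installed for the linter to run):

import pkg_resources

DEPS = ['flake8', 'lief', 'mypy', 'pyzmq']

for dep in DEPS:
    try:
        version = pkg_resources.get_distribution(dep).version
        print(f"{dep} {version} found")
    except pkg_resources.DistributionNotFound:
        print(f"Skipping Python linting since {dep} is not installed.")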
diff --git a/test/lint/run-lint-format-strings.py b/test/lint/run-lint-format-strings.py
index 91915f05f9..d1896dba84 100755
--- a/test/lint/run-lint-format-strings.py
+++ b/test/lint/run-lint-format-strings.py
@@ -241,20 +241,32 @@ def count_format_specifiers(format_string):
3
>>> count_format_specifiers("foo %d bar %i foo %% foo %*d foo")
4
+ >>> count_format_specifiers("foo %5$d")
+ 5
+ >>> count_format_specifiers("foo %5$*7$d")
+ 7
"""
assert type(format_string) is str
format_string = format_string.replace('%%', 'X')
- n = 0
- in_specifier = False
- for i, char in enumerate(format_string):
- if char == "%":
- in_specifier = True
+ n = max_pos = 0
+ for m in re.finditer("%(.*?)[aAcdeEfFgGinopsuxX]", format_string, re.DOTALL):
+ # Increase the max position if the argument has a position number like
+ # "5$", otherwise increment the argument count.
+ pos_num, = re.match(r"(?:(^\d+)\$)?", m.group(1)).groups()
+ if pos_num is not None:
+ max_pos = max(max_pos, int(pos_num))
+ else:
n += 1
- elif char in "aAcdeEfFgGinopsuxX":
- in_specifier = False
- elif in_specifier and char == "*":
+
+ # Increase the max position if there is a "*" width argument with a
+ # position like "*7$", and increment the argument count if there is a
+ # "*" width argument with no position.
+ star, star_pos_num = re.match(r"(?:.*?(\*(?:(\d+)\$)?)|)", m.group(1)).groups()
+ if star_pos_num is not None:
+ max_pos = max(max_pos, int(star_pos_num))
+ elif star is not None:
n += 1
- return n
+ return max(n, max_pos)
def main():
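A self-contained sketch showing how the two regex probes above decompose a positional width specifier such as "%5$*7$d" (the specifier-matching pattern is reproduced verbatim from the code above):

import re

fmt = "foo %5$*7$d"
m = re.search("%(.*?)[aAcdeEfFgGinopsuxX]", fmt, re.DOTALL)
body = m.group(1)                                              # "5$*7$"
pos_num, = re.match(r"(?:(^\d+)\$)?", body).groups()           # argument position: "5"
star, star_pos_num = re.match(r"(?:.*?(\*(?:(\d+)\$)?)|)", body).groups()
assert (pos_num, star, star_pos_num) == ("5", "*7$", "7")
# max(n, max_pos) therefore evaluates to 7 for this string, matching the new doctest.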
diff --git a/test/util/test_runner.py b/test/util/test_runner.py
index e5cdd0bc3a..1cd368f6f4 100755
--- a/test/util/test_runner.py
+++ b/test/util/test_runner.py
@@ -74,6 +74,11 @@ def bctest(testDir, testObj, buildenv):
"""
# Get the exec names and arguments
execprog = os.path.join(buildenv["BUILDDIR"], "src", testObj["exec"] + buildenv["EXEEXT"])
+ if testObj["exec"] == "./bitcoin-util":
+ execprog = os.getenv("BITCOINUTIL", default=execprog)
+ elif testObj["exec"] == "./bitcoin-tx":
+ execprog = os.getenv("BITCOINTX", default=execprog)
+
execargs = testObj['args']
execrun = [execprog] + execargs
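A hedged usage sketch of the new overrides: BITCOINUTIL and BITCOINTX let the util tests run against binaries that live outside the configured BUILDDIR (the paths below are illustrative):

import os
import subprocess

env = dict(
    os.environ,
    BITCOINUTIL="/path/to/build/src/bitcoin-util",  # illustrative out-of-tree binaries
    BITCOINTX="/path/to/build/src/bitcoin-tx",
)
subprocess.run(["python3", "test/util/test_runner.py"], env=env, check=True)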