-rw-r--r--  .cirrus.yml  4
-rwxr-xr-x  ci/test/00_setup_env_i686_centos.sh  6
-rwxr-xr-x  ci/test/00_setup_env_mac.sh  4
-rwxr-xr-x  ci/test/00_setup_env_win64.sh  2
-rwxr-xr-x  ci/test/04_install.sh  54
-rwxr-xr-x  ci/test/05_before_script.sh  28
-rwxr-xr-x  ci/test/06_script_a.sh  34
-rwxr-xr-x  ci/test/06_script_b.sh  20
-rw-r--r--  configure.ac  18
-rwxr-xr-x  contrib/devtools/security-check.py  23
-rwxr-xr-x  contrib/devtools/symbol-check.py  2
-rwxr-xr-x  contrib/devtools/test-security-check.py  90
-rw-r--r--  contrib/guix/README.md  4
-rwxr-xr-x  contrib/guix/guix-build  4
-rwxr-xr-x  contrib/guix/guix-codesign  2
-rwxr-xr-x  contrib/guix/libexec/build.sh  2
-rw-r--r--  contrib/guix/manifest.scm  7
-rw-r--r--  contrib/macdeploy/README.md  20
-rw-r--r--  depends/Makefile  2
-rw-r--r--  depends/README.md  1
-rw-r--r--  depends/hosts/darwin.mk  6
-rw-r--r--  depends/packages/boost.mk  3
-rw-r--r--  depends/packages/packages.mk  6
-rw-r--r--  depends/packages/zeromq.mk  11
-rw-r--r--  depends/patches/zeromq/netbsd_kevent_void.patch  57
-rw-r--r--  doc/REST-interface.md  1
-rw-r--r--  doc/dependencies.md  2
-rw-r--r--  doc/fuzzing.md  9
-rw-r--r--  doc/release-notes-23508.md  9
-rw-r--r--  doc/tor.md  2
-rw-r--r--  share/setup.nsi.in  2
-rw-r--r--  src/banman.cpp  20
-rw-r--r--  src/bench/addrman.cpp  11
-rw-r--r--  src/bench/bench.cpp  6
-rw-r--r--  src/bench/checkblock.cpp  8
-rw-r--r--  src/bench/rpc_blockchain.cpp  4
-rw-r--r--  src/bitcoin-tx.cpp  10
-rw-r--r--  src/chain.cpp  2
-rw-r--r--  src/chain.h  31
-rw-r--r--  src/common/bloom.cpp  4
-rw-r--r--  src/core_write.cpp  2
-rw-r--r--  src/crypto/chacha_poly_aead.cpp  2
-rw-r--r--  src/dbwrapper.h  6
-rw-r--r--  src/fs.cpp  4
-rw-r--r--  src/fs.h  2
-rw-r--r--  src/hash.h  15
-rw-r--r--  src/index/txindex.cpp  4
-rw-r--r--  src/interfaces/chain.h  3
-rw-r--r--  src/interfaces/wallet.h  1
-rw-r--r--  src/net.cpp  65
-rw-r--r--  src/net.h  27
-rw-r--r--  src/net_processing.cpp  85
-rw-r--r--  src/net_processing.h  9
-rw-r--r--  src/node/blockstorage.cpp  18
-rw-r--r--  src/node/blockstorage.h  9
-rw-r--r--  src/node/interfaces.cpp  5
-rw-r--r--  src/node/ui_interface.h  3
-rw-r--r--  src/policy/packages.h  1
-rw-r--r--  src/primitives/transaction.h  36
-rw-r--r--  src/psbt.cpp  2
-rw-r--r--  src/pubkey.h  4
-rw-r--r--  src/qt/forms/debugwindow.ui  82
-rw-r--r--  src/qt/guiconstants.h  2
-rw-r--r--  src/qt/psbtoperationsdialog.cpp  2
-rw-r--r--  src/qt/rpcconsole.cpp  3
-rw-r--r--  src/qt/sendcoinsdialog.cpp  2
-rw-r--r--  src/qt/test/test_main.cpp  3
-rw-r--r--  src/qt/transactiondesc.cpp  9
-rw-r--r--  src/qt/transactionrecord.cpp  15
-rw-r--r--  src/qt/transactionrecord.h  2
-rw-r--r--  src/qt/transactiontablemodel.cpp  9
-rw-r--r--  src/qt/walletframe.cpp  2
-rw-r--r--  src/random.cpp  7
-rw-r--r--  src/random.h  14
-rw-r--r--  src/rpc/blockchain.cpp  237
-rw-r--r--  src/rpc/client.cpp  2
-rw-r--r--  src/rpc/net.cpp  2
-rw-r--r--  src/rpc/rawtransaction.cpp  7
-rw-r--r--  src/rpc/rawtransaction_util.cpp  2
-rw-r--r--  src/rpc/util.cpp  16
-rw-r--r--  src/rpc/util.h  9
-rw-r--r--  src/scheduler.cpp  12
-rw-r--r--  src/scheduler.h  6
-rw-r--r--  src/script/bitcoinconsensus.cpp  17
-rw-r--r--  src/script/interpreter.cpp  17
-rw-r--r--  src/script/interpreter.h  12
-rw-r--r--  src/script/sign.cpp  2
-rw-r--r--  src/serialize.h  77
-rw-r--r--  src/span.h  9
-rw-r--r--  src/streams.h  86
-rw-r--r--  src/support/allocators/zeroafterfree.h  2
-rw-r--r--  src/test/README.md  28
-rw-r--r--  src/test/addrman_tests.cpp  70
-rw-r--r--  src/test/arith_uint256_tests.cpp  12
-rw-r--r--  src/test/bloom_tests.cpp  12
-rw-r--r--  src/test/coins_tests.cpp  4
-rw-r--r--  src/test/descriptor_tests.cpp  48
-rw-r--r--  src/test/fuzz/addrman.cpp  18
-rw-r--r--  src/test/fuzz/autofile.cpp  8
-rw-r--r--  src/test/fuzz/buffered_file.cpp  6
-rw-r--r--  src/test/fuzz/chain.cpp  23
-rw-r--r--  src/test/fuzz/connman.cpp  16
-rw-r--r--  src/test/fuzz/deserialize.cpp  13
-rw-r--r--  src/test/fuzz/fuzz.cpp  27
-rw-r--r--  src/test/fuzz/integer.cpp  5
-rw-r--r--  src/test/fuzz/p2p_transport_serialization.cpp  4
-rw-r--r--  src/test/fuzz/rpc.cpp  3
-rw-r--r--  src/test/fuzz/script.cpp  19
-rw-r--r--  src/test/fuzz/signature_checker.cpp  2
-rw-r--r--  src/test/fuzz/util.cpp  2
-rw-r--r--  src/test/fuzz/util.h  6
-rw-r--r--  src/test/fuzz/versionbits.cpp  35
-rw-r--r--  src/test/interfaces_tests.cpp  1
-rw-r--r--  src/test/main.cpp  15
-rw-r--r--  src/test/miner_tests.cpp  2
-rw-r--r--  src/test/net_tests.cpp  2
-rw-r--r--  src/test/pow_tests.cpp  2
-rw-r--r--  src/test/prevector_tests.cpp  16
-rw-r--r--  src/test/script_tests.cpp  20
-rw-r--r--  src/test/serialize_tests.cpp  52
-rw-r--r--  src/test/sighash_tests.cpp  7
-rw-r--r--  src/test/skiplist_tests.cpp  4
-rw-r--r--  src/test/streams_tests.cpp  28
-rw-r--r--  src/test/transaction_tests.cpp  4
-rw-r--r--  src/test/txpackage_tests.cpp  232
-rw-r--r--  src/test/util/blockfilter.cpp  2
-rw-r--r--  src/test/util/setup_common.cpp  17
-rw-r--r--  src/test/util/setup_common.h  4
-rw-r--r--  src/test/validation_chainstatemanager_tests.cpp  3
-rw-r--r--  src/txdb.cpp  5
-rw-r--r--  src/txdb.h  3
-rw-r--r--  src/txmempool.cpp  32
-rw-r--r--  src/txmempool.h  90
-rw-r--r--  src/uint256.h  6
-rw-r--r--  src/util/settings.cpp  2
-rw-r--r--  src/util/system.cpp  7
-rw-r--r--  src/validation.cpp  129
-rw-r--r--  src/validation.h  29
-rw-r--r--  src/versionbits.cpp  31
-rw-r--r--  src/versionbits.h  12
-rw-r--r--  src/wallet/bdb.cpp  6
-rw-r--r--  src/wallet/coincontrol.h  19
-rw-r--r--  src/wallet/dump.cpp  12
-rw-r--r--  src/wallet/interfaces.cpp  1
-rw-r--r--  src/wallet/load.cpp  2
-rw-r--r--  src/wallet/receive.cpp  2
-rw-r--r--  src/wallet/rpc/coins.cpp  4
-rw-r--r--  src/wallet/rpc/spend.cpp  79
-rw-r--r--  src/wallet/rpc/transactions.cpp  4
-rw-r--r--  src/wallet/spend.cpp  16
-rw-r--r--  src/wallet/sqlite.cpp  18
-rw-r--r--  src/wallet/test/spend_tests.cpp  51
-rw-r--r--  src/wallet/test/wallet_tests.cpp  17
-rw-r--r--  src/wallet/wallet.cpp  51
-rw-r--r--  src/wallet/wallet.h  2
-rw-r--r--  src/zmq/zmqpublishnotifier.cpp  10
-rw-r--r--  test/functional/data/rpc_decodescript.json  11
-rwxr-xr-x  test/functional/feature_cltv.py  2
-rwxr-xr-x  test/functional/feature_dersig.py  2
-rwxr-xr-x  test/functional/feature_init.py  41
-rwxr-xr-x  test/functional/feature_maxtipage.py  56
-rwxr-xr-x  test/functional/interface_rest.py  18
-rwxr-xr-x  test/functional/mempool_updatefromblock.py  2
-rwxr-xr-x  test/functional/rpc_blockchain.py  57
-rwxr-xr-x  test/functional/rpc_fundrawtransaction.py  53
-rwxr-xr-x  test/functional/rpc_getblockfrompeer.py  16
-rwxr-xr-x  test/functional/rpc_psbt.py  89
-rwxr-xr-x  test/functional/rpc_rawtransaction.py  39
-rwxr-xr-x  test/functional/rpc_signrawtransaction.py  4
-rwxr-xr-x  test/functional/test_framework/test_node.py  9
-rw-r--r--  test/functional/test_framework/util.py  2
-rwxr-xr-x  test/functional/test_runner.py  2
-rwxr-xr-x  test/functional/wallet_multiwallet.py  2
-rwxr-xr-x  test/functional/wallet_send.py  40
-rwxr-xr-x  test/functional/wallet_timelock.py  50
-rwxr-xr-x  test/lint/commit-script-check.sh  5
-rw-r--r--  test/lint/lint-spelling.ignore-words.txt  2
-rw-r--r--  test/sanitizer_suppressions/ubsan  24
-rw-r--r--  test/util/data/tt-delin1-out.json  2
-rw-r--r--  test/util/data/tt-delout1-out.json  1
-rw-r--r--  test/util/data/tt-locktime317000-out.json  2
-rw-r--r--  test/util/data/txcreate1.json  2
-rw-r--r--  test/util/data/txcreate2.json  1
-rw-r--r--  test/util/data/txcreatedata1.json  2
-rw-r--r--  test/util/data/txcreatedata2.json  2
-rw-r--r--  test/util/data/txcreatedata_seq0.json  1
-rw-r--r--  test/util/data/txcreatedata_seq1.json  1
-rw-r--r--  test/util/data/txcreatemultisig1.json  1
-rw-r--r--  test/util/data/txcreatemultisig2.json  1
-rw-r--r--  test/util/data/txcreatemultisig3.json  1
-rw-r--r--  test/util/data/txcreatemultisig4.json  1
-rw-r--r--  test/util/data/txcreatemultisig5.json  1
-rw-r--r--  test/util/data/txcreateoutpubkey1.json  1
-rw-r--r--  test/util/data/txcreateoutpubkey2.json  1
-rw-r--r--  test/util/data/txcreateoutpubkey3.json  1
-rw-r--r--  test/util/data/txcreatescript1.json  1
-rw-r--r--  test/util/data/txcreatescript2.json  1
-rw-r--r--  test/util/data/txcreatescript3.json  1
-rw-r--r--  test/util/data/txcreatescript4.json  1
-rw-r--r--  test/util/data/txcreatesignv1.json  1
200 files changed, 2400 insertions, 1066 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 240e2cf705..fec4975080 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -188,7 +188,7 @@ task:
name: '32-bit + dash [gui] [CentOS 8]'
<< : *GLOBAL_TASK_TEMPLATE
container:
- image: centos:8
+ image: quay.io/centos/centos:stream8
env:
<< : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
PACKAGE_MANAGER_INSTALL: "yum install -y"
@@ -277,7 +277,7 @@ task:
container:
image: ubuntu:focal
env:
- MACOS_SDK: "Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers"
+ MACOS_SDK: "Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers"
<< : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
FILE_ENV: "./ci/test/00_setup_env_mac.sh"
diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh
index 1bd74a5217..8f1cc8af29 100755
--- a/ci/test/00_setup_env_i686_centos.sh
+++ b/ci/test/00_setup_env_i686_centos.sh
@@ -7,9 +7,9 @@
export LC_ALL=C.UTF-8
export HOST=i686-pc-linux-gnu
-export CONTAINER_NAME=ci_i686_centos_8
-export DOCKER_NAME_TAG=centos:8
-export DOCKER_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python3 python3-zmq which patch lbzip2 dash rsync coreutils bison"
+export CONTAINER_NAME=ci_i686_centos
+export DOCKER_NAME_TAG=quay.io/centos/centos:stream8
+export DOCKER_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python3 python3-zmq which patch lbzip2 xz procps-ng dash rsync coreutils bison"
export GOAL="install"
export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports"
export CONFIG_SHELL="/bin/dash"
diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh
index d70b993b99..c4f22c8f9e 100755
--- a/ci/test/00_setup_env_mac.sh
+++ b/ci/test/00_setup_env_mac.sh
@@ -10,8 +10,8 @@ export CONTAINER_NAME=ci_macos_cross
export DOCKER_NAME_TAG=ubuntu:20.04 # Check that Focal can cross-compile to macos
export HOST=x86_64-apple-darwin
export PACKAGES="cmake libz-dev libtinfo5 python3-setuptools xorriso"
-export XCODE_VERSION=12.1
-export XCODE_BUILD_ID=12A7403
+export XCODE_VERSION=12.2
+export XCODE_BUILD_ID=12B45b
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
export GOAL="deploy"
diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh
index 44b6eb7ae3..6619852423 100755
--- a/ci/test/00_setup_env_win64.sh
+++ b/ci/test/00_setup_env_win64.sh
@@ -13,4 +13,4 @@ export DPKG_ADD_ARCH="i386"
export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64 wine32 file"
export RUN_FUNCTIONAL_TESTS=false
export GOAL="deploy"
-export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests --disable-external-signer"
+export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests"
diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh
index 491a587b70..e409df62eb 100755
--- a/ci/test/04_install.sh
+++ b/ci/test/04_install.sh
@@ -55,21 +55,21 @@ else
echo "Running on host system without docker wrapper"
fi
-DOCKER_EXEC () {
+CI_EXEC () {
$DOCKER_CI_CMD_PREFIX bash -c "export PATH=$BASE_SCRATCH_DIR/bins/:\$PATH && cd \"$P_CI_DIR\" && $*"
}
-export -f DOCKER_EXEC
+export -f CI_EXEC
if [ -n "$DPKG_ADD_ARCH" ]; then
- DOCKER_EXEC dpkg --add-architecture "$DPKG_ADD_ARCH"
+ CI_EXEC dpkg --add-architecture "$DPKG_ADD_ARCH"
fi
-if [[ $DOCKER_NAME_TAG == centos* ]]; then
- ${CI_RETRY_EXE} DOCKER_EXEC dnf -y install epel-release
- ${CI_RETRY_EXE} DOCKER_EXEC dnf -y --allowerasing install "$DOCKER_PACKAGES" "$PACKAGES"
+if [[ $DOCKER_NAME_TAG == *centos* ]]; then
+ ${CI_RETRY_EXE} CI_EXEC dnf -y install epel-release
+ ${CI_RETRY_EXE} CI_EXEC dnf -y --allowerasing install "$DOCKER_PACKAGES" "$PACKAGES"
elif [ "$CI_USE_APT_INSTALL" != "no" ]; then
- ${CI_RETRY_EXE} DOCKER_EXEC apt-get update
- ${CI_RETRY_EXE} DOCKER_EXEC apt-get install --no-install-recommends --no-upgrade -y "$PACKAGES" "$DOCKER_PACKAGES"
+ ${CI_RETRY_EXE} CI_EXEC apt-get update
+ ${CI_RETRY_EXE} CI_EXEC apt-get install --no-install-recommends --no-upgrade -y "$PACKAGES" "$DOCKER_PACKAGES"
if [ -n "$PIP_PACKAGES" ]; then
# shellcheck disable=SC2086
${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES
@@ -80,50 +80,50 @@ if [ "$CI_OS_NAME" == "macos" ]; then
top -l 1 -s 0 | awk ' /PhysMem/ {print}'
echo "Number of CPUs: $(sysctl -n hw.logicalcpu)"
else
- DOCKER_EXEC free -m -h
- DOCKER_EXEC echo "Number of CPUs \(nproc\):" \$\(nproc\)
- DOCKER_EXEC echo "$(lscpu | grep Endian)"
+ CI_EXEC free -m -h
+ CI_EXEC echo "Number of CPUs \(nproc\):" \$\(nproc\)
+ CI_EXEC echo "$(lscpu | grep Endian)"
fi
-DOCKER_EXEC echo "Free disk space:"
-DOCKER_EXEC df -h
+CI_EXEC echo "Free disk space:"
+CI_EXEC df -h
if [ "$RUN_FUZZ_TESTS" = "true" ]; then
export DIR_FUZZ_IN=${DIR_QA_ASSETS}/fuzz_seed_corpus/
if [ ! -d "$DIR_FUZZ_IN" ]; then
- DOCKER_EXEC git clone --depth=1 https://github.com/bitcoin-core/qa-assets "${DIR_QA_ASSETS}"
+ CI_EXEC git clone --depth=1 https://github.com/bitcoin-core/qa-assets "${DIR_QA_ASSETS}"
fi
elif [ "$RUN_UNIT_TESTS" = "true" ] || [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then
export DIR_UNIT_TEST_DATA=${DIR_QA_ASSETS}/unit_test_data/
if [ ! -d "$DIR_UNIT_TEST_DATA" ]; then
- DOCKER_EXEC mkdir -p "$DIR_UNIT_TEST_DATA"
- DOCKER_EXEC curl --location --fail https://github.com/bitcoin-core/qa-assets/raw/main/unit_test_data/script_assets_test.json -o "${DIR_UNIT_TEST_DATA}/script_assets_test.json"
+ CI_EXEC mkdir -p "$DIR_UNIT_TEST_DATA"
+ CI_EXEC curl --location --fail https://github.com/bitcoin-core/qa-assets/raw/main/unit_test_data/script_assets_test.json -o "${DIR_UNIT_TEST_DATA}/script_assets_test.json"
fi
fi
-DOCKER_EXEC mkdir -p "${BASE_SCRATCH_DIR}/sanitizer-output/"
+CI_EXEC mkdir -p "${BASE_SCRATCH_DIR}/sanitizer-output/"
if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then
- DOCKER_EXEC "update-alternatives --install /usr/bin/clang++ clang++ \$(which clang++-9) 100"
- DOCKER_EXEC "update-alternatives --install /usr/bin/clang clang \$(which clang-9) 100"
- DOCKER_EXEC "mkdir -p ${BASE_SCRATCH_DIR}/msan/build/"
- DOCKER_EXEC "git clone --depth=1 https://github.com/llvm/llvm-project -b llvmorg-12.0.0 ${BASE_SCRATCH_DIR}/msan/llvm-project"
- DOCKER_EXEC "cd ${BASE_SCRATCH_DIR}/msan/build/ && cmake -DLLVM_ENABLE_PROJECTS='libcxx;libcxxabi' -DCMAKE_BUILD_TYPE=Release -DLLVM_USE_SANITIZER=Memory -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DLLVM_TARGETS_TO_BUILD=X86 ../llvm-project/llvm/"
- DOCKER_EXEC "cd ${BASE_SCRATCH_DIR}/msan/build/ && make $MAKEJOBS cxx"
+ CI_EXEC "update-alternatives --install /usr/bin/clang++ clang++ \$(which clang++-9) 100"
+ CI_EXEC "update-alternatives --install /usr/bin/clang clang \$(which clang-9) 100"
+ CI_EXEC "mkdir -p ${BASE_SCRATCH_DIR}/msan/build/"
+ CI_EXEC "git clone --depth=1 https://github.com/llvm/llvm-project -b llvmorg-12.0.0 ${BASE_SCRATCH_DIR}/msan/llvm-project"
+ CI_EXEC "cd ${BASE_SCRATCH_DIR}/msan/build/ && cmake -DLLVM_ENABLE_PROJECTS='libcxx;libcxxabi' -DCMAKE_BUILD_TYPE=Release -DLLVM_USE_SANITIZER=Memory -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DLLVM_TARGETS_TO_BUILD=X86 ../llvm-project/llvm/"
+ CI_EXEC "cd ${BASE_SCRATCH_DIR}/msan/build/ && make $MAKEJOBS cxx"
fi
if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
echo "Create $BASE_ROOT_DIR"
- DOCKER_EXEC rsync -a /ro_base/ "$BASE_ROOT_DIR"
+ CI_EXEC rsync -a /ro_base/ "$BASE_ROOT_DIR"
fi
if [ "$USE_BUSY_BOX" = "true" ]; then
echo "Setup to use BusyBox utils"
- DOCKER_EXEC mkdir -p "${BASE_SCRATCH_DIR}/bins/"
+ CI_EXEC mkdir -p "${BASE_SCRATCH_DIR}/bins/"
# tar excluded for now because it requires passing in the exact archive type in ./depends (fixed in later BusyBox version)
# find excluded for now because it does not recognize the -delete option in ./depends (fixed in later BusyBox version)
# ar excluded for now because it does not recognize the -q option in ./depends (unknown if fixed)
# shellcheck disable=SC1010
- DOCKER_EXEC for util in \$\(busybox --list \| grep -v "^ar$" \| grep -v "^tar$" \| grep -v "^find$"\)\; do ln -s \$\(command -v busybox\) "${BASE_SCRATCH_DIR}/bins/\$util"\; done
+ CI_EXEC for util in \$\(busybox --list \| grep -v "^ar$" \| grep -v "^tar$" \| grep -v "^find$"\)\; do ln -s \$\(command -v busybox\) "${BASE_SCRATCH_DIR}/bins/\$util"\; done
# Print BusyBox version
- DOCKER_EXEC patch --help
+ CI_EXEC patch --help
fi
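The rename above is purely mechanical: CI_EXEC is the same wrapper previously called DOCKER_EXEC, running each command either inside the CI container or directly on the host when DANGER_RUN_CI_ON_HOST is set. For reviewers who want to exercise one of the touched configurations locally, a rough invocation looks like this (a sketch only; the MAKEJOBS value and the chosen env file are examples, not part of this change):

```sh
# Run a single CI configuration end to end; 04_install.sh and the later
# scripts invoke CI_EXEC internally for every containerized command.
MAKEJOBS="-j4" FILE_ENV="./ci/test/00_setup_env_mac.sh" ./ci/test_run_all.sh
```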
diff --git a/ci/test/05_before_script.sh b/ci/test/05_before_script.sh
index 96d92573b9..1d67355d27 100755
--- a/ci/test/05_before_script.sh
+++ b/ci/test/05_before_script.sh
@@ -10,10 +10,10 @@ export LC_ALL=C.UTF-8
if [ "$CI_OS_NAME" == "macos" ]; then
echo > "${HOME}/Library/Application Support/Bitcoin"
else
- DOCKER_EXEC echo \> \$HOME/.bitcoin
+ CI_EXEC echo \> \$HOME/.bitcoin
fi
-DOCKER_EXEC mkdir -p "${DEPENDS_DIR}/SDKs" "${DEPENDS_DIR}/sdk-sources"
+CI_EXEC mkdir -p "${DEPENDS_DIR}/SDKs" "${DEPENDS_DIR}/sdk-sources"
OSX_SDK_BASENAME="Xcode-${XCODE_VERSION}-${XCODE_BUILD_ID}-extracted-SDK-with-libcxx-headers"
@@ -21,42 +21,42 @@ if [ -n "$XCODE_VERSION" ] && [ ! -d "${DEPENDS_DIR}/SDKs/${OSX_SDK_BASENAME}" ]
OSX_SDK_FILENAME="${OSX_SDK_BASENAME}.tar.gz"
OSX_SDK_PATH="${DEPENDS_DIR}/sdk-sources/${OSX_SDK_FILENAME}"
if [ ! -f "$OSX_SDK_PATH" ]; then
- DOCKER_EXEC curl --location --fail "${SDK_URL}/${OSX_SDK_FILENAME}" -o "$OSX_SDK_PATH"
+ CI_EXEC curl --location --fail "${SDK_URL}/${OSX_SDK_FILENAME}" -o "$OSX_SDK_PATH"
fi
- DOCKER_EXEC tar -C "${DEPENDS_DIR}/SDKs" -xf "$OSX_SDK_PATH"
+ CI_EXEC tar -C "${DEPENDS_DIR}/SDKs" -xf "$OSX_SDK_PATH"
fi
if [ -n "$ANDROID_HOME" ] && [ ! -d "$ANDROID_HOME" ]; then
ANDROID_TOOLS_PATH=${DEPENDS_DIR}/sdk-sources/android-tools.zip
if [ ! -f "$ANDROID_TOOLS_PATH" ]; then
- DOCKER_EXEC curl --location --fail "${ANDROID_TOOLS_URL}" -o "$ANDROID_TOOLS_PATH"
+ CI_EXEC curl --location --fail "${ANDROID_TOOLS_URL}" -o "$ANDROID_TOOLS_PATH"
fi
- DOCKER_EXEC mkdir -p "${ANDROID_HOME}/cmdline-tools"
- DOCKER_EXEC unzip -o "$ANDROID_TOOLS_PATH" -d "${ANDROID_HOME}/cmdline-tools"
- DOCKER_EXEC "yes | ${ANDROID_HOME}/cmdline-tools/tools/bin/sdkmanager --install \"build-tools;${ANDROID_BUILD_TOOLS_VERSION}\" \"platform-tools\" \"platforms;android-${ANDROID_API_LEVEL}\" \"ndk;${ANDROID_NDK_VERSION}\""
+ CI_EXEC mkdir -p "${ANDROID_HOME}/cmdline-tools"
+ CI_EXEC unzip -o "$ANDROID_TOOLS_PATH" -d "${ANDROID_HOME}/cmdline-tools"
+ CI_EXEC "yes | ${ANDROID_HOME}/cmdline-tools/tools/bin/sdkmanager --install \"build-tools;${ANDROID_BUILD_TOOLS_VERSION}\" \"platform-tools\" \"platforms;android-${ANDROID_API_LEVEL}\" \"ndk;${ANDROID_NDK_VERSION}\""
fi
if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then
# Use BDB compiled using install_db4.sh script to work around linking issue when using BDB
# from depends. See https://github.com/bitcoin/bitcoin/pull/18288#discussion_r433189350 for
# details.
- DOCKER_EXEC "contrib/install_db4.sh \$(pwd) --enable-umrw CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
+ CI_EXEC "contrib/install_db4.sh \$(pwd) --enable-umrw CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
fi
if [[ $HOST = *-mingw32 ]]; then
- DOCKER_EXEC update-alternatives --set "${HOST}-g++" \$\(which "${HOST}-g++-posix"\)
+ CI_EXEC update-alternatives --set "${HOST}-g++" \$\(which "${HOST}-g++-posix"\)
fi
if [ -z "$NO_DEPENDS" ]; then
- if [[ $DOCKER_NAME_TAG == centos* ]]; then
+ if [[ $DOCKER_NAME_TAG == *centos* ]]; then
# CentOS has problems building the depends if the config shell is not explicitly set
# (i.e. for libevent a Makefile with an empty SHELL variable is generated, leading to
# an error as the first command is executed)
- SHELL_OPTS="LC_ALL=en_US.UTF-8 CONFIG_SHELL=/bin/bash"
+ SHELL_OPTS="LC_ALL=en_US.UTF-8 CONFIG_SHELL=/bin/dash"
else
SHELL_OPTS="CONFIG_SHELL="
fi
- DOCKER_EXEC "$SHELL_OPTS" make "$MAKEJOBS" -C depends HOST="$HOST" "$DEP_OPTS"
+ CI_EXEC "$SHELL_OPTS" make "$MAKEJOBS" -C depends HOST="$HOST" "$DEP_OPTS"
fi
if [ -n "$PREVIOUS_RELEASES_TO_DOWNLOAD" ]; then
- DOCKER_EXEC test/get_previous_releases.py -b -t "$PREVIOUS_RELEASES_DIR" "${PREVIOUS_RELEASES_TO_DOWNLOAD}"
+ CI_EXEC test/get_previous_releases.py -b -t "$PREVIOUS_RELEASES_DIR" "${PREVIOUS_RELEASES_TO_DOWNLOAD}"
fi
diff --git a/ci/test/06_script_a.sh b/ci/test/06_script_a.sh
index ff20d5bf3f..d5f1ca273a 100755
--- a/ci/test/06_script_a.sh
+++ b/ci/test/06_script_a.sh
@@ -7,10 +7,10 @@
export LC_ALL=C.UTF-8
if [ -n "$ANDROID_TOOLS_URL" ]; then
- DOCKER_EXEC make distclean || true
- DOCKER_EXEC ./autogen.sh
- DOCKER_EXEC ./configure "$BITCOIN_CONFIG" --prefix="${DEPENDS_DIR}/aarch64-linux-android" || ( (DOCKER_EXEC cat config.log) && false)
- DOCKER_EXEC "make $MAKEJOBS && cd src/qt && ANDROID_HOME=${ANDROID_HOME} ANDROID_NDK_HOME=${ANDROID_NDK_HOME} make apk"
+ CI_EXEC make distclean || true
+ CI_EXEC ./autogen.sh
+ CI_EXEC ./configure "$BITCOIN_CONFIG" --prefix="${DEPENDS_DIR}/aarch64-linux-android" || ( (CI_EXEC cat config.log) && false)
+ CI_EXEC "make $MAKEJOBS && cd src/qt && ANDROID_HOME=${ANDROID_HOME} ANDROID_NDK_HOME=${ANDROID_NDK_HOME} make apk"
exit 0
fi
@@ -18,38 +18,38 @@ BITCOIN_CONFIG_ALL="--enable-suppress-external-warnings --disable-dependency-tra
if [ -z "$NO_WERROR" ]; then
BITCOIN_CONFIG_ALL="${BITCOIN_CONFIG_ALL} --enable-werror"
fi
-DOCKER_EXEC "ccache --zero-stats --max-size=$CCACHE_SIZE"
+CI_EXEC "ccache --zero-stats --max-size=$CCACHE_SIZE"
if [ -n "$CONFIG_SHELL" ]; then
- DOCKER_EXEC "$CONFIG_SHELL" -c "./autogen.sh"
+ CI_EXEC "$CONFIG_SHELL" -c "./autogen.sh"
else
- DOCKER_EXEC ./autogen.sh
+ CI_EXEC ./autogen.sh
fi
-DOCKER_EXEC mkdir -p "${BASE_BUILD_DIR}"
+CI_EXEC mkdir -p "${BASE_BUILD_DIR}"
export P_CI_DIR="${BASE_BUILD_DIR}"
-DOCKER_EXEC "${BASE_ROOT_DIR}/configure" --cache-file=config.cache "$BITCOIN_CONFIG_ALL" "$BITCOIN_CONFIG" || ( (DOCKER_EXEC cat config.log) && false)
+CI_EXEC "${BASE_ROOT_DIR}/configure" --cache-file=config.cache "$BITCOIN_CONFIG_ALL" "$BITCOIN_CONFIG" || ( (CI_EXEC cat config.log) && false)
-DOCKER_EXEC make distdir VERSION="$HOST"
+CI_EXEC make distdir VERSION="$HOST"
export P_CI_DIR="${BASE_BUILD_DIR}/bitcoin-$HOST"
-DOCKER_EXEC ./configure --cache-file=../config.cache "$BITCOIN_CONFIG_ALL" "$BITCOIN_CONFIG" || ( (DOCKER_EXEC cat config.log) && false)
+CI_EXEC ./configure --cache-file=../config.cache "$BITCOIN_CONFIG_ALL" "$BITCOIN_CONFIG" || ( (CI_EXEC cat config.log) && false)
set -o errtrace
-trap 'DOCKER_EXEC "cat ${BASE_SCRATCH_DIR}/sanitizer-output/* 2> /dev/null"' ERR
+trap 'CI_EXEC "cat ${BASE_SCRATCH_DIR}/sanitizer-output/* 2> /dev/null"' ERR
if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then
# MemorySanitizer (MSAN) does not support tracking memory initialization done by
# using the Linux getrandom syscall. Avoid using getrandom by undefining
# HAVE_SYS_GETRANDOM. See https://github.com/google/sanitizers/issues/852 for
# details.
- DOCKER_EXEC 'grep -v HAVE_SYS_GETRANDOM src/config/bitcoin-config.h > src/config/bitcoin-config.h.tmp && mv src/config/bitcoin-config.h.tmp src/config/bitcoin-config.h'
+ CI_EXEC 'grep -v HAVE_SYS_GETRANDOM src/config/bitcoin-config.h > src/config/bitcoin-config.h.tmp && mv src/config/bitcoin-config.h.tmp src/config/bitcoin-config.h'
fi
-DOCKER_EXEC make "$MAKEJOBS" "$GOAL" || ( echo "Build failure. Verbose build follows." && DOCKER_EXEC make "$GOAL" V=1 ; false )
+CI_EXEC make "$MAKEJOBS" "$GOAL" || ( echo "Build failure. Verbose build follows." && CI_EXEC make "$GOAL" V=1 ; false )
-DOCKER_EXEC "ccache --version | head -n 1 && ccache --show-stats"
-DOCKER_EXEC du -sh "${DEPENDS_DIR}"/*/
-DOCKER_EXEC du -sh "${PREVIOUS_RELEASES_DIR}"
+CI_EXEC "ccache --version | head -n 1 && ccache --show-stats"
+CI_EXEC du -sh "${DEPENDS_DIR}"/*/
+CI_EXEC du -sh "${PREVIOUS_RELEASES_DIR}"
diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh
index b7802b4710..de42aa6eb1 100755
--- a/ci/test/06_script_b.sh
+++ b/ci/test/06_script_b.sh
@@ -8,36 +8,36 @@ export LC_ALL=C.UTF-8
if [[ $HOST = *-mingw32 ]]; then
# Generate all binaries, so that they can be wrapped
- DOCKER_EXEC make "$MAKEJOBS" -C src/secp256k1 VERBOSE=1
- DOCKER_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-wine.sh"
+ CI_EXEC make "$MAKEJOBS" -C src/secp256k1 VERBOSE=1
+ CI_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-wine.sh"
fi
if [ -n "$QEMU_USER_CMD" ]; then
# Generate all binaries, so that they can be wrapped
- DOCKER_EXEC make "$MAKEJOBS" -C src/secp256k1 VERBOSE=1
- DOCKER_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-qemu.sh"
+ CI_EXEC make "$MAKEJOBS" -C src/secp256k1 VERBOSE=1
+ CI_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-qemu.sh"
fi
if [ -n "$USE_VALGRIND" ]; then
- DOCKER_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-valgrind.sh"
+ CI_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-valgrind.sh"
fi
if [ "$RUN_UNIT_TESTS" = "true" ]; then
- DOCKER_EXEC "${TEST_RUNNER_ENV}" DIR_UNIT_TEST_DATA="${DIR_UNIT_TEST_DATA}" LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" make "$MAKEJOBS" check VERBOSE=1
+ CI_EXEC "${TEST_RUNNER_ENV}" DIR_UNIT_TEST_DATA="${DIR_UNIT_TEST_DATA}" LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" make "$MAKEJOBS" check VERBOSE=1
fi
if [ "$RUN_UNIT_TESTS_SEQUENTIAL" = "true" ]; then
- DOCKER_EXEC "${TEST_RUNNER_ENV}" DIR_UNIT_TEST_DATA="${DIR_UNIT_TEST_DATA}" LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" "${BASE_BUILD_DIR}/bitcoin-*/src/test/test_bitcoin*" --catch_system_errors=no -l test_suite
+ CI_EXEC "${TEST_RUNNER_ENV}" DIR_UNIT_TEST_DATA="${DIR_UNIT_TEST_DATA}" LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" "${BASE_BUILD_DIR}/bitcoin-*/src/test/test_bitcoin*" --catch_system_errors=no -l test_suite
fi
if [ "$RUN_FUNCTIONAL_TESTS" = "true" ]; then
- DOCKER_EXEC LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" "${TEST_RUNNER_ENV}" test/functional/test_runner.py --ci "$MAKEJOBS" --tmpdirprefix "${BASE_SCRATCH_DIR}/test_runner/" --ansi --combinedlogslen=4000 --timeout-factor="${TEST_RUNNER_TIMEOUT_FACTOR}" "${TEST_RUNNER_EXTRA}" --quiet --failfast
+ CI_EXEC LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" "${TEST_RUNNER_ENV}" test/functional/test_runner.py --ci "$MAKEJOBS" --tmpdirprefix "${BASE_SCRATCH_DIR}/test_runner/" --ansi --combinedlogslen=4000 --timeout-factor="${TEST_RUNNER_TIMEOUT_FACTOR}" "${TEST_RUNNER_EXTRA}" --quiet --failfast
fi
if [ "$RUN_SECURITY_TESTS" = "true" ]; then
- DOCKER_EXEC make test-security-check
+ CI_EXEC make test-security-check
fi
if [ "$RUN_FUZZ_TESTS" = "true" ]; then
- DOCKER_EXEC LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" test/fuzz/test_runner.py "${FUZZ_TESTS_CONFIG}" "$MAKEJOBS" -l DEBUG "${DIR_FUZZ_IN}"
+ CI_EXEC LD_LIBRARY_PATH="${DEPENDS_DIR}/${HOST}/lib" test/fuzz/test_runner.py "${FUZZ_TESTS_CONFIG}" "$MAKEJOBS" -l DEBUG "${DIR_FUZZ_IN}"
fi
diff --git a/configure.ac b/configure.ac
index 5a6b54a1ae..bef3973996 100644
--- a/configure.ac
+++ b/configure.ac
@@ -321,7 +321,7 @@ AC_ARG_ENABLE([werror],
AC_ARG_ENABLE([external-signer],
[AS_HELP_STRING([--enable-external-signer],[compile external signer support (default is yes, requires Boost::Process)])],
[use_external_signer=$enableval],
- [use_external_signer=yes])
+ [use_external_signer=auto])
AC_ARG_ENABLE([lto],
[AS_HELP_STRING([--enable-lto],[build using LTO (default is no)])],
@@ -1415,7 +1415,21 @@ if test "$use_boost" = "yes"; then
fi
if test "$use_external_signer" != "no"; then
- AC_DEFINE([ENABLE_EXTERNAL_SIGNER], [], [Define if external signer support is enabled])
+ case $host in
+ *mingw*)
+ dnl Boost Process uses Boost Filesystem when targeting Windows. Also,
+ dnl since Boost 1.71.0, Process does not work with mingw-w64 without
+ dnl workarounds. See 67669ab425b52a2b6be3d2f3b3b7e3939b676a2c.
+ if test "$use_external_signer" = "yes"; then
+ AC_MSG_ERROR([External signing is not supported on Windows])
+ fi
+ use_external_signer="no";
+ ;;
+ *)
+ use_external_signer="yes"
+ AC_DEFINE([ENABLE_EXTERNAL_SIGNER], [1], [Define if external signer support is enabled])
+ ;;
+ esac
fi
AM_CONDITIONAL([ENABLE_EXTERNAL_SIGNER], [test "$use_external_signer" = "yes"])
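The net effect is that external signer support now behaves like an "auto" option: it is switched off when targeting Windows (where Boost Process is problematic), enabled everywhere else, and an explicit --enable-external-signer on a mingw host aborts configure with the error above. A quick, illustrative way to confirm what a given build tree ended up with (assumes configure has already been run):

```sh
# The macro is only defined when external signer support was enabled.
grep ENABLE_EXTERNAL_SIGNER src/config/bitcoin-config.h

# Explicitly requesting it for a Windows target should now fail early:
#   ./configure --host=x86_64-w64-mingw32 --enable-external-signer
#   configure: error: External signing is not supported on Windows
```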
diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py
index 137fe377da..e6a29b73b9 100755
--- a/contrib/devtools/security-check.py
+++ b/contrib/devtools/security-check.py
@@ -111,6 +111,17 @@ def check_ELF_separate_code(binary):
return False
return True
+def check_ELF_control_flow(binary) -> bool:
+ '''
+ Check for control flow instrumentation
+ '''
+ main = binary.get_function_address('main')
+ content = binary.get_content_from_virtual_address(main, 4, lief.Binary.VA_TYPES.AUTO)
+
+ if content == [243, 15, 30, 250]: # endbr64
+ return True
+ return False
+
def check_PE_DYNAMIC_BASE(binary) -> bool:
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
return lief.PE.DLL_CHARACTERISTICS.DYNAMIC_BASE in binary.optional_header.dll_characteristics_lists
@@ -172,7 +183,7 @@ def check_NX(binary) -> bool:
'''
return binary.has_nx
-def check_control_flow(binary) -> bool:
+def check_MACHO_control_flow(binary) -> bool:
'''
Check for control flow instrumentation
'''
@@ -200,17 +211,14 @@ BASE_PE = [
]
BASE_MACHO = [
- ('PIE', check_PIE),
('NOUNDEFS', check_MACHO_NOUNDEFS),
- ('NX', check_NX),
('LAZY_BINDINGS', check_MACHO_LAZY_BINDINGS),
('Canary', check_MACHO_Canary),
- ('CONTROL_FLOW', check_control_flow),
]
CHECKS = {
lief.EXE_FORMATS.ELF: {
- lief.ARCHITECTURES.X86: BASE_ELF,
+ lief.ARCHITECTURES.X86: BASE_ELF + [('CONTROL_FLOW', check_ELF_control_flow)],
lief.ARCHITECTURES.ARM: BASE_ELF,
lief.ARCHITECTURES.ARM64: BASE_ELF,
lief.ARCHITECTURES.PPC: BASE_ELF,
@@ -220,7 +228,10 @@ CHECKS = {
lief.ARCHITECTURES.X86: BASE_PE,
},
lief.EXE_FORMATS.MACHO: {
- lief.ARCHITECTURES.X86: BASE_MACHO,
+ lief.ARCHITECTURES.X86: BASE_MACHO + [('PIE', check_PIE),
+ ('NX', check_NX),
+ ('CONTROL_FLOW', check_MACHO_control_flow)],
+ lief.ARCHITECTURES.ARM64: BASE_MACHO,
}
}
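The new ELF CONTROL_FLOW check only inspects the first four bytes of main() for an endbr64 instruction (f3 0f 1e fa), i.e. x86_64 binaries built with -fcf-protection=full. A rough manual equivalent for spot-checking a locally built binary, assuming binutils is available (the binary path is just an example):

```sh
# With CET instrumentation enabled, the first instruction of main
# should disassemble as endbr64.
objdump -d src/bitcoind | grep -A1 '<main>:' | head -n 2
```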
diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py
index 4b695b3530..461132ae63 100755
--- a/contrib/devtools/symbol-check.py
+++ b/contrib/devtools/symbol-check.py
@@ -229,7 +229,7 @@ def check_MACHO_min_os(binary) -> bool:
return False
def check_MACHO_sdk(binary) -> bool:
- if binary.build_version.sdk == [10, 15, 6]:
+ if binary.build_version.sdk == [11, 0, 0]:
return True
return False
diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py
index 6b748e8743..d3d225f3ab 100755
--- a/contrib/devtools/test-security-check.py
+++ b/contrib/devtools/test-security-check.py
@@ -5,6 +5,7 @@
'''
Test script for security-check.py
'''
+import lief #type:ignore
import os
import subprocess
from typing import List
@@ -41,25 +42,49 @@ def call_security_check(cc, source, executable, options):
p = subprocess.run(['./contrib/devtools/security-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True)
return (p.returncode, p.stdout.rstrip())
+def get_arch(cc, source, executable):
+ subprocess.run([*cc, source, '-o', executable], check=True)
+ binary = lief.parse(executable)
+ arch = binary.abstract.header.architecture
+ os.remove(executable)
+ return arch
+
class TestSecurityChecks(unittest.TestCase):
def test_ELF(self):
source = 'test1.c'
executable = 'test1'
cc = determine_wellknown_cmd('CC', 'gcc')
write_testcode(source)
+ arch = get_arch(cc, source, executable)
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
- (1, executable+': failed PIE NX RELRO Canary'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
- (1, executable+': failed PIE RELRO Canary'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
- (1, executable+': failed PIE RELRO'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
- (1, executable+': failed RELRO'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
- (1, executable+': failed separate_code'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
- (0, ''))
+ if arch == lief.ARCHITECTURES.X86:
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE NX RELRO Canary CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE RELRO Canary CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE RELRO CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed RELRO CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
+ (1, executable+': failed separate_code CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code', '-fcf-protection=full']),
+ (0, ''))
+ else:
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE NX RELRO Canary'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE RELRO Canary'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE RELRO'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed RELRO'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
+ (1, executable+': failed separate_code'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
+ (0, ''))
clean_files(source, executable)
@@ -91,21 +116,34 @@ class TestSecurityChecks(unittest.TestCase):
executable = 'test1'
cc = determine_wellknown_cmd('CC', 'clang')
write_testcode(source)
+ arch = get_arch(cc, source, executable)
+
+ if arch == lief.ARCHITECTURES.X86:
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector']),
+ (1, executable+': failed NOUNDEFS LAZY_BINDINGS Canary PIE NX CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fstack-protector-all']),
+ (1, executable+': failed NOUNDEFS LAZY_BINDINGS PIE NX CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fstack-protector-all']),
+ (1, executable+': failed NOUNDEFS LAZY_BINDINGS PIE CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all']),
+ (1, executable+': failed LAZY_BINDINGS PIE CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all']),
+ (1, executable+': failed PIE CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']),
+ (1, executable+': failed PIE'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']),
+ (0, ''))
+ else:
+ # arm64 darwin doesn't support non-PIE binaries, control flow or executable stacks
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fno-stack-protector']),
+ (1, executable+': failed NOUNDEFS LAZY_BINDINGS Canary'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-flat_namespace','-fstack-protector-all']),
+ (1, executable+': failed NOUNDEFS LAZY_BINDINGS'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-fstack-protector-all']),
+ (1, executable+': failed LAZY_BINDINGS'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-bind_at_load','-fstack-protector-all']),
+ (0, ''))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fno-stack-protector']),
- (1, executable+': failed PIE NOUNDEFS NX LAZY_BINDINGS Canary CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-Wl,-allow_stack_execute','-fstack-protector-all']),
- (1, executable+': failed PIE NOUNDEFS NX LAZY_BINDINGS CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-flat_namespace','-fstack-protector-all']),
- (1, executable+': failed PIE NOUNDEFS LAZY_BINDINGS CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-fstack-protector-all']),
- (1, executable+': failed PIE LAZY_BINDINGS CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all']),
- (1, executable+': failed PIE CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-no_pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']),
- (1, executable+': failed PIE'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-pie','-Wl,-bind_at_load','-fstack-protector-all', '-fcf-protection=full']),
- (0, ''))
clean_files(source, executable)
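Because the expected failure strings now depend on the architecture LIEF reports for the freshly compiled test binary, this script is best reviewed by running it on both an x86_64 and an arm64 machine. A minimal invocation, assuming gcc and the lief Python module are installed (the test name is passed explicitly so only the ELF case runs):

```sh
# unittest.main() accepts individual test names on the command line.
python3 contrib/devtools/test-security-check.py TestSecurityChecks.test_ELF
```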
diff --git a/contrib/guix/README.md b/contrib/guix/README.md
index 7cfa0de70f..90289f9d40 100644
--- a/contrib/guix/README.md
+++ b/contrib/guix/README.md
@@ -224,7 +224,7 @@ details.
_(defaults to "x86\_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu
riscv64-linux-gnu powerpc64-linux-gnu powerpc64le-linux-gnu
- x86\_64-w64-mingw32 x86\_64-apple-darwin")_
+ x86\_64-w64-mingw32 x86\_64-apple-darwin arm64-apple-darwin")_
* _**SOURCES_PATH**_
@@ -249,7 +249,7 @@ details.
Set the path where _extracted_ SDKs can be found. This is passed through to
the depends tree. Note that this should be set to the _parent_ directory of
the actual SDK (e.g. `SDK_PATH=$HOME/Downloads/macOS-SDKs` instead of
- `$HOME/Downloads/macOS-SDKs/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers`).
+ `$HOME/Downloads/macOS-SDKs/Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers`).
The path that this environment variable points to **must be a directory**, and
**NOT a symlink to a directory**.
diff --git a/contrib/guix/guix-build b/contrib/guix/guix-build
index 98381f3e24..bfffbda742 100755
--- a/contrib/guix/guix-build
+++ b/contrib/guix/guix-build
@@ -76,7 +76,7 @@ mkdir -p "$VERSION_BASE"
# Default to building for all supported HOSTs (overridable by environment)
export HOSTS="${HOSTS:-x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu powerpc64-linux-gnu powerpc64le-linux-gnu
x86_64-w64-mingw32
- x86_64-apple-darwin}"
+ x86_64-apple-darwin arm64-apple-darwin}"
# Usage: distsrc_for_host HOST
#
@@ -239,7 +239,7 @@ SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git -c log.showSignature=false log --f
time-machine() {
# shellcheck disable=SC2086
guix time-machine --url=https://git.savannah.gnu.org/git/guix.git \
- --commit=6ba510d76d6847065be725e958718002f3b13c7a \
+ --commit=1ef7a03a148cf5f83ab1820444f6bd50d8e732d1 \
--cores="$JOBS" \
--keep-failed \
--fallback \
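With arm64-apple-darwin in the default HOSTS list, a plain guix-build now also produces Apple Silicon tarballs. As the guix README notes, the list remains overridable through the environment, for example:

```sh
# Build only the two macOS targets with the pinned guix commit.
HOSTS="x86_64-apple-darwin arm64-apple-darwin" ./contrib/guix/guix-build
```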
diff --git a/contrib/guix/guix-codesign b/contrib/guix/guix-codesign
index e52ad30b8d..2dd30bfa64 100755
--- a/contrib/guix/guix-codesign
+++ b/contrib/guix/guix-codesign
@@ -91,7 +91,7 @@ fi
################
# Default to building for all supported HOSTs (overridable by environment)
-export HOSTS="${HOSTS:-x86_64-w64-mingw32 x86_64-apple-darwin}"
+export HOSTS="${HOSTS:-x86_64-w64-mingw32 x86_64-apple-darwin arm64-apple-darwin}"
# Usage: distsrc_for_host HOST
#
diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh
index 79a3e77f40..e06a469338 100755
--- a/contrib/guix/libexec/build.sh
+++ b/contrib/guix/libexec/build.sh
@@ -377,7 +377,7 @@ mkdir -p "$DISTSRC"
{
find "${DISTNAME}/bin" -type f -executable -print0
find "${DISTNAME}/lib" -type f -print0
- } | xargs -0 -n1 -P"$JOBS" -I{} "${DISTSRC}/contrib/devtools/split-debug.sh" {} {} {}.dbg
+ } | xargs -0 -P"$JOBS" -I{} "${DISTSRC}/contrib/devtools/split-debug.sh" {} {} {}.dbg
;;
esac
diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm
index 3528030bec..22b922dc9b 100644
--- a/contrib/guix/manifest.scm
+++ b/contrib/guix/manifest.scm
@@ -397,6 +397,11 @@ thus should be able to compile on most platforms where these exist.")
(string-append indent
"@unittest.skip(\"Disabled by Guix\")\n"
line)))
+ (substitute* "tests/test_validate.py"
+ (("^(.*)def test_revocation_mode_soft" line indent)
+ (string-append indent
+ "@unittest.skip(\"Disabled by Guix\")\n"
+ line)))
#t))
(replace 'check
(lambda _
@@ -574,7 +579,7 @@ inspecting signatures in Mach-O binaries.")
;; Build tools
gnu-make
libtool
- autoconf
+ autoconf-2.71
automake
pkg-config
bison
diff --git a/contrib/macdeploy/README.md b/contrib/macdeploy/README.md
index a685aac1c0..df7eac3ec8 100644
--- a/contrib/macdeploy/README.md
+++ b/contrib/macdeploy/README.md
@@ -13,13 +13,13 @@ When complete, it will have produced `Bitcoin-Core.dmg`.
### Step 1: Obtaining `Xcode.app`
Our current macOS SDK
-(`Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz`) can be
+(`Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz`) can be
extracted from
-[Xcode_12.1.xip](https://download.developer.apple.com/Developer_Tools/Xcode_12.1/Xcode_12.1.xip).
+[Xcode_12.2.xip](https://download.developer.apple.com/Developer_Tools/Xcode_12.2/Xcode_12.2.xip).
Alternatively, after logging in to your account go to 'Downloads', then 'More'
-and look for [`Xcode_12.1`](https://download.developer.apple.com/Developer_Tools/Xcode_12.1/Xcode_12.1.xip).
+and look for [`Xcode_12.2`](https://download.developer.apple.com/Developer_Tools/Xcode_12.2/Xcode_12.2.xip).
An Apple ID and cookies enabled for the hostname are needed to download this.
-The `sha256sum` of the archive should be `612443b1894b39368a596ea1607f30cbb0481ad44d5e29c75edb71a6d2cf050f`.
+The `sha256sum` of the archive should be `28d352f8c14a43d9b8a082ac6338dc173cb153f964c6e8fb6ba389e5be528bd0`.
After Xcode version 7.x, Apple started shipping the `Xcode.app` in a `.xip`
archive. This makes the SDK less-trivial to extract on non-macOS machines. One
@@ -30,25 +30,25 @@ approach (tested on Debian Buster) is outlined below:
apt install cpio
git clone https://github.com/bitcoin-core/apple-sdk-tools.git
-# Unpack Xcode_12.1.xip and place the resulting Xcode.app in your current
+# Unpack Xcode_12.2.xip and place the resulting Xcode.app in your current
# working directory
-python3 apple-sdk-tools/extract_xcode.py -f Xcode_12.1.xip | cpio -d -i
+python3 apple-sdk-tools/extract_xcode.py -f Xcode_12.2.xip | cpio -d -i
```
On macOS the process is more straightforward:
```bash
-xip -x Xcode_12.1.xip
+xip -x Xcode_12.2.xip
```
-### Step 2: Generating `Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz` from `Xcode.app`
+### Step 2: Generating `Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz` from `Xcode.app`
-To generate `Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz`, run
+To generate `Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz`, run
the script [`gen-sdk`](./gen-sdk) with the path to `Xcode.app` (extracted in the
previous stage) as the first argument.
```bash
-# Generate a Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz from
+# Generate a Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers.tar.gz from
# the supplied Xcode.app
./contrib/macdeploy/gen-sdk '/path/to/Xcode.app'
```
diff --git a/depends/Makefile b/depends/Makefile
index d2a3c35f1e..73e2af5501 100644
--- a/depends/Makefile
+++ b/depends/Makefile
@@ -137,7 +137,7 @@ include packages/packages.mk
build_id:=$(shell env CC='$(build_CC)' CXX='$(build_CXX)' AR='$(build_AR)' RANLIB='$(build_RANLIB)' STRIP='$(build_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' ./gen_id '$(BUILD_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))')
$(host_arch)_$(host_os)_id:=$(shell env CC='$(host_CC)' CXX='$(host_CXX)' AR='$(host_AR)' RANLIB='$(host_RANLIB)' STRIP='$(host_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' ./gen_id '$(HOST_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))')
-qrencode_packages_$(NO_QR) = $(qrencode_packages)
+qrencode_packages_$(NO_QR) = $(qrencode_$(host_os)_packages)
qt_packages_$(NO_QT) = $(qt_packages) $(qt_$(host_os)_packages) $(qt_$(host_arch)_$(host_os)_packages) $(qrencode_packages_)
diff --git a/depends/README.md b/depends/README.md
index 9f0b60adf8..6b6af99dee 100644
--- a/depends/README.md
+++ b/depends/README.md
@@ -29,6 +29,7 @@ Common `host-platform-triplet`s for cross compilation are:
- `x86_64-pc-linux-gnu` for x86 Linux
- `x86_64-w64-mingw32` for Win64
- `x86_64-apple-darwin` for macOS
+- `arm64-apple-darwin` for ARM macOS
- `arm-linux-gnueabihf` for Linux ARM 32 bit
- `aarch64-linux-gnu` for Linux ARM 64 bit
- `powerpc64-linux-gnu` for Linux POWER 64-bit (big endian)
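Cross-compiling for the arm64-apple-darwin triplet added above follows the usual depends workflow; a minimal sketch, assuming the extracted macOS SDK lives under $HOME/SDKs as described in contrib/macdeploy:

```sh
# Build the dependencies for Apple Silicon macOS, then point configure at them.
make -C depends HOST=arm64-apple-darwin SDK_PATH="$HOME/SDKs"
./autogen.sh
CONFIG_SITE=$PWD/depends/arm64-apple-darwin/share/config.site ./configure --prefix=/
make
```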
diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk
index ea92bb7793..6bf30b499a 100644
--- a/depends/hosts/darwin.mk
+++ b/depends/hosts/darwin.mk
@@ -1,7 +1,7 @@
OSX_MIN_VERSION=10.15
-OSX_SDK_VERSION=10.15.6
-XCODE_VERSION=12.1
-XCODE_BUILD_ID=12A7403
+OSX_SDK_VERSION=11.0
+XCODE_VERSION=12.2
+XCODE_BUILD_ID=12B45b
LD64_VERSION=609
OSX_SDK=$(SDK_PATH)/Xcode-$(XCODE_VERSION)-$(XCODE_BUILD_ID)-extracted-SDK-with-libcxx-headers
diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk
index 5fe2b2bbb8..fe2425ffaf 100644
--- a/depends/packages/boost.mk
+++ b/depends/packages/boost.mk
@@ -26,8 +26,7 @@ $(package)_config_libraries=filesystem,system,test
$(package)_cxxflags+=-std=c++17
$(package)_cxxflags_linux=-fPIC
$(package)_cxxflags_android=-fPIC
-$(package)_cxxflags_x86_64_darwin=-fcf-protection=full
-$(package)_cxxflags_mingw32=-fcf-protection=full
+$(package)_cxxflags_x86_64=-fcf-protection=full
endef
define $(package)_preprocess_cmds
diff --git a/depends/packages/packages.mk b/depends/packages/packages.mk
index 77866c8e7a..4c66b3bdb9 100644
--- a/depends/packages/packages.mk
+++ b/depends/packages/packages.mk
@@ -1,10 +1,12 @@
packages:=boost libevent
-qrencode_packages = qrencode
+qrencode_linux_packages = qrencode
+qrencode_android_packages = qrencode
+qrencode_darwin_packages = qrencode
+qrencode_mingw32_packages = qrencode
qt_linux_packages:=qt expat libxcb xcb_proto libXau xproto freetype fontconfig libxkbcommon libxcb_util libxcb_util_render libxcb_util_keysyms libxcb_util_image libxcb_util_wm
qt_android_packages=qt
-
qt_darwin_packages=qt
qt_mingw32_packages=qt
diff --git a/depends/packages/zeromq.mk b/depends/packages/zeromq.mk
index 9798248c61..f5478a88c4 100644
--- a/depends/packages/zeromq.mk
+++ b/depends/packages/zeromq.mk
@@ -1,12 +1,13 @@
package=zeromq
-$(package)_version=4.3.1
+$(package)_version=4.3.4
$(package)_download_path=https://github.com/zeromq/libzmq/releases/download/v$($(package)_version)/
$(package)_file_name=$(package)-$($(package)_version).tar.gz
-$(package)_sha256_hash=bcbabe1e2c7d0eec4ed612e10b94b112dd5f06fcefa994a0c79a45d835cd21eb
-$(package)_patches=remove_libstd_link.patch
+$(package)_sha256_hash=c593001a89f5a85dd2ddf564805deb860e02471171b3f204944857336295c3e5
+$(package)_patches=remove_libstd_link.patch netbsd_kevent_void.patch
define $(package)_set_vars
- $(package)_config_opts=--without-docs --disable-shared --disable-curve --disable-curve-keygen --disable-perf
+ $(package)_config_opts = --without-docs --disable-shared --disable-valgrind
+ $(package)_config_opts += --disable-perf --disable-curve-keygen --disable-curve --disable-libbsd
$(package)_config_opts += --without-libsodium --without-libgssapi_krb5 --without-pgm --without-norm --without-vmci
$(package)_config_opts += --disable-libunwind --disable-radix-tree --without-gcov --disable-dependency-tracking
$(package)_config_opts += --disable-Werror --disable-drafts --enable-option-checking
@@ -17,10 +18,12 @@ endef
define $(package)_preprocess_cmds
patch -p1 < $($(package)_patch_dir)/remove_libstd_link.patch && \
+ patch -p1 < $($(package)_patch_dir)/netbsd_kevent_void.patch && \
cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub config
endef
define $(package)_config_cmds
+ ./autogen.sh && \
$($(package)_autoconf)
endef
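The added ./autogen.sh step is presumably needed because the new patch modifies zeromq's configure.ac, so the configure script shipped in the release tarball can no longer be used as-is. To rebuild just this package while reviewing, something like the following should suffice for the native host:

```sh
# Rebuild only the zeromq dependency.
make -C depends zeromq
```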
diff --git a/depends/patches/zeromq/netbsd_kevent_void.patch b/depends/patches/zeromq/netbsd_kevent_void.patch
new file mode 100644
index 0000000000..845c6bdda6
--- /dev/null
+++ b/depends/patches/zeromq/netbsd_kevent_void.patch
@@ -0,0 +1,57 @@
+commit 129137d5182967dbfcfec66bad843df2a992a78f
+Author: fanquake <fanquake@gmail.com>
+Date: Mon Jan 3 20:13:33 2022 +0800
+
+ problem: kevent udata is now void* on NetBSD Current (10)
+
+ solution: check for the intptr_t variant in configure.
+
+diff --git a/configure.ac b/configure.ac
+index 1a571291..402f8b86 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -307,6 +307,27 @@ case "${host_os}" in
+ if test "x$libzmq_netbsd_has_atomic" = "xno"; then
+ AC_DEFINE(ZMQ_FORCE_MUTEXES, 1, [Force to use mutexes])
+ fi
++ # NetBSD Current (to become 10) has changed the type of udata in its
++ # kevent struct from intptr_t to void * to align with darwin and other
++ # BSDs, see upstream commit:
++ # https://github.com/NetBSD/src/commit/e5ead823eb916b56589d2c6c560dbcfe4a2d0afc
++ AC_MSG_CHECKING([whether kevent udata type is intptr_t])
++ AC_LANG_PUSH([C++])
++ AC_LINK_IFELSE([AC_LANG_PROGRAM(
++ [[#include <sys/types.h>
++ #include <sys/event.h>
++ #include <sys/time.h>]],
++ [[struct kevent ev;
++ intptr_t udata;
++ EV_SET(&ev, 0, 0, EV_ADD, 0, 0, udata);
++ return 0;]])],
++ [libzmq_netbsd_kevent_udata_intptr_t=yes],
++ [libzmq_netbsd_kevent_udata_intptr_t=no])
++ AC_LANG_POP([C++])
++ AC_MSG_RESULT([$libzmq_netbsd_kevent_udata_intptr_t])
++ if test "x$libzmq_netbsd_kevent_udata_intptr_t" = "xyes"; then
++ AC_DEFINE(ZMQ_NETBSD_KEVENT_UDATA_INTPTR_T, 1, [kevent udata type is intptr_t])
++ fi
+ ;;
+ *openbsd*|*bitrig*)
+ # Define on OpenBSD to enable all library features
+diff --git a/src/kqueue.cpp b/src/kqueue.cpp
+index 53d82ac4..a6a7a7f2 100644
+--- a/src/kqueue.cpp
++++ b/src/kqueue.cpp
+@@ -46,9 +46,9 @@
+ #include "i_poll_events.hpp"
+ #include "likely.hpp"
+
+-// NetBSD defines (struct kevent).udata as intptr_t, everyone else
+-// as void *.
+-#if defined ZMQ_HAVE_NETBSD
++// NetBSD up to version 9 defines (struct kevent).udata as intptr_t,
++// everyone else as void *.
++#if defined ZMQ_HAVE_NETBSD && defined(ZMQ_NETBSD_KEVENT_UDATA_INTPTR_T)
+ #define kevent_udata_t intptr_t
+ #else
+ #define kevent_udata_t void *
diff --git a/doc/REST-interface.md b/doc/REST-interface.md
index 51a73b89fc..1f0a07a284 100644
--- a/doc/REST-interface.md
+++ b/doc/REST-interface.md
@@ -108,6 +108,7 @@ $ curl localhost:18332/rest/getutxos/checkmempool/b2cdfd7b89def827ff8af7cd9bff76
"value" : 8.8687,
"scriptPubKey" : {
"asm" : "OP_DUP OP_HASH160 1c7cebb529b86a04c683dfa87be49de35bcf589e OP_EQUALVERIFY OP_CHECKSIG",
+ "desc" : "addr(mi7as51dvLJsizWnTMurtRmrP8hG2m1XvD)#gj9tznmy",
"hex" : "76a9141c7cebb529b86a04c683dfa87be49de35bcf589e88ac",
"type" : "pubkeyhash",
"address" : "mi7as51dvLJsizWnTMurtRmrP8hG2m1XvD"
diff --git a/doc/dependencies.md b/doc/dependencies.md
index 490ffd3c00..63315cdcc2 100644
--- a/doc/dependencies.md
+++ b/doc/dependencies.md
@@ -20,7 +20,7 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct
| PCRE | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) |
| Python (tests) | | [3.6](https://www.python.org/downloads) | | | |
| qrencode | [3.4.4](https://fukuchi.org/works/qrencode) | | No | | |
-| Qt | [5.12.11](https://download.qt.io/official_releases/qt/) | [5.9.5](https://github.com/bitcoin/bitcoin/issues/20104) | No | | |
+| Qt | [5.15.2](https://download.qt.io/official_releases/qt/) | [5.9.5](https://github.com/bitcoin/bitcoin/issues/20104) | No | | |
| SQLite | [3.32.1](https://sqlite.org/download.html) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | | | |
| XCB | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) (Linux only) |
| systemtap ([tracing](tracing.md))| [4.5](https://sourceware.org/systemtap/ftp/releases/) | | | | |
diff --git a/doc/fuzzing.md b/doc/fuzzing.md
index 73d04837f1..9abfbc9213 100644
--- a/doc/fuzzing.md
+++ b/doc/fuzzing.md
@@ -71,6 +71,15 @@ block^@M-^?M-^?M-^?M-^?M-^?nM-^?M-^?
In this case the fuzzer managed to create a `block` message which when passed to `ProcessMessage(...)` increased coverage.
+It is possible to specify `bitcoind` arguments to the `fuzz` executable.
+Depending on the test, they may be ignored or consumed and alter the behavior
+of the test. Just make sure to use double-dash to distinguish them from the
+fuzzer's own arguments:
+
+```sh
+$ FUZZ=address_deserialize_v2 src/test/fuzz/fuzz -runs=1 fuzz_seed_corpus/address_deserialize_v2 --checkaddrman=5 --printtoconsole=1
+```
+
## Fuzzing corpora
The project's collection of seed corpora is found in the [`bitcoin-core/qa-assets`](https://github.com/bitcoin-core/qa-assets) repo.
diff --git a/doc/release-notes-23508.md b/doc/release-notes-23508.md
new file mode 100644
index 0000000000..098654e00b
--- /dev/null
+++ b/doc/release-notes-23508.md
@@ -0,0 +1,9 @@
+Updated RPCs
+------------
+
+- Information on soft fork status has been moved from `getblockchaininfo`
+ to `getdeploymentinfo` which allows querying soft fork status at any
+ block, rather than just at the chain tip. Inclusion of soft fork
+ status in `getblockchaininfo` can currently be restored using the
+ configuration `-deprecatedrpc=softforks`, but this will be removed in
+ a future release. (#23508)
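For context, the replacement RPC can be queried either at the tip or at an arbitrary block, roughly as follows (illustrative invocations; the block hash is a placeholder):

```sh
# Deployment/soft fork status at the current tip ...
bitcoin-cli getdeploymentinfo
# ... or at a specific block.
bitcoin-cli getdeploymentinfo "<blockhash>"
```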
diff --git a/doc/tor.md b/doc/tor.md
index 8dc82ca91e..d23d8a1810 100644
--- a/doc/tor.md
+++ b/doc/tor.md
@@ -40,9 +40,11 @@ outgoing connections, but more is possible.
-onion=ip:port Set the proxy server to use for Tor onion services. You do not
need to set this if it's the same as -proxy. You can use -onion=0
to explicitly disable access to onion services.
+ ------------------------------------------------------------------
Note: Only the -proxy option sets the proxy for DNS requests;
with -onion they will not route over Tor, so use -proxy if you
have privacy concerns.
+ ------------------------------------------------------------------
-listen When using -proxy, listening is disabled by default. If you want
to manually configure an onion service (see section 3), you'll
diff --git a/share/setup.nsi.in b/share/setup.nsi.in
index 85ae7c57af..c7b149345c 100644
--- a/share/setup.nsi.in
+++ b/share/setup.nsi.in
@@ -53,7 +53,7 @@ Var StartMenuGroup
# Installer attributes
InstallDir $PROGRAMFILES64\Bitcoin
-CRCCheck on
+CRCCheck force
XPStyle on
BrandingText " "
ShowInstDetails show
diff --git a/src/banman.cpp b/src/banman.cpp
index 95b927c1ff..b28e3f7f7c 100644
--- a/src/banman.cpp
+++ b/src/banman.cpp
@@ -7,6 +7,7 @@
#include <netaddress.h>
#include <node/ui_interface.h>
+#include <sync.h>
#include <util/system.h>
#include <util/time.h>
#include <util/translation.h>
@@ -39,18 +40,23 @@ BanMan::~BanMan()
void BanMan::DumpBanlist()
{
- SweepBanned(); // clean unused entries (if bantime has expired)
-
- if (!BannedSetIsDirty()) return;
-
- int64_t n_start = GetTimeMillis();
+ static Mutex dump_mutex;
+ LOCK(dump_mutex);
banmap_t banmap;
- GetBanned(banmap);
- if (m_ban_db.Write(banmap)) {
+ {
+ LOCK(m_cs_banned);
+ SweepBanned();
+ if (!BannedSetIsDirty()) return;
+ banmap = m_banned;
SetBannedSetDirty(false);
}
+ int64_t n_start = GetTimeMillis();
+ if (!m_ban_db.Write(banmap)) {
+ SetBannedSetDirty(true);
+ }
+
LogPrint(BCLog::NET, "Flushed %d banned node addresses/subnets to disk %dms\n", banmap.size(),
GetTimeMillis() - n_start);
}
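For context, the rewritten `DumpBanlist()` above follows a copy-under-lock pattern: the ban map is snapshotted and the dirty flag cleared while the data lock is held, the slow disk write happens outside that lock, and a static `dump_mutex` serializes concurrent dumps so a failed write can safely re-mark the set dirty. A minimal, self-contained sketch of the same pattern is below; the `BanStore`/`BanListDumper` types and field names are hypothetical stand-ins, not the commit's classes.

```cpp
#include <map>
#include <mutex>
#include <string>

// Hypothetical stand-ins for the real banmap and ban database types.
struct BanEntry { long long ban_until{0}; };
using BanMap = std::map<std::string, BanEntry>;
struct BanStore { bool Write(const BanMap&) { return true; } };

class BanListDumper
{
    std::mutex m_mutex;   // protects m_banned and m_dirty
    BanMap m_banned;
    bool m_dirty{false};
    BanStore m_store;

public:
    void Dump()
    {
        // Serialize whole dumps so two threads never interleave disk writes.
        static std::mutex dump_mutex;
        std::lock_guard<std::mutex> dump_lock(dump_mutex);

        BanMap snapshot;
        {
            // Take a snapshot and clear the dirty flag under the data lock...
            std::lock_guard<std::mutex> lock(m_mutex);
            if (!m_dirty) return;
            snapshot = m_banned;
            m_dirty = false;
        }
        // ...then do the slow disk write without holding the data lock.
        if (!m_store.Write(snapshot)) {
            std::lock_guard<std::mutex> lock(m_mutex);
            m_dirty = true; // retry on the next dump
        }
    }
};
```

Compared to the old code, only `dump_mutex` is held during the disk I/O, so ban/unban operations on other threads are not blocked for the duration of the write.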
diff --git a/src/bench/addrman.cpp b/src/bench/addrman.cpp
index 2d94e835f0..3ca58b923e 100644
--- a/src/bench/addrman.cpp
+++ b/src/bench/addrman.cpp
@@ -16,6 +16,9 @@
static constexpr size_t NUM_SOURCES = 64;
static constexpr size_t NUM_ADDRESSES_PER_SOURCE = 256;
+static const std::vector<bool> EMPTY_ASMAP;
+static constexpr uint32_t ADDRMAN_CONSISTENCY_CHECK_RATIO{0};
+
static std::vector<CAddress> g_sources;
static std::vector<std::vector<CAddress>> g_addresses;
@@ -74,14 +77,14 @@ static void AddrManAdd(benchmark::Bench& bench)
CreateAddresses();
bench.run([&] {
- AddrMan addrman{/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0};
+ AddrMan addrman{EMPTY_ASMAP, /*deterministic=*/false, ADDRMAN_CONSISTENCY_CHECK_RATIO};
AddAddressesToAddrMan(addrman);
});
}
static void AddrManSelect(benchmark::Bench& bench)
{
- AddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
+ AddrMan addrman{EMPTY_ASMAP, /*deterministic=*/false, ADDRMAN_CONSISTENCY_CHECK_RATIO};
FillAddrMan(addrman);
@@ -93,7 +96,7 @@ static void AddrManSelect(benchmark::Bench& bench)
static void AddrManGetAddr(benchmark::Bench& bench)
{
- AddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
+ AddrMan addrman{EMPTY_ASMAP, /*deterministic=*/false, ADDRMAN_CONSISTENCY_CHECK_RATIO};
FillAddrMan(addrman);
@@ -122,7 +125,7 @@ static void AddrManAddThenGood(benchmark::Bench& bench)
//
// This has some overhead (exactly the result of AddrManAdd benchmark), but that overhead is constant so improvements in
// AddrMan::Good() will still be noticeable.
- AddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
+ AddrMan addrman{EMPTY_ASMAP, /*deterministic=*/false, ADDRMAN_CONSISTENCY_CHECK_RATIO};
AddAddressesToAddrMan(addrman);
markSomeAsGood(addrman);
diff --git a/src/bench/bench.cpp b/src/bench/bench.cpp
index f696396e12..9bd176f0a0 100644
--- a/src/bench/bench.cpp
+++ b/src/bench/bench.cpp
@@ -4,10 +4,10 @@
#include <bench/bench.h>
+#include <fs.h>
#include <test/util/setup_common.h>
#include <chrono>
-#include <fstream>
#include <functional>
#include <iostream>
#include <map>
@@ -19,6 +19,8 @@ using namespace std::chrono_literals;
const std::function<void(const std::string&)> G_TEST_LOG_FUN{};
+const std::function<std::vector<const char*>()> G_TEST_COMMAND_LINE_ARGUMENTS{};
+
namespace {
void GenerateTemplateResults(const std::vector<ankerl::nanobench::Result>& benchmarkResults, const std::string& filename, const char* tpl)
@@ -27,7 +29,7 @@ void GenerateTemplateResults(const std::vector<ankerl::nanobench::Result>& bench
// nothing to write, bail out
return;
}
- std::ofstream fout(filename);
+ fsbridge::ofstream fout{fs::PathFromString(filename)};
if (fout.is_open()) {
ankerl::nanobench::render(tpl, benchmarkResults, fout);
} else {
diff --git a/src/bench/checkblock.cpp b/src/bench/checkblock.cpp
index a9f3f5f84d..52e5cb743f 100644
--- a/src/bench/checkblock.cpp
+++ b/src/bench/checkblock.cpp
@@ -17,8 +17,8 @@
static void DeserializeBlockTest(benchmark::Bench& bench)
{
CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION);
- char a = '\0';
- stream.write(&a, 1); // Prevent compaction
+ std::byte a{0};
+ stream.write({&a, 1}); // Prevent compaction
bench.unit("block").run([&] {
CBlock block;
@@ -31,8 +31,8 @@ static void DeserializeBlockTest(benchmark::Bench& bench)
static void DeserializeAndCheckBlockTest(benchmark::Bench& bench)
{
CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION);
- char a = '\0';
- stream.write(&a, 1); // Prevent compaction
+ std::byte a{0};
+ stream.write({&a, 1}); // Prevent compaction
ArgsManager bench_args;
const auto chainParams = CreateChainParams(bench_args, CBaseChainParams::MAIN);
diff --git a/src/bench/rpc_blockchain.cpp b/src/bench/rpc_blockchain.cpp
index 9bc31461d2..2143bcf950 100644
--- a/src/bench/rpc_blockchain.cpp
+++ b/src/bench/rpc_blockchain.cpp
@@ -23,8 +23,8 @@ struct TestBlockAndIndex {
TestBlockAndIndex()
{
CDataStream stream(benchmark::data::block413567, SER_NETWORK, PROTOCOL_VERSION);
- char a = '\0';
- stream.write(&a, 1); // Prevent compaction
+ std::byte a{0};
+ stream.write({&a, 1}); // Prevent compaction
stream >> block;
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index edec883264..ec07114d6e 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -12,6 +12,7 @@
#include <consensus/consensus.h>
#include <core_io.h>
#include <key_io.h>
+#include <fs.h>
#include <policy/policy.h>
#include <policy/rbf.h>
#include <primitives/transaction.h>
@@ -158,7 +159,7 @@ static void RegisterLoad(const std::string& strInput)
std::string key = strInput.substr(0, pos);
std::string filename = strInput.substr(pos + 1, std::string::npos);
- FILE *f = fopen(filename.c_str(), "r");
+ FILE *f = fsbridge::fopen(filename.c_str(), "r");
if (!f) {
std::string strErr = "Cannot open file " + filename;
throw std::runtime_error(strErr);
@@ -433,13 +434,16 @@ static void MutateTxAddOutData(CMutableTransaction& tx, const std::string& strIn
if (pos==0)
throw std::runtime_error("TX output value not specified");
- if (pos != std::string::npos) {
+ if (pos == std::string::npos) {
+ pos = 0;
+ } else {
// Extract and validate VALUE
value = ExtractAndValidateValue(strInput.substr(0, pos));
+ ++pos;
}
// extract and validate DATA
- std::string strData = strInput.substr(pos + 1, std::string::npos);
+ const std::string strData{strInput.substr(pos, std::string::npos)};
if (!IsHex(strData))
throw std::runtime_error("invalid TX output data");
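The `outdata` change above makes the bare `DATA` case explicit instead of relying on `pos + 1` wrapping around to zero when `find(':')` returns `npos`. Roughly, the fixed control flow behaves like the sketch below (an illustration only; `ExtractValue` and `LooksLikeHex` are hypothetical stand-ins for `ExtractAndValidateValue` and the real hex check).

```cpp
#include <stdexcept>
#include <string>

// Hypothetical stand-ins for the real helpers.
static long long ExtractValue(const std::string& s) { return std::stoll(s); }
static bool LooksLikeHex(const std::string& s) { return !s.empty() && s.size() % 2 == 0; }

// Parse "VALUE:DATA" or bare "DATA" the way the fixed code does.
static void ParseOutData(const std::string& input, long long& value, std::string& data)
{
    size_t pos = input.find(':');
    if (pos == 0) throw std::runtime_error("TX output value not specified");

    if (pos == std::string::npos) {
        pos = 0;                                     // bare DATA: whole string is data
    } else {
        value = ExtractValue(input.substr(0, pos));  // VALUE:DATA form
        ++pos;                                       // skip ':' so substr(pos) starts at DATA
    }
    data = input.substr(pos);
    if (!LooksLikeHex(data)) throw std::runtime_error("invalid TX output data");
}
```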
diff --git a/src/chain.cpp b/src/chain.cpp
index e0c29372dd..b8158f7b0b 100644
--- a/src/chain.cpp
+++ b/src/chain.cpp
@@ -151,7 +151,7 @@ int64_t GetBlockProofEquivalentTime(const CBlockIndex& to, const CBlockIndex& fr
if (r.bits() > 63) {
return sign * std::numeric_limits<int64_t>::max();
}
- return sign * r.GetLow64();
+ return sign * int64_t(r.GetLow64());
}
/** Find the last common ancestor two blocks have.
diff --git a/src/chain.h b/src/chain.h
index 55bdf4cd56..24b5026aba 100644
--- a/src/chain.h
+++ b/src/chain.h
@@ -10,6 +10,7 @@
#include <consensus/params.h>
#include <flatfile.h>
#include <primitives/block.h>
+#include <sync.h>
#include <tinyformat.h>
#include <uint256.h>
@@ -37,6 +38,8 @@ static constexpr int64_t TIMESTAMP_WINDOW = MAX_FUTURE_BLOCK_TIME;
*/
static constexpr int64_t MAX_BLOCK_TIME_GAP = 90 * 60;
+extern RecursiveMutex cs_main;
+
class CBlockFileInfo
{
public:
@@ -161,13 +164,13 @@ public:
int nHeight{0};
//! Which # file this block is stored in (blk?????.dat)
- int nFile{0};
+ int nFile GUARDED_BY(::cs_main){0};
//! Byte offset within blk?????.dat where this block's data is stored
- unsigned int nDataPos{0};
+ unsigned int nDataPos GUARDED_BY(::cs_main){0};
//! Byte offset within rev?????.dat where this block's undo data is stored
- unsigned int nUndoPos{0};
+ unsigned int nUndoPos GUARDED_BY(::cs_main){0};
//! (memory only) Total amount of work (expected number of hashes) in the chain up to and including this block
arith_uint256 nChainWork{};
@@ -195,7 +198,7 @@ public:
//! load to avoid the block index being spuriously rewound.
//! @sa NeedsRedownload
//! @sa ActivateSnapshot
- uint32_t nStatus{0};
+ uint32_t nStatus GUARDED_BY(::cs_main){0};
//! block header
int32_t nVersion{0};
@@ -223,8 +226,9 @@ public:
{
}
- FlatFilePos GetBlockPos() const
+ FlatFilePos GetBlockPos() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
+ AssertLockHeld(::cs_main);
FlatFilePos ret;
if (nStatus & BLOCK_HAVE_DATA) {
ret.nFile = nFile;
@@ -233,8 +237,9 @@ public:
return ret;
}
- FlatFilePos GetUndoPos() const
+ FlatFilePos GetUndoPos() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
+ AssertLockHeld(::cs_main);
FlatFilePos ret;
if (nStatus & BLOCK_HAVE_UNDO) {
ret.nFile = nFile;
@@ -306,7 +311,9 @@ public:
//! Check whether this block index entry is valid up to the passed validity level.
bool IsValid(enum BlockStatus nUpTo = BLOCK_VALID_TRANSACTIONS) const
+ EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
+ AssertLockHeld(::cs_main);
assert(!(nUpTo & ~BLOCK_VALID_MASK)); // Only validity flags allowed.
if (nStatus & BLOCK_FAILED_MASK)
return false;
@@ -315,12 +322,17 @@ public:
//! @returns true if the block is assumed-valid; this means it is queued to be
//! validated by a background chainstate.
- bool IsAssumedValid() const { return nStatus & BLOCK_ASSUMED_VALID; }
+ bool IsAssumedValid() const EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
+ {
+ AssertLockHeld(::cs_main);
+ return nStatus & BLOCK_ASSUMED_VALID;
+ }
//! Raise the validity level of this block index entry.
//! Returns true if the validity was changed.
- bool RaiseValidity(enum BlockStatus nUpTo)
+ bool RaiseValidity(enum BlockStatus nUpTo) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
+ AssertLockHeld(::cs_main);
assert(!(nUpTo & ~BLOCK_VALID_MASK)); // Only validity flags allowed.
if (nStatus & BLOCK_FAILED_MASK) return false;
@@ -370,6 +382,7 @@ public:
SERIALIZE_METHODS(CDiskBlockIndex, obj)
{
+ LOCK(::cs_main);
int _nVersion = s.GetVersion();
if (!(s.GetType() & SER_GETHASH)) READWRITE(VARINT_MODE(_nVersion, VarIntMode::NONNEGATIVE_SIGNED));
@@ -462,7 +475,7 @@ public:
/** Return the maximal height in the chain. Is equal to chain.Tip() ? chain.Tip()->nHeight : -1. */
int Height() const
{
- return vChain.size() - 1;
+ return int(vChain.size()) - 1;
}
/** Set/initialize a chain with a given tip. */
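The `GUARDED_BY`/`EXCLUSIVE_LOCKS_REQUIRED` annotations added to `chain.h` let Clang's thread-safety analysis prove at compile time that the block-position fields are only accessed with `cs_main` held, while the runtime `AssertLockHeld` calls catch violations in builds without the analysis. A minimal sketch of the same idiom with plain Clang attributes is below (illustration only; the project's real macros live in `threadsafety.h`/`sync.h`).

```cpp
// Compile with: clang++ -std=c++17 -Wthread-safety -c sketch.cpp
#include <mutex>

#define CAPABILITY(x) __attribute__((capability(x)))
#define GUARDED_BY(x) __attribute__((guarded_by(x)))
#define EXCLUSIVE_LOCKS_REQUIRED(x) __attribute__((exclusive_locks_required(x)))
#define EXCLUSIVE_LOCK_FUNCTION(x) __attribute__((exclusive_lock_function(x)))
#define UNLOCK_FUNCTION(x) __attribute__((unlock_function(x)))

// An annotated mutex so the analysis knows what "holding the lock" means.
class CAPABILITY("mutex") Mutex
{
    std::mutex m;
public:
    void lock() EXCLUSIVE_LOCK_FUNCTION() { m.lock(); }
    void unlock() UNLOCK_FUNCTION() { m.unlock(); }
};

Mutex g_mutex;
int g_file_number GUARDED_BY(g_mutex) = 0;

// Callers must already hold g_mutex, like GetBlockPos() requires cs_main above.
int GetFileNumber() EXCLUSIVE_LOCKS_REQUIRED(g_mutex)
{
    return g_file_number; // OK: the analysis assumes g_mutex is held here.
}

int ReadFileNumber()
{
    g_mutex.lock();
    const int n{GetFileNumber()}; // OK: g_mutex is held on this path.
    g_mutex.unlock();
    return n;
    // Calling GetFileNumber() without the lock would be a compile-time
    // -Wthread-safety warning, which is what the new annotations buy.
}
```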
diff --git a/src/common/bloom.cpp b/src/common/bloom.cpp
index 0bb72dbcbb..c744d05a0e 100644
--- a/src/common/bloom.cpp
+++ b/src/common/bloom.cpp
@@ -62,7 +62,7 @@ void CBloomFilter::insert(const COutPoint& outpoint)
{
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << outpoint;
- insert(stream);
+ insert(MakeUCharSpan(stream));
}
bool CBloomFilter::contains(Span<const unsigned char> vKey) const
@@ -83,7 +83,7 @@ bool CBloomFilter::contains(const COutPoint& outpoint) const
{
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
stream << outpoint;
- return contains(stream);
+ return contains(MakeUCharSpan(stream));
}
bool CBloomFilter::IsWithinSizeConstraints() const
diff --git a/src/core_write.cpp b/src/core_write.cpp
index 067f1e4f4e..5ea62cf3ed 100644
--- a/src/core_write.cpp
+++ b/src/core_write.cpp
@@ -8,6 +8,7 @@
#include <consensus/consensus.h>
#include <consensus/validation.h>
#include <key_io.h>
+#include <script/descriptor.h>
#include <script/script.h>
#include <script/standard.h>
#include <serialize.h>
@@ -152,6 +153,7 @@ void ScriptPubKeyToUniv(const CScript& scriptPubKey, UniValue& out, bool include
CTxDestination address;
out.pushKV("asm", ScriptToAsmStr(scriptPubKey));
+ out.pushKV("desc", InferDescriptor(scriptPubKey, DUMMY_SIGNING_PROVIDER)->ToString());
if (include_hex) out.pushKV("hex", HexStr(scriptPubKey));
std::vector<std::vector<unsigned char>> solns;
diff --git a/src/crypto/chacha_poly_aead.cpp b/src/crypto/chacha_poly_aead.cpp
index 19087b7d75..4f3e6f7fa3 100644
--- a/src/crypto/chacha_poly_aead.cpp
+++ b/src/crypto/chacha_poly_aead.cpp
@@ -73,7 +73,7 @@ bool ChaCha20Poly1305AEAD::Crypt(uint64_t seqnr_payload, uint64_t seqnr_aad, int
return false;
}
memory_cleanse(expected_tag, sizeof(expected_tag));
- // MAC has been successfully verified, make sure we don't covert it in decryption
+ // MAC has been successfully verified, make sure we don't convert it in decryption
src_len -= POLY1305_TAGLEN;
}
diff --git a/src/dbwrapper.h b/src/dbwrapper.h
index 12db0fffcc..1109cb5888 100644
--- a/src/dbwrapper.h
+++ b/src/dbwrapper.h
@@ -147,7 +147,7 @@ public:
template<typename K> bool GetKey(K& key) {
leveldb::Slice slKey = piter->key();
try {
- CDataStream ssKey(MakeUCharSpan(slKey), SER_DISK, CLIENT_VERSION);
+ CDataStream ssKey{MakeByteSpan(slKey), SER_DISK, CLIENT_VERSION};
ssKey >> key;
} catch (const std::exception&) {
return false;
@@ -158,7 +158,7 @@ public:
template<typename V> bool GetValue(V& value) {
leveldb::Slice slValue = piter->value();
try {
- CDataStream ssValue(MakeUCharSpan(slValue), SER_DISK, CLIENT_VERSION);
+ CDataStream ssValue{MakeByteSpan(slValue), SER_DISK, CLIENT_VERSION};
ssValue.Xor(dbwrapper_private::GetObfuscateKey(parent));
ssValue >> value;
} catch (const std::exception&) {
@@ -244,7 +244,7 @@ public:
dbwrapper_private::HandleError(status);
}
try {
- CDataStream ssValue(MakeUCharSpan(strValue), SER_DISK, CLIENT_VERSION);
+ CDataStream ssValue{MakeByteSpan(strValue), SER_DISK, CLIENT_VERSION};
ssValue.Xor(obfuscate_key);
ssValue >> value;
} catch (const std::exception&) {
diff --git a/src/fs.cpp b/src/fs.cpp
index 34a0348578..8fcadcb3ef 100644
--- a/src/fs.cpp
+++ b/src/fs.cpp
@@ -7,7 +7,6 @@
#ifndef WIN32
#include <cstring>
#include <fcntl.h>
-#include <string>
#include <sys/file.h>
#include <sys/utsname.h>
#include <unistd.h>
@@ -20,6 +19,9 @@
#include <windows.h>
#endif
+#include <cassert>
+#include <string>
+
namespace fsbridge {
FILE *fopen(const fs::path& p, const char *mode)
diff --git a/src/fs.h b/src/fs.h
index 9f18794539..bc36636084 100644
--- a/src/fs.h
+++ b/src/fs.h
@@ -88,7 +88,7 @@ static inline auto quoted(const std::string& s)
// Allow safe path append operations.
static inline path operator+(path p1, path p2)
{
- p1 += std::move(p2);
+ p1 += static_cast<boost::filesystem::path&&>(p2);
return p1;
}
diff --git a/src/hash.h b/src/hash.h
index 1456a899d8..9f582842c1 100644
--- a/src/hash.h
+++ b/src/hash.h
@@ -111,8 +111,9 @@ public:
int GetType() const { return nType; }
int GetVersion() const { return nVersion; }
- void write(const char *pch, size_t size) {
- ctx.Write((const unsigned char*)pch, size);
+ void write(Span<const std::byte> src)
+ {
+ ctx.Write(UCharCast(src.data()), src.size());
}
/** Compute the double-SHA256 hash of all data written to this object.
@@ -162,18 +163,18 @@ private:
public:
explicit CHashVerifier(Source* source_) : CHashWriter(source_->GetType(), source_->GetVersion()), source(source_) {}
- void read(char* pch, size_t nSize)
+ void read(Span<std::byte> dst)
{
- source->read(pch, nSize);
- this->write(pch, nSize);
+ source->read(dst);
+ this->write(dst);
}
void ignore(size_t nSize)
{
- char data[1024];
+ std::byte data[1024];
while (nSize > 0) {
size_t now = std::min<size_t>(nSize, 1024);
- read(data, now);
+ read({data, now});
nSize -= now;
}
}
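The stream-style `write`/`read` methods above now take a span of `std::byte` instead of a `(char*, size_t)` pair, so callers pass a bounded view and the element type documents that the bytes are untyped. A small sketch of the calling convention is shown here, using C++20 `std::span` as a stand-in for the project's own `Span` class and a dummy writer in place of `CHashWriter`.

```cpp
#include <cstddef>
#include <cstdint>
#include <span>
#include <vector>

class DummyHashWriter
{
    std::vector<std::byte> m_sink; // pretend this is a hash context

public:
    // One bounded argument instead of a pointer/length pair.
    void write(std::span<const std::byte> src)
    {
        m_sink.insert(m_sink.end(), src.begin(), src.end());
    }
};

int main()
{
    DummyHashWriter h;

    std::byte one{0};
    h.write({&one, 1});                      // same shape as stream.write({&a, 1})

    const uint8_t msg[] = {0x01, 0x02, 0x03};
    h.write(std::as_bytes(std::span{msg}));  // analogous to AsBytes(Span{...})
    return 0;
}
```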
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
index e9aeb58194..e1d807f39a 100644
--- a/src/index/txindex.cpp
+++ b/src/index/txindex.cpp
@@ -59,7 +59,9 @@ bool TxIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
// Exclude genesis block transaction because outputs are not spendable.
if (pindex->nHeight == 0) return true;
- CDiskTxPos pos(pindex->GetBlockPos(), GetSizeOfCompactSize(block.vtx.size()));
+ CDiskTxPos pos{
+ WITH_LOCK(::cs_main, return pindex->GetBlockPos()),
+ GetSizeOfCompactSize(block.vtx.size())};
std::vector<std::pair<uint256, CDiskTxPos>> vPos;
vPos.reserve(block.vtx.size());
for (const auto& tx : block.vtx) {
diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h
index 4f5105a5c1..ddfb4bda95 100644
--- a/src/interfaces/chain.h
+++ b/src/interfaces/chain.h
@@ -116,9 +116,6 @@ public:
//! or one of its ancestors.
virtual std::optional<int> findLocatorFork(const CBlockLocator& locator) = 0;
- //! Check if transaction will be final given chain height current time.
- virtual bool checkFinalTx(const CTransaction& tx) = 0;
-
//! Return whether node has the block and optionally return block metadata
//! or contents.
virtual bool findBlock(const uint256& hash, const FoundBlock& block={}) = 0;
diff --git a/src/interfaces/wallet.h b/src/interfaces/wallet.h
index aa33a3c951..f26ac866dc 100644
--- a/src/interfaces/wallet.h
+++ b/src/interfaces/wallet.h
@@ -405,7 +405,6 @@ struct WalletTxStatus
int depth_in_main_chain;
unsigned int time_received;
uint32_t lock_time;
- bool is_final;
bool is_trusted;
bool is_abandoned;
bool is_coinbase;
diff --git a/src/net.cpp b/src/net.cpp
index 7b8a87f90c..be56d1e2d2 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -112,9 +112,9 @@ static const uint64_t RANDOMIZER_ID_ADDRCACHE = 0x1cf2e4ddd306dda9ULL; // SHA256
//
bool fDiscover = true;
bool fListen = true;
-RecursiveMutex cs_mapLocalHost;
-std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(cs_mapLocalHost);
-static bool vfLimited[NET_MAX] GUARDED_BY(cs_mapLocalHost) = {};
+Mutex g_maplocalhost_mutex;
+std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(g_maplocalhost_mutex);
+static bool vfLimited[NET_MAX] GUARDED_BY(g_maplocalhost_mutex) = {};
std::string strSubVersion;
void CConnman::AddAddrFetch(const std::string& strDest)
@@ -137,7 +137,7 @@ bool GetLocal(CService& addr, const CNetAddr *paddrPeer)
int nBestScore = -1;
int nBestReachability = -1;
{
- LOCK(cs_mapLocalHost);
+ LOCK(g_maplocalhost_mutex);
for (const auto& entry : mapLocalHost)
{
int nScore = entry.second.nScore;
@@ -193,7 +193,7 @@ CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices)
static int GetnScore(const CService& addr)
{
- LOCK(cs_mapLocalHost);
+ LOCK(g_maplocalhost_mutex);
const auto it = mapLocalHost.find(addr);
return (it != mapLocalHost.end()) ? it->second.nScore : 0;
}
@@ -264,7 +264,7 @@ bool AddLocal(const CService& addr_, int nScore)
LogPrintf("AddLocal(%s,%i)\n", addr.ToString(), nScore);
{
- LOCK(cs_mapLocalHost);
+ LOCK(g_maplocalhost_mutex);
const auto [it, is_newly_added] = mapLocalHost.emplace(addr, LocalServiceInfo());
LocalServiceInfo &info = it->second;
if (is_newly_added || nScore >= info.nScore) {
@@ -283,7 +283,7 @@ bool AddLocal(const CNetAddr &addr, int nScore)
void RemoveLocal(const CService& addr)
{
- LOCK(cs_mapLocalHost);
+ LOCK(g_maplocalhost_mutex);
LogPrintf("RemoveLocal(%s)\n", addr.ToString());
mapLocalHost.erase(addr);
}
@@ -292,13 +292,13 @@ void SetReachable(enum Network net, bool reachable)
{
if (net == NET_UNROUTABLE || net == NET_INTERNAL)
return;
- LOCK(cs_mapLocalHost);
+ LOCK(g_maplocalhost_mutex);
vfLimited[net] = !reachable;
}
bool IsReachable(enum Network net)
{
- LOCK(cs_mapLocalHost);
+ LOCK(g_maplocalhost_mutex);
return !vfLimited[net];
}
@@ -310,7 +310,7 @@ bool IsReachable(const CNetAddr &addr)
/** vote for a local address */
bool SeenLocal(const CService& addr)
{
- LOCK(cs_mapLocalHost);
+ LOCK(g_maplocalhost_mutex);
const auto it = mapLocalHost.find(addr);
if (it == mapLocalHost.end()) return false;
++it->second.nScore;
@@ -321,7 +321,7 @@ bool SeenLocal(const CService& addr)
/** check whether a given address is potentially local */
bool IsLocal(const CService& addr)
{
- LOCK(cs_mapLocalHost);
+ LOCK(g_maplocalhost_mutex);
return mapLocalHost.count(addr) > 0;
}
@@ -553,12 +553,14 @@ std::string ConnectionTypeAsString(ConnectionType conn_type)
CService CNode::GetAddrLocal() const
{
- LOCK(cs_addrLocal);
+ AssertLockNotHeld(m_addr_local_mutex);
+ LOCK(m_addr_local_mutex);
return addrLocal;
}
void CNode::SetAddrLocal(const CService& addrLocalIn) {
- LOCK(cs_addrLocal);
+ AssertLockNotHeld(m_addr_local_mutex);
+ LOCK(m_addr_local_mutex);
if (addrLocal.IsValid()) {
error("Addr local already set for node: %i. Refusing to change from %s to %s", id, addrLocal.ToString(), addrLocalIn.ToString());
} else {
@@ -657,7 +659,7 @@ bool CNode::ReceiveMsgBytes(Span<const uint8_t> msg_bytes, bool& complete)
// Store received bytes per message command
// to prevent a memory DOS, only allow valid commands
- auto i = mapRecvBytesPerMsgCmd.find(msg.m_command);
+ auto i = mapRecvBytesPerMsgCmd.find(msg.m_type);
if (i == mapRecvBytesPerMsgCmd.end()) {
i = mapRecvBytesPerMsgCmd.find(NET_MESSAGE_COMMAND_OTHER);
}
@@ -747,7 +749,7 @@ CNetMessage V1TransportDeserializer::GetMessage(const std::chrono::microseconds
CNetMessage msg(std::move(vRecv));
// store command string, time, and sizes
- msg.m_command = hdr.GetCommand();
+ msg.m_type = hdr.GetCommand();
msg.m_time = time;
msg.m_message_size = hdr.nMessageSize;
msg.m_raw_message_size = hdr.nMessageSize + CMessageHeader::HEADER_SIZE;
@@ -760,7 +762,7 @@ CNetMessage V1TransportDeserializer::GetMessage(const std::chrono::microseconds
// Check checksum and header command string
if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) {
LogPrint(BCLog::NET, "Header error: Wrong checksum (%s, %u bytes), expected %s was %s, peer=%d\n",
- SanitizeString(msg.m_command), msg.m_message_size,
+ SanitizeString(msg.m_type), msg.m_message_size,
HexStr(Span{hash}.first(CMessageHeader::CHECKSUM_SIZE)),
HexStr(hdr.pchChecksum),
m_node_id);
@@ -1878,8 +1880,8 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
auto start = GetTime<std::chrono::microseconds>();
// Minimum time before next feeler connection (in microseconds).
- auto next_feeler = PoissonNextSend(start, FEELER_INTERVAL);
- auto next_extra_block_relay = PoissonNextSend(start, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
+ auto next_feeler = GetExponentialRand(start, FEELER_INTERVAL);
+ auto next_extra_block_relay = GetExponentialRand(start, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
const bool dnsseed = gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED);
bool add_fixed_seeds = gArgs.GetBoolArg("-fixedseeds", DEFAULT_FIXEDSEEDS);
@@ -1999,7 +2001,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
//
// This is similar to the logic for trying extra outbound (full-relay)
// peers, except:
- // - we do this all the time on a poisson timer, rather than just when
+ // - we do this all the time on an exponential timer, rather than just when
// our tip is stale
// - we potentially disconnect our next-youngest block-relay-only peer, if our
// newest block-relay-only peer delivers a block more recently.
@@ -2008,10 +2010,10 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// Because we can promote these connections to block-relay-only
// connections, they do not get their own ConnectionType enum
// (similar to how we deal with extra outbound peers).
- next_extra_block_relay = PoissonNextSend(now, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
+ next_extra_block_relay = GetExponentialRand(now, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
conn_type = ConnectionType::BLOCK_RELAY;
} else if (now > next_feeler) {
- next_feeler = PoissonNextSend(now, FEELER_INTERVAL);
+ next_feeler = GetExponentialRand(now, FEELER_INTERVAL);
conn_type = ConnectionType::FEELER;
fFeeler = true;
} else {
@@ -3058,23 +3060,6 @@ bool CConnman::ForNode(NodeId id, std::function<bool(CNode* pnode)> func)
return found != nullptr && NodeFullyConnected(found) && func(found);
}
-std::chrono::microseconds CConnman::PoissonNextSendInbound(std::chrono::microseconds now, std::chrono::seconds average_interval)
-{
- if (m_next_send_inv_to_incoming.load() < now) {
- // If this function were called from multiple threads simultaneously
- // it would possible that both update the next send variable, and return a different result to their caller.
- // This is not possible in practice as only the net processing thread invokes this function.
- m_next_send_inv_to_incoming = PoissonNextSend(now, average_interval);
- }
- return m_next_send_inv_to_incoming;
-}
-
-std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, std::chrono::seconds average_interval)
-{
- double unscaled = -log1p(GetRand(1ULL << 48) * -0.0000000000000035527136788 /* -1/2^48 */);
- return now + std::chrono::duration_cast<std::chrono::microseconds>(unscaled * average_interval + 0.5us);
-}
-
CSipHasher CConnman::GetDeterministicRandomizer(uint64_t id) const
{
return CSipHasher(nSeed0, nSeed1).Write(id);
@@ -3106,11 +3091,11 @@ void CaptureMessage(const CAddress& addr, const std::string& msg_type, const Spa
CAutoFile f(fsbridge::fopen(path, "ab"), SER_DISK, CLIENT_VERSION);
ser_writedata64(f, now.count());
- f.write(msg_type.data(), msg_type.length());
+ f.write(MakeByteSpan(msg_type));
for (auto i = msg_type.length(); i < CMessageHeader::COMMAND_SIZE; ++i) {
f << uint8_t{'\0'};
}
uint32_t size = data.size();
ser_writedata32(f, size);
- f.write((const char*)data.data(), data.size());
+ f.write(AsBytes(data));
}
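For reference, `CaptureMessage` above writes one length-prefixed record per message: a serialized timestamp, the message type padded with NUL bytes to the header's command width, a 4-byte payload size, then the payload. A hedged reader-side sketch follows; the 12-byte command width and little-endian integer encoding are assumptions about the on-disk format rather than something shown in this hunk.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

struct CapturedMessage {
    uint64_t time;                       // timestamp written by ser_writedata64
    std::string type;                    // message type, NUL padding stripped
    std::vector<unsigned char> payload;  // raw payload bytes
};

static uint64_t DecodeLE(const unsigned char* p, int bytes)
{
    uint64_t v{0};
    for (int i = 0; i < bytes; ++i) v |= uint64_t(p[i]) << (8 * i);
    return v;
}

static bool ReadCapturedMessage(std::FILE* f, CapturedMessage& out)
{
    constexpr size_t COMMAND_SIZE{12}; // assumed value of CMessageHeader::COMMAND_SIZE
    unsigned char header[8 + COMMAND_SIZE + 4];
    if (std::fread(header, 1, sizeof(header), f) != sizeof(header)) return false;

    out.time = DecodeLE(header, 8);

    size_t len{0};
    while (len < COMMAND_SIZE && header[8 + len] != '\0') ++len;
    out.type.assign(reinterpret_cast<const char*>(header + 8), len);

    const uint32_t size{uint32_t(DecodeLE(header + 8 + COMMAND_SIZE, 4))};
    out.payload.resize(size);
    return size == 0 || std::fread(out.payload.data(), 1, size, f) == size;
}
```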
diff --git a/src/net.h b/src/net.h
index c79abb91c3..4301733525 100644
--- a/src/net.h
+++ b/src/net.h
@@ -230,8 +230,8 @@ struct LocalServiceInfo {
uint16_t nPort;
};
-extern RecursiveMutex cs_mapLocalHost;
-extern std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(cs_mapLocalHost);
+extern Mutex g_maplocalhost_mutex;
+extern std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(g_maplocalhost_mutex);
extern const std::string NET_MESSAGE_COMMAND_OTHER;
typedef std::map<std::string, uint64_t> mapMsgCmdSize; //command, total bytes
@@ -278,7 +278,7 @@ public:
/** Transport protocol agnostic message container.
* Ideally it should only contain receive time, payload,
- * command and size.
+ * type and size.
*/
class CNetMessage {
public:
@@ -286,7 +286,7 @@ public:
std::chrono::microseconds m_time{0}; //!< time of message receipt
uint32_t m_message_size{0}; //!< size of the payload
uint32_t m_raw_message_size{0}; //!< used wire size of the message (including header/checksum)
- std::string m_command;
+ std::string m_type;
CNetMessage(CDataStream&& recv_in) : m_recv(std::move(recv_in)) {}
@@ -618,9 +618,9 @@ public:
return m_greatest_common_version;
}
- CService GetAddrLocal() const;
+ CService GetAddrLocal() const LOCKS_EXCLUDED(m_addr_local_mutex);
//! May not be called more than once
- void SetAddrLocal(const CService& addrLocalIn);
+ void SetAddrLocal(const CService& addrLocalIn) LOCKS_EXCLUDED(m_addr_local_mutex);
CNode* AddRef()
{
@@ -693,8 +693,8 @@ private:
std::list<CNetMessage> vRecvMsg; // Used only by SocketHandler thread
// Our address, as reported by the peer
- CService addrLocal GUARDED_BY(cs_addrLocal);
- mutable RecursiveMutex cs_addrLocal;
+ CService addrLocal GUARDED_BY(m_addr_local_mutex);
+ mutable Mutex m_addr_local_mutex;
mapMsgCmdSize mapSendBytesPerMsgCmd GUARDED_BY(cs_vSend);
mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv);
@@ -936,12 +936,6 @@ public:
void WakeMessageHandler();
- /** Attempts to obfuscate tx time through exponentially distributed emitting.
- Works assuming that a single interval is used.
- Variable intervals will result in privacy decrease.
- */
- std::chrono::microseconds PoissonNextSendInbound(std::chrono::microseconds now, std::chrono::seconds average_interval);
-
/** Return true if we should disconnect the peer for failing an inactivity check. */
bool ShouldRunInactivityChecks(const CNode& node, std::chrono::seconds now) const;
@@ -1221,8 +1215,6 @@ private:
*/
std::atomic_bool m_start_extra_block_relay_peers{false};
- std::atomic<std::chrono::microseconds> m_next_send_inv_to_incoming{0us};
-
/**
* A vector of -bind=<address>:<port>=onion arguments each of which is
* an address and port that are designated for incoming Tor connections.
@@ -1270,9 +1262,6 @@ private:
friend struct ConnmanTestMsg;
};
-/** Return a timestamp in the future (in microseconds) for exponentially distributed events. */
-std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, std::chrono::seconds average_interval);
-
/** Dump binary message to file, with timestamp */
void CaptureMessage(const CAddress& addr, const std::string& msg_type, const Span<const unsigned char>& data, bool is_incoming);
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 273cb4fccb..3cebca1a77 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -320,7 +320,7 @@ public:
/** Implement PeerManager */
void StartScheduledTasks(CScheduler& scheduler) override;
void CheckForStaleTipAndEvictPeers() override;
- bool FetchBlock(NodeId id, const uint256& hash, const CBlockIndex& index) override;
+ std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) override;
bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) const override;
bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; }
void SendPings() override;
@@ -450,6 +450,8 @@ private:
*/
std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
+ std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us};
+
/** Number of nodes with fSyncStarted. */
int nSyncStarted GUARDED_BY(cs_main) = 0;
@@ -524,6 +526,15 @@ private:
Mutex m_recent_confirmed_transactions_mutex;
CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex){48'000, 0.000'001};
+ /**
+ * For sending `inv`s to inbound peers, we use a single (exponentially
+ * distributed) timer for all peers. If we used a separate timer for each
+ * peer, a spy node could make multiple inbound connections to us to
+ * accurately determine when we received the transaction (and potentially
+ * determine the transaction's origin). */
+ std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now,
+ std::chrono::seconds average_interval);
+
/** Have we requested this block from a peer */
bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -825,6 +836,18 @@ static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUS
nPreferredDownload += state->fPreferredDownload;
}
+std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now,
+ std::chrono::seconds average_interval)
+{
+ if (m_next_inv_to_inbounds.load() < now) {
+ // If this function were called from multiple threads simultaneously
+ // it would be possible that both update the next send variable and return a different result to their callers.
+ // This is not possible in practice as only the net processing thread invokes this function.
+ m_next_inv_to_inbounds = GetExponentialRand(now, average_interval);
+ }
+ return m_next_inv_to_inbounds;
+}
+
bool PeerManagerImpl::IsBlockRequested(const uint256& hash)
{
return mapBlocksInFlight.find(hash) != mapBlocksInFlight.end();
@@ -1437,39 +1460,39 @@ bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
(GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
}
-bool PeerManagerImpl::FetchBlock(NodeId id, const uint256& hash, const CBlockIndex& index)
+std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBlockIndex& block_index)
{
- if (fImporting || fReindex) return false;
+ if (fImporting) return "Importing...";
+ if (fReindex) return "Reindexing...";
LOCK(cs_main);
// Ensure this peer exists and hasn't been disconnected
- CNodeState* state = State(id);
- if (state == nullptr) return false;
+ CNodeState* state = State(peer_id);
+ if (state == nullptr) return "Peer does not exist";
// Ignore pre-segwit peers
- if (!state->fHaveWitness) return false;
+ if (!state->fHaveWitness) return "Pre-SegWit peer";
- // Mark block as in-flight unless it already is
- if (!BlockRequested(id, index)) return false;
+ // Mark block as in-flight unless it already is (for this peer).
+ // If a block was already in-flight for a different peer, its BLOCKTXN
+ // response will be dropped.
+ if (!BlockRequested(peer_id, block_index)) return "Already requested from this peer";
// Construct message to request the block
+ const uint256& hash{block_index.GetBlockHash()};
std::vector<CInv> invs{CInv(MSG_BLOCK | MSG_WITNESS_FLAG, hash)};
// Send block request message to the peer
- bool success = m_connman.ForNode(id, [this, &invs](CNode* node) {
+ bool success = m_connman.ForNode(peer_id, [this, &invs](CNode* node) {
const CNetMsgMaker msgMaker(node->GetCommonVersion());
this->m_connman.PushMessage(node, msgMaker.Make(NetMsgType::GETDATA, invs));
return true;
});
- if (success) {
- LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
- hash.ToString(), id);
- } else {
- RemoveBlockRequest(hash);
- LogPrint(BCLog::NET, "Failed to request block %s from peer=%d\n",
- hash.ToString(), id);
- }
- return success;
+ if (!success) return "Peer not fully connected";
+
+ LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n",
+ hash.ToString(), peer_id);
+ return std::nullopt;
}
std::unique_ptr<PeerManager> PeerManager::make(const CChainParams& chainparams, CConnman& connman, AddrMan& addrman,
@@ -1857,7 +1880,7 @@ void PeerManagerImpl::ProcessGetBlockData(CNode& pfrom, Peer& peer, const CInv&
// Fast-path: in this case it is possible to serve the block directly from disk,
// as the network format matches the format on disk
std::vector<uint8_t> block_data;
- if (!ReadRawBlockFromDisk(block_data, pindex, m_chainparams.MessageStart())) {
+ if (!ReadRawBlockFromDisk(block_data, pindex->GetBlockPos(), m_chainparams.MessageStart())) {
assert(!"cannot load block from disk");
}
m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::BLOCK, Span{block_data}));
@@ -4154,32 +4177,28 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt
pfrom->GetId(),
pfrom->m_addr_name.c_str(),
pfrom->ConnectionTypeAsString().c_str(),
- msg.m_command.c_str(),
+ msg.m_type.c_str(),
msg.m_recv.size(),
msg.m_recv.data()
);
if (gArgs.GetBoolArg("-capturemessages", false)) {
- CaptureMessage(pfrom->addr, msg.m_command, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true);
+ CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true);
}
msg.SetVersion(pfrom->GetCommonVersion());
- const std::string& msg_type = msg.m_command;
-
- // Message size
- unsigned int nMessageSize = msg.m_message_size;
try {
- ProcessMessage(*pfrom, msg_type, msg.m_recv, msg.m_time, interruptMsgProc);
+ ProcessMessage(*pfrom, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc);
if (interruptMsgProc) return false;
{
LOCK(peer->m_getdata_requests_mutex);
if (!peer->m_getdata_requests.empty()) fMoreWork = true;
}
} catch (const std::exception& e) {
- LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg_type), nMessageSize, e.what(), typeid(e).name());
+ LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name());
} catch (...) {
- LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg_type), nMessageSize);
+ LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size);
}
return fMoreWork;
@@ -4434,13 +4453,13 @@ void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::micros
FastRandomContext insecure_rand;
PushAddress(peer, *local_addr, insecure_rand);
}
- peer.m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
+ peer.m_next_local_addr_send = GetExponentialRand(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
}
// We sent an `addr` message to this peer recently. Nothing more to do.
if (current_time <= peer.m_next_addr_send) return;
- peer.m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
+ peer.m_next_addr_send = GetExponentialRand(current_time, AVG_ADDRESS_BROADCAST_INTERVAL);
if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) {
// Should be impossible since we always check size before adding to
@@ -4512,7 +4531,7 @@ void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, std::chrono::microseconds c
m_connman.PushMessage(&pto, CNetMsgMaker(pto.GetCommonVersion()).Make(NetMsgType::FEEFILTER, filterToSend));
pto.m_tx_relay->lastSentFeeFilter = filterToSend;
}
- pto.m_tx_relay->m_next_send_feefilter = PoissonNextSend(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
+ pto.m_tx_relay->m_next_send_feefilter = GetExponentialRand(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL);
}
// If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY
// until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY.
@@ -4792,9 +4811,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
if (pto->m_tx_relay->nNextInvSend < current_time) {
fSendTrickle = true;
if (pto->IsInboundConn()) {
- pto->m_tx_relay->nNextInvSend = m_connman.PoissonNextSendInbound(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
+ pto->m_tx_relay->nNextInvSend = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL);
} else {
- pto->m_tx_relay->nNextInvSend = PoissonNextSend(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
+ pto->m_tx_relay->nNextInvSend = GetExponentialRand(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL);
}
}
diff --git a/src/net_processing.h b/src/net_processing.h
index 27775cea97..e30f9f516c 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -45,12 +45,11 @@ public:
/**
* Attempt to manually fetch block from a given peer. We must already have the header.
*
- * @param[in] id The peer id
- * @param[in] hash The block hash
- * @param[in] pindex The blockindex
- * @returns Whether a request was successfully made
+ * @param[in] peer_id The peer id
+ * @param[in] block_index The blockindex
+ * @returns std::nullopt if a request was successfully made, otherwise an error message
*/
- virtual bool FetchBlock(NodeId id, const uint256& hash, const CBlockIndex& pindex) = 0;
+ virtual std::optional<std::string> FetchBlock(NodeId peer_id, const CBlockIndex& block_index) = 0;
/** Begin running background tasks, should only be called once */
virtual void StartScheduledTasks(CScheduler& scheduler) = 0;
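The new `FetchBlock` contract documented above reports failure as an error string and success as `std::nullopt`, which maps naturally onto RPC error handling. A hedged caller-side sketch follows; `PeerManagerLike` is a hypothetical stand-in mirroring the documented interface, and throwing `std::runtime_error` only illustrates how an RPC wrapper might surface the message.

```cpp
#include <cstdint>
#include <optional>
#include <stdexcept>
#include <string>

class CBlockIndex; // opaque for this sketch

// Hypothetical minimal interface mirroring the documented contract.
struct PeerManagerLike {
    std::optional<std::string> FetchBlock(int64_t /*peer_id*/, const CBlockIndex& /*index*/)
    {
        return std::nullopt; // stand-in: pretend the request was always sent
    }
};

// How a caller might surface the error string (illustration only).
void RequestBlockOrThrow(PeerManagerLike& peerman, int64_t peer_id, const CBlockIndex& index)
{
    if (const auto err{peerman.FetchBlock(peer_id, index)}) {
        throw std::runtime_error(*err); // e.g. "Peer does not exist"
    }
    // std::nullopt: the getdata request was sent to the peer.
}
```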
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index cbfdcb6f11..7691c9a5ce 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -429,6 +429,7 @@ CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
bool IsBlockPruned(const CBlockIndex* pblockindex)
{
+ AssertLockHeld(::cs_main);
return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
}
@@ -513,7 +514,8 @@ static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const
bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
{
- FlatFilePos pos = pindex->GetUndoPos();
+ const FlatFilePos pos{WITH_LOCK(::cs_main, return pindex->GetUndoPos())};
+
if (pos.IsNull()) {
return error("%s: no undo data available", __func__);
}
@@ -712,6 +714,7 @@ static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessa
bool BlockManager::WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
{
+ AssertLockHeld(::cs_main);
// Write undo information to disk
if (pindex->GetUndoPos().IsNull()) {
FlatFilePos _pos;
@@ -810,7 +813,7 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c
}
block.resize(blk_size); // Zeroing of memory is intentional here
- filein.read((char*)block.data(), blk_size);
+ filein.read(MakeWritableByteSpan(block));
} catch (const std::exception& e) {
return error("%s: Read from block file failed: %s for %s", __func__, e.what(), pos.ToString());
}
@@ -818,17 +821,6 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c
return true;
}
-bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start)
-{
- FlatFilePos block_pos;
- {
- LOCK(cs_main);
- block_pos = pindex->GetBlockPos();
- }
-
- return ReadRawBlockFromDisk(block, block_pos, message_start);
-}
-
/** Store block on disk. If dbp is non-nullptr, the file is known to already reside on disk */
FlatFilePos BlockManager::SaveBlockToDisk(const CBlock& block, int nHeight, CChain& active_chain, const CChainParams& chainparams, const FlatFilePos* dbp)
{
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index 78c9210892..42e46797d2 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -7,12 +7,15 @@
#include <fs.h>
#include <protocol.h> // For CMessageHeader::MessageStartChars
+#include <sync.h>
#include <txdb.h>
#include <atomic>
#include <cstdint>
#include <vector>
+extern RecursiveMutex cs_main;
+
class ArgsManager;
class BlockValidationState;
class CBlock;
@@ -146,7 +149,8 @@ public:
/** Get block file info entry for one block file */
CBlockFileInfo* GetBlockFileInfo(size_t n);
- bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams);
+ bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
+ EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, CChain& active_chain, const CChainParams& chainparams, const FlatFilePos* dbp);
@@ -163,7 +167,7 @@ public:
};
//! Check whether the block associated with this index entry is pruned or not.
-bool IsBlockPruned(const CBlockIndex* pblockindex);
+bool IsBlockPruned(const CBlockIndex* pblockindex) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
void CleanupBlockRevFiles();
@@ -181,7 +185,6 @@ void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune);
bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::Params& consensusParams);
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams);
bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, const CMessageHeader::MessageStartChars& message_start);
-bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start);
bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex);
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index 1a48957f0f..ffad289fa9 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -486,11 +486,6 @@ public:
const CChain& active = Assert(m_node.chainman)->ActiveChain();
return active.GetLocator();
}
- bool checkFinalTx(const CTransaction& tx) override
- {
- LOCK(cs_main);
- return CheckFinalTx(chainman().ActiveChain().Tip(), tx);
- }
std::optional<int> findLocatorFork(const CBlockLocator& locator) override
{
LOCK(cs_main);
diff --git a/src/node/ui_interface.h b/src/node/ui_interface.h
index 5c7c3e7074..d02238b549 100644
--- a/src/node/ui_interface.h
+++ b/src/node/ui_interface.h
@@ -25,8 +25,7 @@ class CClientUIInterface
{
public:
/** Flags for CClientUIInterface::ThreadSafeMessageBox */
- enum MessageBoxFlags
- {
+ enum MessageBoxFlags : uint32_t {
ICON_INFORMATION = 0,
ICON_WARNING = (1U << 0),
ICON_ERROR = (1U << 1),
diff --git a/src/policy/packages.h b/src/policy/packages.h
index d2744f1265..9f274f6b7d 100644
--- a/src/policy/packages.h
+++ b/src/policy/packages.h
@@ -25,6 +25,7 @@ enum class PackageValidationResult {
PCKG_RESULT_UNSET = 0, //!< Initial value. The package has not yet been rejected.
PCKG_POLICY, //!< The package itself is invalid (e.g. too many transactions).
PCKG_TX, //!< At least one tx is invalid.
+ PCKG_MEMPOOL_ERROR, //!< Mempool logic error.
};
/** A package is an ordered list of transactions. The transactions cannot conflict with (spend the
diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h
index 67ea4a3747..1fcbc45c72 100644
--- a/src/primitives/transaction.h
+++ b/src/primitives/transaction.h
@@ -70,25 +70,45 @@ public:
uint32_t nSequence;
CScriptWitness scriptWitness; //!< Only serialized through CTransaction
- /* Setting nSequence to this value for every input in a transaction
- * disables nLockTime. */
+ /**
+ * Setting nSequence to this value for every input in a transaction
+ * disables nLockTime/IsFinalTx().
+ * It fails OP_CHECKLOCKTIMEVERIFY/CheckLockTime() for any input that has
+ * it set (BIP 65).
+ * It has SEQUENCE_LOCKTIME_DISABLE_FLAG set (BIP 68/112).
+ */
static const uint32_t SEQUENCE_FINAL = 0xffffffff;
+ /**
+ * This is the maximum sequence number that enables both nLockTime and
+ * OP_CHECKLOCKTIMEVERIFY (BIP 65).
+ * It has SEQUENCE_LOCKTIME_DISABLE_FLAG set (BIP 68/112).
+ */
+ static const uint32_t MAX_SEQUENCE_NONFINAL{SEQUENCE_FINAL - 1};
- /* Below flags apply in the context of BIP 68*/
- /* If this flag set, CTxIn::nSequence is NOT interpreted as a
- * relative lock-time. */
+ // Below flags apply in the context of BIP 68. BIP 68 requires the tx
+ // version to be set to 2, or higher.
+ /**
+ * If this flag is set, CTxIn::nSequence is NOT interpreted as a
+ * relative lock-time.
+ * It skips SequenceLocks() for any input that has it set (BIP 68).
+ * It fails OP_CHECKSEQUENCEVERIFY/CheckSequence() for any input that has
+ * it set (BIP 112).
+ */
static const uint32_t SEQUENCE_LOCKTIME_DISABLE_FLAG = (1U << 31);
- /* If CTxIn::nSequence encodes a relative lock-time and this flag
+ /**
+ * If CTxIn::nSequence encodes a relative lock-time and this flag
* is set, the relative lock-time has units of 512 seconds,
* otherwise it specifies blocks with a granularity of 1. */
static const uint32_t SEQUENCE_LOCKTIME_TYPE_FLAG = (1 << 22);
- /* If CTxIn::nSequence encodes a relative lock-time, this mask is
+ /**
+ * If CTxIn::nSequence encodes a relative lock-time, this mask is
* applied to extract that lock-time from the sequence field. */
static const uint32_t SEQUENCE_LOCKTIME_MASK = 0x0000ffff;
- /* In order to use the same number of bits to encode roughly the
+ /**
+ * In order to use the same number of bits to encode roughly the
* same wall-clock duration, and because blocks are naturally
* limited to occur every 600s on average, the minimum granularity
* for time-based relative lock-time is fixed at 512 seconds.
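The expanded comments above describe how a single `nSequence` value encodes several orthogonal signals: final/non-final, the BIP 68 disable flag, the lock-time type flag, and the lock-time mask. A small sketch of decoding a BIP 68 relative lock-time with those constants is shown below; it illustrates the bit layout only and is not the consensus implementation in `SequenceLocks()`.

```cpp
#include <cstdint>
#include <cstdio>

static const uint32_t SEQUENCE_LOCKTIME_DISABLE_FLAG = (1U << 31);
static const uint32_t SEQUENCE_LOCKTIME_TYPE_FLAG = (1 << 22);
static const uint32_t SEQUENCE_LOCKTIME_MASK = 0x0000ffff;

void DescribeSequence(int32_t tx_version, uint32_t sequence)
{
    // BIP 68 semantics only apply to transactions with nVersion >= 2,
    // and only when the disable flag is not set.
    if (tx_version < 2 || (sequence & SEQUENCE_LOCKTIME_DISABLE_FLAG)) {
        std::printf("no BIP 68 relative lock-time\n");
        return;
    }
    const uint32_t value = sequence & SEQUENCE_LOCKTIME_MASK;
    if (sequence & SEQUENCE_LOCKTIME_TYPE_FLAG) {
        // Time-based: units of 512 seconds, per the comment above.
        std::printf("relative lock-time: %u seconds\n", (unsigned)(value * 512u));
    } else {
        std::printf("relative lock-time: %u blocks\n", (unsigned)value);
    }
}
```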
diff --git a/src/psbt.cpp b/src/psbt.cpp
index 8248609ba6..c8c73e130b 100644
--- a/src/psbt.cpp
+++ b/src/psbt.cpp
@@ -399,7 +399,7 @@ bool DecodeBase64PSBT(PartiallySignedTransaction& psbt, const std::string& base6
bool DecodeRawPSBT(PartiallySignedTransaction& psbt, const std::string& tx_data, std::string& error)
{
- CDataStream ss_data(MakeUCharSpan(tx_data), SER_NETWORK, PROTOCOL_VERSION);
+ CDataStream ss_data(MakeByteSpan(tx_data), SER_NETWORK, PROTOCOL_VERSION);
try {
ss_data >> psbt;
if (!ss_data.empty()) {
diff --git a/src/pubkey.h b/src/pubkey.h
index 349081205b..dfe06f834c 100644
--- a/src/pubkey.h
+++ b/src/pubkey.h
@@ -142,14 +142,14 @@ public:
{
unsigned int len = size();
::WriteCompactSize(s, len);
- s.write((char*)vch, len);
+ s.write(AsBytes(Span{vch, len}));
}
template <typename Stream>
void Unserialize(Stream& s)
{
const unsigned int len(::ReadCompactSize(s));
if (len <= SIZE) {
- s.read((char*)vch, len);
+ s.read(AsWritableBytes(Span{vch, len}));
if (len != size()) {
Invalidate();
}
diff --git a/src/qt/forms/debugwindow.ui b/src/qt/forms/debugwindow.ui
index 15e0d3fad9..2196801023 100644
--- a/src/qt/forms/debugwindow.ui
+++ b/src/qt/forms/debugwindow.ui
@@ -1355,10 +1355,10 @@
<item row="13" column="0">
<widget class="QLabel" name="peerLastTxLabel">
<property name="toolTip">
- <string>Elapsed time since a novel transaction accepted into our mempool was received from this peer.</string>
+ <string extracomment="Tooltip text for the Last Transaction field in the peer details area.">Elapsed time since a novel transaction accepted into our mempool was received from this peer.</string>
</property>
<property name="text">
- <string>Last Tx</string>
+ <string>Last Transaction</string>
</property>
</widget>
</item>
@@ -1592,6 +1592,84 @@
</widget>
</item>
<item row="23" column="0">
+ <widget class="QLabel" name="peerAddrRelayEnabledLabel">
+ <property name="toolTip">
+ <string extracomment="Tooltip text for the Address Relay field in the peer details area.">Whether we relay addresses to this peer.</string>
+ </property>
+ <property name="text">
+ <string>Address Relay</string>
+ </property>
+ </widget>
+ </item>
+ <item row="23" column="1">
+ <widget class="QLabel" name="peerAddrRelayEnabled">
+ <property name="cursor">
+ <cursorShape>IBeamCursor</cursorShape>
+ </property>
+ <property name="text">
+ <string>N/A</string>
+ </property>
+ <property name="textFormat">
+ <enum>Qt::PlainText</enum>
+ </property>
+ <property name="textInteractionFlags">
+ <set>Qt::LinksAccessibleByMouse|Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse</set>
+ </property>
+ </widget>
+ </item>
+ <item row="24" column="0">
+ <widget class="QLabel" name="peerAddrProcessedLabel">
+ <property name="toolTip">
+ <string extracomment="Tooltip text for the Addresses Processed field in the peer details area.">Total number of addresses processed, excluding those dropped due to rate-limiting.</string>
+ </property>
+ <property name="text">
+ <string>Addresses Processed</string>
+ </property>
+ </widget>
+ </item>
+ <item row="24" column="1">
+ <widget class="QLabel" name="peerAddrProcessed">
+ <property name="cursor">
+ <cursorShape>IBeamCursor</cursorShape>
+ </property>
+ <property name="text">
+ <string>N/A</string>
+ </property>
+ <property name="textFormat">
+ <enum>Qt::PlainText</enum>
+ </property>
+ <property name="textInteractionFlags">
+ <set>Qt::LinksAccessibleByMouse|Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse</set>
+ </property>
+ </widget>
+ </item>
+ <item row="25" column="0">
+ <widget class="QLabel" name="peerAddrRateLimitedLabel">
+ <property name="toolTip">
+ <string extracomment="Tooltip text for the Addresses Rate-Limited field in the peer details area.">Total number of addresses dropped due to rate-limiting.</string>
+ </property>
+ <property name="text">
+ <string>Addresses Rate-Limited</string>
+ </property>
+ </widget>
+ </item>
+ <item row="25" column="1">
+ <widget class="QLabel" name="peerAddrRateLimited">
+ <property name="cursor">
+ <cursorShape>IBeamCursor</cursorShape>
+ </property>
+ <property name="text">
+ <string>N/A</string>
+ </property>
+ <property name="textFormat">
+ <enum>Qt::PlainText</enum>
+ </property>
+ <property name="textInteractionFlags">
+ <set>Qt::LinksAccessibleByMouse|Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse</set>
+ </property>
+ </widget>
+ </item>
+ <item row="26" column="0">
<spacer name="verticalSpacer_3">
<property name="orientation">
<enum>Qt::Vertical</enum>
diff --git a/src/qt/guiconstants.h b/src/qt/guiconstants.h
index 1adcd5b6b9..fcdf6056c9 100644
--- a/src/qt/guiconstants.h
+++ b/src/qt/guiconstants.h
@@ -33,8 +33,6 @@ static const bool DEFAULT_SPLASHSCREEN = true;
#define COLOR_NEGATIVE QColor(255, 0, 0)
/* Transaction list -- bare address (without label) */
#define COLOR_BAREADDRESS QColor(140, 140, 140)
-/* Transaction list -- TX status decoration - open until date */
-#define COLOR_TX_STATUS_OPENUNTILDATE QColor(64, 64, 255)
/* Transaction list -- TX status decoration - danger, tx needs attention */
#define COLOR_TX_STATUS_DANGER QColor(200, 100, 100)
/* Transaction list -- TX status decoration - default color */
diff --git a/src/qt/psbtoperationsdialog.cpp b/src/qt/psbtoperationsdialog.cpp
index 0962dfe9db..d328290cbc 100644
--- a/src/qt/psbtoperationsdialog.cpp
+++ b/src/qt/psbtoperationsdialog.cpp
@@ -158,7 +158,7 @@ void PSBTOperationsDialog::saveTransaction() {
if (filename.isEmpty()) {
return;
}
- std::ofstream out(filename.toLocal8Bit().data(), std::ofstream::out | std::ofstream::binary);
+ fsbridge::ofstream out{filename.toLocal8Bit().data(), fsbridge::ofstream::out | fsbridge::ofstream::binary};
out << ssTx.str();
out.close();
showStatus(tr("PSBT saved to disk."), StatusLevel::INFO);
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index 1cadfaeeb9..08729a7722 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -1215,6 +1215,9 @@ void RPCConsole::updateDetailWidget()
}
ui->peerHeight->setText(QString::number(stats->nodeStateStats.m_starting_height));
ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStateStats.m_ping_wait));
+ ui->peerAddrRelayEnabled->setText(stats->nodeStateStats.m_addr_relay_enabled ? ts.yes : ts.no);
+ ui->peerAddrProcessed->setText(QString::number(stats->nodeStateStats.m_addr_processed));
+ ui->peerAddrRateLimited->setText(QString::number(stats->nodeStateStats.m_addr_rate_limited));
}
ui->peersTabRightPanel->show();
diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp
index 50436a46d8..1206f610cd 100644
--- a/src/qt/sendcoinsdialog.cpp
+++ b/src/qt/sendcoinsdialog.cpp
@@ -509,7 +509,7 @@ void SendCoinsDialog::sendButtonClicked([[maybe_unused]] bool checked)
if (filename.isEmpty()) {
return;
}
- std::ofstream out(filename.toLocal8Bit().data(), std::ofstream::out | std::ofstream::binary);
+ fsbridge::ofstream out{filename.toLocal8Bit().data(), fsbridge::ofstream::out | fsbridge::ofstream::binary};
out << ssTx.str();
out.close();
Q_EMIT message(tr("PSBT saved"), "PSBT saved to disk", CClientUIInterface::MSG_INFORMATION);
diff --git a/src/qt/test/test_main.cpp b/src/qt/test/test_main.cpp
index 11aa61c7fc..10b7e2ffe7 100644
--- a/src/qt/test/test_main.cpp
+++ b/src/qt/test/test_main.cpp
@@ -22,6 +22,7 @@
#include <QApplication>
#include <QObject>
#include <QTest>
+#include <functional>
#if defined(QT_STATICPLUGIN)
#include <QtPlugin>
@@ -43,6 +44,8 @@ using node::NodeContext;
const std::function<void(const std::string&)> G_TEST_LOG_FUN{};
+const std::function<std::vector<const char*>()> G_TEST_COMMAND_LINE_ARGUMENTS{};
+
// This is all you need to run all the tests
int main(int argc, char* argv[])
{
diff --git a/src/qt/transactiondesc.cpp b/src/qt/transactiondesc.cpp
index 0504639cde..be5851d627 100644
--- a/src/qt/transactiondesc.cpp
+++ b/src/qt/transactiondesc.cpp
@@ -18,7 +18,6 @@
#include <interfaces/wallet.h>
#include <key_io.h>
#include <policy/policy.h>
-#include <script/script.h>
#include <util/system.h>
#include <validation.h>
#include <wallet/ismine.h>
@@ -35,14 +34,6 @@ using wallet::isminetype;
QString TransactionDesc::FormatTxStatus(const interfaces::WalletTx& wtx, const interfaces::WalletTxStatus& status, bool inMempool, int numBlocks)
{
- if (!status.is_final)
- {
- if (wtx.tx->nLockTime < LOCKTIME_THRESHOLD)
- return tr("Open for %n more block(s)", "", wtx.tx->nLockTime - numBlocks);
- else
- return tr("Open until %1").arg(GUIUtil::dateTimeStr(wtx.tx->nLockTime));
- }
- else
{
int nDepth = status.depth_in_main_chain;
if (nDepth < 0) {
diff --git a/src/qt/transactionrecord.cpp b/src/qt/transactionrecord.cpp
index 5386569973..26144ba197 100644
--- a/src/qt/transactionrecord.cpp
+++ b/src/qt/transactionrecord.cpp
@@ -179,21 +179,8 @@ void TransactionRecord::updateStatus(const interfaces::WalletTxStatus& wtx, cons
status.depth = wtx.depth_in_main_chain;
status.m_cur_block_hash = block_hash;
- const bool up_to_date = ((int64_t)QDateTime::currentMSecsSinceEpoch() / 1000 - block_time < MAX_BLOCK_TIME_GAP);
- if (up_to_date && !wtx.is_final) {
- if (wtx.lock_time < LOCKTIME_THRESHOLD) {
- status.status = TransactionStatus::OpenUntilBlock;
- status.open_for = wtx.lock_time - numBlocks;
- }
- else
- {
- status.status = TransactionStatus::OpenUntilDate;
- status.open_for = wtx.lock_time;
- }
- }
// For generated transactions, determine maturity
- else if(type == TransactionRecord::Generated)
- {
+ if (type == TransactionRecord::Generated) {
if (wtx.blocks_to_maturity > 0)
{
status.status = TransactionStatus::Immature;
diff --git a/src/qt/transactionrecord.h b/src/qt/transactionrecord.h
index 1c139efabc..dd34656d5f 100644
--- a/src/qt/transactionrecord.h
+++ b/src/qt/transactionrecord.h
@@ -30,8 +30,6 @@ public:
enum Status {
Confirmed, /**< Have 6 or more confirmations (normal tx) or fully mature (mined tx) **/
/// Normal (sent/received) transactions
- OpenUntilDate, /**< Transaction not yet final, waiting for date */
- OpenUntilBlock, /**< Transaction not yet final, waiting for block */
Unconfirmed, /**< Not yet mined into a block **/
Confirming, /**< Confirmed, but waiting for the recommended number of confirmations **/
Conflicted, /**< Conflicts with other transaction or mempool **/
diff --git a/src/qt/transactiontablemodel.cpp b/src/qt/transactiontablemodel.cpp
index b42c3f8c24..44b4fee2e7 100644
--- a/src/qt/transactiontablemodel.cpp
+++ b/src/qt/transactiontablemodel.cpp
@@ -316,12 +316,6 @@ QString TransactionTableModel::formatTxStatus(const TransactionRecord *wtx) cons
switch(wtx->status.status)
{
- case TransactionStatus::OpenUntilBlock:
- status = tr("Open for %n more block(s)","",wtx->status.open_for);
- break;
- case TransactionStatus::OpenUntilDate:
- status = tr("Open until %1").arg(GUIUtil::dateTimeStr(wtx->status.open_for));
- break;
case TransactionStatus::Unconfirmed:
status = tr("Unconfirmed");
break;
@@ -475,9 +469,6 @@ QVariant TransactionTableModel::txStatusDecoration(const TransactionRecord *wtx)
{
switch(wtx->status.status)
{
- case TransactionStatus::OpenUntilBlock:
- case TransactionStatus::OpenUntilDate:
- return COLOR_TX_STATUS_OPENUNTILDATE;
case TransactionStatus::Unconfirmed:
return QIcon(":/icons/transaction_0");
case TransactionStatus::Abandoned:
diff --git a/src/qt/walletframe.cpp b/src/qt/walletframe.cpp
index 98f5ebce99..fba83dd510 100644
--- a/src/qt/walletframe.cpp
+++ b/src/qt/walletframe.cpp
@@ -210,7 +210,7 @@ void WalletFrame::gotoLoadPSBT(bool from_clipboard)
Q_EMIT message(tr("Error"), tr("PSBT file must be smaller than 100 MiB"), CClientUIInterface::MSG_ERROR);
return;
}
- std::ifstream in(filename.toLocal8Bit().data(), std::ios::binary);
+ fsbridge::ifstream in{filename.toLocal8Bit().data(), std::ios::binary};
data = std::string(std::istreambuf_iterator<char>{in}, {});
}
diff --git a/src/random.cpp b/src/random.cpp
index 6eb06c5d47..5dae80fe31 100644
--- a/src/random.cpp
+++ b/src/random.cpp
@@ -19,6 +19,7 @@
#include <sync.h> // for Mutex
#include <util/time.h> // for GetTimeMicros()
+#include <cmath>
#include <stdlib.h>
#include <thread>
@@ -714,3 +715,9 @@ void RandomInit()
ReportHardwareRand();
}
+
+std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval)
+{
+ double unscaled = -std::log1p(GetRand(uint64_t{1} << 48) * -0.0000000000000035527136788 /* -1/2^48 */);
+ return now + std::chrono::duration_cast<std::chrono::microseconds>(unscaled * average_interval + 0.5us);
+}
diff --git a/src/random.h b/src/random.h
index 0c6dc24983..97302d61ab 100644
--- a/src/random.h
+++ b/src/random.h
@@ -10,7 +10,7 @@
#include <crypto/common.h>
#include <uint256.h>
-#include <chrono> // For std::chrono::microseconds
+#include <chrono>
#include <cstdint>
#include <limits>
@@ -82,6 +82,18 @@ D GetRandomDuration(typename std::common_type<D>::type max) noexcept
};
constexpr auto GetRandMicros = GetRandomDuration<std::chrono::microseconds>;
constexpr auto GetRandMillis = GetRandomDuration<std::chrono::milliseconds>;
+
+/**
+ * Return a timestamp in the future sampled from an exponential distribution
+ * (https://en.wikipedia.org/wiki/Exponential_distribution). This distribution
+ * is memoryless and should be used for repeated network events (e.g. sending a
+ * certain type of message) to minimize leaking information to observers.
+ *
+ * The probability of an event occurring before time x is 1 - e^-(x/a) where a
+ * is the average interval between events.
+ */
+std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval);
+
int GetRandInt(int nMax) noexcept;
uint256 GetRandHash() noexcept;
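As a usage illustration only (not part of this patch), a caller might use the new GetExponentialRand() helper to schedule memoryless, Poisson-timed events; the class and the 30-second average interval below are hypothetical. The implementation above is standard inverse transform sampling: GetRand(2^48) scaled by -1/2^48 gives a uniform value u in (-1, 0], and -log1p(u) = -ln(1 + u) is exponentially distributed with unit mean, which is then stretched by the average interval.

```cpp
// Hypothetical caller, for illustration only; assumes <random.h> is available.
#include <random.h>

#include <chrono>

using namespace std::chrono_literals;

class Announcer
{
    std::chrono::microseconds m_next_send{0us};

public:
    //! Called from a timer with the current (mockable) time.
    void MaybeAnnounce(std::chrono::microseconds now)
    {
        if (now < m_next_send) return; // not due yet
        // ... send the announcement ...
        // Draw the next send time: gaps average 30s and, being memoryless,
        // leak nothing about when the previous announcement happened.
        m_next_send = GetExponentialRand(now, 30s);
    }
};
```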
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index ccc859619d..7cbe7e6159 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -185,7 +185,7 @@ UniValue blockToJSON(const CBlock& block, const CBlockIndex* tip, const CBlockIn
case TxVerbosity::SHOW_DETAILS:
case TxVerbosity::SHOW_DETAILS_AND_PREVOUT:
CBlockUndo blockUndo;
- const bool have_undo = !IsBlockPruned(blockindex) && UndoReadFromDisk(blockUndo, blockindex);
+ const bool have_undo{WITH_LOCK(::cs_main, return !IsBlockPruned(blockindex) && UndoReadFromDisk(blockUndo, blockindex))};
for (size_t i = 0; i < block.vtx.size(); ++i) {
const CTransactionRef& tx = block.vtx.at(i);
@@ -790,17 +790,15 @@ static RPCHelpMan getblockfrompeer()
{
return RPCHelpMan{
"getblockfrompeer",
- "\nAttempt to fetch block from a given peer.\n"
- "\nWe must have the header for this block, e.g. using submitheader.\n"
- "\nReturns {} if a block-request was successfully scheduled\n",
+ "Attempt to fetch block from a given peer.\n\n"
+ "We must have the header for this block, e.g. using submitheader.\n"
+ "Subsequent calls for the same block and a new peer will cause the response from the previous peer to be ignored.\n\n"
+ "Returns an empty JSON object if the request was successfully scheduled.",
{
- {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The block hash"},
- {"nodeid", RPCArg::Type::NUM, RPCArg::Optional::NO, "The node ID (see getpeerinfo for node IDs)"},
+ {"block_hash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The block hash to try to fetch"},
+ {"peer_id", RPCArg::Type::NUM, RPCArg::Optional::NO, "The peer to fetch it from (see getpeerinfo for peer IDs)"},
},
- RPCResult{RPCResult::Type::OBJ, "", "",
- {
- {RPCResult::Type::STR, "warnings", /*optional=*/true, "any warnings"},
- }},
+ RPCResult{RPCResult::Type::OBJ, "", /*optional=*/false, "", {}},
RPCExamples{
HelpExampleCli("getblockfrompeer", "\"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09\" 0")
+ HelpExampleRpc("getblockfrompeer", "\"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09\" 0")
@@ -810,31 +808,25 @@ static RPCHelpMan getblockfrompeer()
const NodeContext& node = EnsureAnyNodeContext(request.context);
ChainstateManager& chainman = EnsureChainman(node);
PeerManager& peerman = EnsurePeerman(node);
- CConnman& connman = EnsureConnman(node);
- uint256 hash(ParseHashV(request.params[0], "hash"));
+ const uint256& block_hash{ParseHashV(request.params[0], "block_hash")};
+ const NodeId peer_id{request.params[1].get_int64()};
- const NodeId nodeid = static_cast<NodeId>(request.params[1].get_int64());
-
- // Check that the peer with nodeid exists
- if (!connman.ForNode(nodeid, [](CNode* node) {return true;})) {
- throw JSONRPCError(RPC_MISC_ERROR, strprintf("Peer nodeid %d does not exist", nodeid));
- }
-
- const CBlockIndex* const index = WITH_LOCK(cs_main, return chainman.m_blockman.LookupBlockIndex(hash););
+ const CBlockIndex* const index = WITH_LOCK(cs_main, return chainman.m_blockman.LookupBlockIndex(block_hash););
if (!index) {
throw JSONRPCError(RPC_MISC_ERROR, "Block header missing");
}
- UniValue result = UniValue::VOBJ;
+ const bool block_has_data = WITH_LOCK(::cs_main, return index->nStatus & BLOCK_HAVE_DATA);
+ if (block_has_data) {
+ throw JSONRPCError(RPC_MISC_ERROR, "Block already downloaded");
+ }
- if (index->nStatus & BLOCK_HAVE_DATA) {
- result.pushKV("warnings", "Block already downloaded");
- } else if (!peerman.FetchBlock(nodeid, hash, *index)) {
- throw JSONRPCError(RPC_MISC_ERROR, "Failed to fetch block from peer");
+ if (const auto err{peerman.FetchBlock(peer_id, *index)}) {
+ throw JSONRPCError(RPC_MISC_ERROR, err.value());
}
- return result;
+ return UniValue::VOBJ;
},
};
}
@@ -938,8 +930,9 @@ static RPCHelpMan getblockheader()
};
}
-static CBlock GetBlockChecked(const CBlockIndex* pblockindex)
+static CBlock GetBlockChecked(const CBlockIndex* pblockindex) EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
+ AssertLockHeld(::cs_main);
CBlock block;
if (IsBlockPruned(pblockindex)) {
throw JSONRPCError(RPC_MISC_ERROR, "Block not available (pruned data)");
@@ -955,8 +948,9 @@ static CBlock GetBlockChecked(const CBlockIndex* pblockindex)
return block;
}
-static CBlockUndo GetUndoChecked(const CBlockIndex* pblockindex)
+static CBlockUndo GetUndoChecked(const CBlockIndex* pblockindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
+ AssertLockHeld(::cs_main);
CBlockUndo blockUndo;
if (IsBlockPruned(pblockindex)) {
throw JSONRPCError(RPC_MISC_ERROR, "Undo data not available (pruned data)");
@@ -1340,6 +1334,7 @@ static RPCHelpMan gettxout()
{RPCResult::Type::STR_AMOUNT, "value", "The transaction value in " + CURRENCY_UNIT},
{RPCResult::Type::OBJ, "scriptPubKey", "", {
{RPCResult::Type::STR, "asm", ""},
+ {RPCResult::Type::STR, "desc", "Inferred descriptor for the output"},
{RPCResult::Type::STR_HEX, "hex", ""},
{RPCResult::Type::STR, "type", "The type, eg pubkeyhash"},
{RPCResult::Type::STR, "address", /*optional=*/true, "The Bitcoin address (only if a well-defined address exists)"},
@@ -1443,7 +1438,7 @@ static void SoftForkDescPushBack(const CBlockIndex* active_chain_tip, UniValue&
UniValue rv(UniValue::VOBJ);
rv.pushKV("type", "buried");
- // getblockchaininfo reports the softfork as active from when the chain height is
+ // getdeploymentinfo reports the softfork as active from when the chain height is
// one below the activation height
rv.pushKV("active", DeploymentActiveAfter(active_chain_tip, params, dep));
rv.pushKV("height", params.DeploymentHeight(dep));
@@ -1455,51 +1450,82 @@ static void SoftForkDescPushBack(const CBlockIndex* active_chain_tip, UniValue&
// For BIP9 deployments.
if (!DeploymentEnabled(consensusParams, id)) return;
+ if (active_chain_tip == nullptr) return;
+
+ auto get_state_name = [](const ThresholdState state) -> std::string {
+ switch (state) {
+ case ThresholdState::DEFINED: return "defined";
+ case ThresholdState::STARTED: return "started";
+ case ThresholdState::LOCKED_IN: return "locked_in";
+ case ThresholdState::ACTIVE: return "active";
+ case ThresholdState::FAILED: return "failed";
+ }
+ return "invalid";
+ };
UniValue bip9(UniValue::VOBJ);
- const ThresholdState thresholdState = g_versionbitscache.State(active_chain_tip, consensusParams, id);
- switch (thresholdState) {
- case ThresholdState::DEFINED: bip9.pushKV("status", "defined"); break;
- case ThresholdState::STARTED: bip9.pushKV("status", "started"); break;
- case ThresholdState::LOCKED_IN: bip9.pushKV("status", "locked_in"); break;
- case ThresholdState::ACTIVE: bip9.pushKV("status", "active"); break;
- case ThresholdState::FAILED: bip9.pushKV("status", "failed"); break;
- }
- const bool has_signal = (ThresholdState::STARTED == thresholdState || ThresholdState::LOCKED_IN == thresholdState);
+
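+ // State() takes the block *preceding* the one being evaluated, so passing pprev yields the state that applies to the tip itself (reported as "status"), while passing the tip yields the state of the next block ("status-next").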
+ const ThresholdState next_state = g_versionbitscache.State(active_chain_tip, consensusParams, id);
+ const ThresholdState current_state = g_versionbitscache.State(active_chain_tip->pprev, consensusParams, id);
+
+ const bool has_signal = (ThresholdState::STARTED == current_state || ThresholdState::LOCKED_IN == current_state);
+
+ // BIP9 parameters
if (has_signal) {
bip9.pushKV("bit", consensusParams.vDeployments[id].bit);
}
bip9.pushKV("start_time", consensusParams.vDeployments[id].nStartTime);
bip9.pushKV("timeout", consensusParams.vDeployments[id].nTimeout);
- int64_t since_height = g_versionbitscache.StateSinceHeight(active_chain_tip, consensusParams, id);
- bip9.pushKV("since", since_height);
+ bip9.pushKV("min_activation_height", consensusParams.vDeployments[id].min_activation_height);
+
+ // BIP9 status
+ bip9.pushKV("status", get_state_name(current_state));
+ bip9.pushKV("since", g_versionbitscache.StateSinceHeight(active_chain_tip->pprev, consensusParams, id));
+ bip9.pushKV("status-next", get_state_name(next_state));
+
+ // BIP9 signalling status, if applicable
if (has_signal) {
UniValue statsUV(UniValue::VOBJ);
- BIP9Stats statsStruct = g_versionbitscache.Statistics(active_chain_tip, consensusParams, id);
+ std::vector<bool> signals;
+ BIP9Stats statsStruct = g_versionbitscache.Statistics(active_chain_tip, consensusParams, id, &signals);
statsUV.pushKV("period", statsStruct.period);
statsUV.pushKV("elapsed", statsStruct.elapsed);
statsUV.pushKV("count", statsStruct.count);
- if (ThresholdState::LOCKED_IN != thresholdState) {
+ if (ThresholdState::LOCKED_IN != current_state) {
statsUV.pushKV("threshold", statsStruct.threshold);
statsUV.pushKV("possible", statsStruct.possible);
}
bip9.pushKV("statistics", statsUV);
+
+ std::string sig;
+ sig.reserve(signals.size());
+ for (const bool s : signals) {
+ sig.push_back(s ? '#' : '-');
+ }
+ bip9.pushKV("signalling", sig);
}
- bip9.pushKV("min_activation_height", consensusParams.vDeployments[id].min_activation_height);
UniValue rv(UniValue::VOBJ);
rv.pushKV("type", "bip9");
- rv.pushKV("bip9", bip9);
- if (ThresholdState::ACTIVE == thresholdState) {
- rv.pushKV("height", since_height);
+ if (ThresholdState::ACTIVE == next_state) {
+ rv.pushKV("height", g_versionbitscache.StateSinceHeight(active_chain_tip, consensusParams, id));
}
- rv.pushKV("active", ThresholdState::ACTIVE == thresholdState);
+ rv.pushKV("active", ThresholdState::ACTIVE == next_state);
+ rv.pushKV("bip9", bip9);
softforks.pushKV(DeploymentName(id), rv);
}
+namespace {
+/* TODO: when -deprecatedrpc=softforks is removed, drop these */
+UniValue DeploymentInfo(const CBlockIndex* tip, const Consensus::Params& consensusParams);
+extern const std::vector<RPCResult> RPCHelpForDeployment;
+}
+
+// used by rest.cpp:rest_chaininfo, so cannot be static
RPCHelpMan getblockchaininfo()
{
+ /* TODO: from v24, remove -deprecatedrpc=softforks */
return RPCHelpMan{"getblockchaininfo",
"Returns an object containing various state info regarding blockchain processing.\n",
{},
@@ -1521,31 +1547,11 @@ RPCHelpMan getblockchaininfo()
{RPCResult::Type::NUM, "pruneheight", /*optional=*/true, "lowest-height complete block stored (only present if pruning is enabled)"},
{RPCResult::Type::BOOL, "automatic_pruning", /*optional=*/true, "whether automatic pruning is enabled (only present if pruning is enabled)"},
{RPCResult::Type::NUM, "prune_target_size", /*optional=*/true, "the target size used by pruning (only present if automatic pruning is enabled)"},
- {RPCResult::Type::OBJ_DYN, "softforks", "status of softforks",
+ {RPCResult::Type::OBJ_DYN, "softforks", "(DEPRECATED, returned only if config option -deprecatedrpc=softforks is passed) status of softforks",
{
{RPCResult::Type::OBJ, "xxxx", "name of the softfork",
- {
- {RPCResult::Type::STR, "type", "one of \"buried\", \"bip9\""},
- {RPCResult::Type::OBJ, "bip9", /*optional=*/true, "status of bip9 softforks (only for \"bip9\" type)",
- {
- {RPCResult::Type::STR, "status", "one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\""},
- {RPCResult::Type::NUM, "bit", /*optional=*/true, "the bit (0-28) in the block version field used to signal this softfork (only for \"started\" and \"locked_in\" status)"},
- {RPCResult::Type::NUM_TIME, "start_time", "the minimum median time past of a block at which the bit gains its meaning"},
- {RPCResult::Type::NUM_TIME, "timeout", "the median time past of a block at which the deployment is considered failed if not yet locked in"},
- {RPCResult::Type::NUM, "since", "height of the first block to which the status applies"},
- {RPCResult::Type::NUM, "min_activation_height", "minimum height of blocks for which the rules may be enforced"},
- {RPCResult::Type::OBJ, "statistics", /*optional=*/true, "numeric statistics about signalling for a softfork (only for \"started\" and \"locked_in\" status)",
- {
- {RPCResult::Type::NUM, "period", "the length in blocks of the signalling period"},
- {RPCResult::Type::NUM, "threshold", /*optional=*/true, "the number of blocks with the version bit set required to activate the feature (only for \"started\" status)"},
- {RPCResult::Type::NUM, "elapsed", "the number of blocks elapsed since the beginning of the current period"},
- {RPCResult::Type::NUM, "count", "the number of blocks with the version bit set in the current period"},
- {RPCResult::Type::BOOL, "possible", /*optional=*/true, "returns false if there are not enough blocks left in this period to pass activation threshold (only for \"started\" status)"},
- }},
- }},
- {RPCResult::Type::NUM, "height", /*optional=*/true, "height of the first block which the rules are or will be enforced (only for \"buried\" type, or \"bip9\" type with \"active\" status)"},
- {RPCResult::Type::BOOL, "active", "true if the rules are enforced for the mempool and the next block"},
- }},
+ RPCHelpForDeployment
+ },
}},
{RPCResult::Type::STR, "warnings", "any network and blockchain warnings"},
}},
@@ -1593,7 +1599,45 @@ RPCHelpMan getblockchaininfo()
}
}
- const Consensus::Params& consensusParams = Params().GetConsensus();
+ if (IsDeprecatedRPCEnabled("softforks")) {
+ const Consensus::Params& consensusParams = Params().GetConsensus();
+ obj.pushKV("softforks", DeploymentInfo(tip, consensusParams));
+ }
+
+ obj.pushKV("warnings", GetWarnings(false).original);
+ return obj;
+},
+ };
+}
+
+namespace {
+const std::vector<RPCResult> RPCHelpForDeployment{
+ {RPCResult::Type::STR, "type", "one of \"buried\", \"bip9\""},
+ {RPCResult::Type::NUM, "height", /*optional=*/true, "height of the first block which the rules are or will be enforced (only for \"buried\" type, or \"bip9\" type with \"active\" status)"},
+ {RPCResult::Type::BOOL, "active", "true if the rules are enforced for the mempool and the next block"},
+ {RPCResult::Type::OBJ, "bip9", /*optional=*/true, "status of bip9 softforks (only for \"bip9\" type)",
+ {
+ {RPCResult::Type::NUM, "bit", /*optional=*/true, "the bit (0-28) in the block version field used to signal this softfork (only for \"started\" and \"locked_in\" status)"},
+ {RPCResult::Type::NUM_TIME, "start_time", "the minimum median time past of a block at which the bit gains its meaning"},
+ {RPCResult::Type::NUM_TIME, "timeout", "the median time past of a block at which the deployment is considered failed if not yet locked in"},
+ {RPCResult::Type::NUM, "min_activation_height", "minimum height of blocks for which the rules may be enforced"},
+ {RPCResult::Type::STR, "status", "bip9 status of specified block (one of \"defined\", \"started\", \"locked_in\", \"active\", \"failed\")"},
+ {RPCResult::Type::NUM, "since", "height of the first block to which the status applies"},
+ {RPCResult::Type::STR, "status-next", "bip9 status of next block"},
+ {RPCResult::Type::OBJ, "statistics", /*optional=*/true, "numeric statistics about signalling for a softfork (only for \"started\" and \"locked_in\" status)",
+ {
+ {RPCResult::Type::NUM, "period", "the length in blocks of the signalling period"},
+ {RPCResult::Type::NUM, "threshold", /*optional=*/true, "the number of blocks with the version bit set required to activate the feature (only for \"started\" status)"},
+ {RPCResult::Type::NUM, "elapsed", "the number of blocks elapsed since the beginning of the current period"},
+ {RPCResult::Type::NUM, "count", "the number of blocks with the version bit set in the current period"},
+ {RPCResult::Type::BOOL, "possible", /*optional=*/true, "returns false if there are not enough blocks left in this period to pass activation threshold (only for \"started\" status)"},
+ }},
+ {RPCResult::Type::STR, "signalling", "indicates blocks that signalled with a # and blocks that did not with a -"},
+ }},
+};
+
+UniValue DeploymentInfo(const CBlockIndex* tip, const Consensus::Params& consensusParams)
+{
UniValue softforks(UniValue::VOBJ);
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_HEIGHTINCB);
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_DERSIG);
@@ -1602,11 +1646,53 @@ RPCHelpMan getblockchaininfo()
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_SEGWIT);
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_TESTDUMMY);
SoftForkDescPushBack(tip, softforks, consensusParams, Consensus::DEPLOYMENT_TAPROOT);
- obj.pushKV("softforks", softforks);
+ return softforks;
+}
+} // anon namespace
- obj.pushKV("warnings", GetWarnings(false).original);
- return obj;
-},
+static RPCHelpMan getdeploymentinfo()
+{
+ return RPCHelpMan{"getdeploymentinfo",
+ "Returns an object containing various state info regarding soft-forks.",
+ {
+ {"blockhash", RPCArg::Type::STR_HEX, RPCArg::Default{"chain tip"}, "The block hash at which to query fork state"},
+ },
+ RPCResult{
+ RPCResult::Type::OBJ, "", "", {
+ {RPCResult::Type::STR, "hash", "requested block hash (or tip)"},
+ {RPCResult::Type::NUM, "height", "requested block height (or tip)"},
+ {RPCResult::Type::OBJ, "deployments", "", {
+ {RPCResult::Type::OBJ, "xxxx", "name of the deployment", RPCHelpForDeployment}
+ }},
+ }
+ },
+ RPCExamples{ HelpExampleCli("getdeploymentinfo", "") + HelpExampleRpc("getdeploymentinfo", "") },
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+ {
+ ChainstateManager& chainman = EnsureAnyChainman(request.context);
+ LOCK(cs_main);
+ CChainState& active_chainstate = chainman.ActiveChainstate();
+
+ const CBlockIndex* tip;
+ if (request.params[0].isNull()) {
+ tip = active_chainstate.m_chain.Tip();
+ CHECK_NONFATAL(tip);
+ } else {
+ uint256 hash(ParseHashV(request.params[0], "blockhash"));
+ tip = chainman.m_blockman.LookupBlockIndex(hash);
+ if (!tip) {
+ throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found");
+ }
+ }
+
+ const Consensus::Params& consensusParams = Params().GetConsensus();
+
+ UniValue deploymentinfo(UniValue::VOBJ);
+ deploymentinfo.pushKV("hash", tip->GetBlockHash().ToString());
+ deploymentinfo.pushKV("height", tip->nHeight);
+ deploymentinfo.pushKV("deployments", DeploymentInfo(tip, consensusParams));
+ return deploymentinfo;
+ },
};
}
@@ -2756,6 +2842,7 @@ static const CRPCCommand commands[] =
{ "blockchain", &getblockheader, },
{ "blockchain", &getchaintips, },
{ "blockchain", &getdifficulty, },
+ { "blockchain", &getdeploymentinfo, },
{ "blockchain", &getmempoolancestors, },
{ "blockchain", &getmempooldescendants, },
{ "blockchain", &getmempoolentry, },
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index 003ba8bb20..c480a093a4 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -60,7 +60,7 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "getbalance", 1, "minconf" },
{ "getbalance", 2, "include_watchonly" },
{ "getbalance", 3, "avoid_reuse" },
- { "getblockfrompeer", 1, "nodeid" },
+ { "getblockfrompeer", 1, "peer_id" },
{ "getblockhash", 0, "height" },
{ "waitforblockheight", 0, "height" },
{ "waitforblockheight", 1, "timeout" },
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index 6fe990691a..3d7c00edfc 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -648,7 +648,7 @@ static RPCHelpMan getnetworkinfo()
obj.pushKV("incrementalfee", ValueFromAmount(::incrementalRelayFee.GetFeePerK()));
UniValue localAddresses(UniValue::VARR);
{
- LOCK(cs_mapLocalHost);
+ LOCK(g_maplocalhost_mutex);
for (const std::pair<const CNetAddr, LocalServiceInfo> &item : mapLocalHost)
{
UniValue rec(UniValue::VOBJ);
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index f227fde0f7..ff0d8a4e0f 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -179,6 +179,7 @@ static RPCHelpMan getrawtransaction()
{RPCResult::Type::OBJ, "scriptPubKey", "",
{
{RPCResult::Type::STR, "asm", "the asm"},
+ {RPCResult::Type::STR, "desc", "Inferred descriptor for the output"},
{RPCResult::Type::STR, "hex", "the hex"},
{RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
{RPCResult::Type::STR, "address", /*optional=*/true, "The Bitcoin address (only if a well-defined address exists)"},
@@ -240,7 +241,8 @@ static RPCHelpMan getrawtransaction()
if (!tx) {
std::string errmsg;
if (blockindex) {
- if (!(blockindex->nStatus & BLOCK_HAVE_DATA)) {
+ const bool block_has_data = WITH_LOCK(::cs_main, return blockindex->nStatus & BLOCK_HAVE_DATA);
+ if (!block_has_data) {
throw JSONRPCError(RPC_MISC_ERROR, "Block not available");
}
errmsg = "No such transaction found in the provided block";
@@ -506,6 +508,7 @@ static RPCHelpMan decoderawtransaction()
{RPCResult::Type::OBJ, "scriptPubKey", "",
{
{RPCResult::Type::STR, "asm", "the asm"},
+ {RPCResult::Type::STR, "desc", "Inferred descriptor for the output"},
{RPCResult::Type::STR_HEX, "hex", "the hex"},
{RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
{RPCResult::Type::STR, "address", /*optional=*/true, "The Bitcoin address (only if a well-defined address exists)"},
@@ -561,6 +564,7 @@ static RPCHelpMan decodescript()
RPCResult::Type::OBJ, "", "",
{
{RPCResult::Type::STR, "asm", "Script public key"},
+ {RPCResult::Type::STR, "desc", "Inferred descriptor for the script"},
{RPCResult::Type::STR, "type", "The output type (e.g. " + GetAllOutputTypes() + ")"},
{RPCResult::Type::STR, "address", /*optional=*/true, "The Bitcoin address (only if a well-defined address exists)"},
{RPCResult::Type::STR, "p2sh", /*optional=*/true,
@@ -572,6 +576,7 @@ static RPCHelpMan decodescript()
{RPCResult::Type::STR_HEX, "hex", "Hex string of the script public key"},
{RPCResult::Type::STR, "type", "The type of the script public key (e.g. witness_v0_keyhash or witness_v0_scripthash)"},
{RPCResult::Type::STR, "address", /*optional=*/true, "The Bitcoin address (only if a well-defined address exists)"},
+ {RPCResult::Type::STR, "desc", "Inferred descriptor for the script"},
{RPCResult::Type::STR, "p2sh-segwit", "address of the P2SH script wrapping this witness redeem script"},
}},
},
diff --git a/src/rpc/rawtransaction_util.cpp b/src/rpc/rawtransaction_util.cpp
index 3459897fe5..e23fe34480 100644
--- a/src/rpc/rawtransaction_util.cpp
+++ b/src/rpc/rawtransaction_util.cpp
@@ -63,7 +63,7 @@ CMutableTransaction ConstructTransaction(const UniValue& inputs_in, const UniVal
if (rbf) {
nSequence = MAX_BIP125_RBF_SEQUENCE; /* CTxIn::SEQUENCE_FINAL - 2 */
} else if (rawTx.nLockTime) {
- nSequence = CTxIn::SEQUENCE_FINAL - 1;
+ nSequence = CTxIn::MAX_SEQUENCE_NONFINAL; /* CTxIn::SEQUENCE_FINAL - 1 */
} else {
nSequence = CTxIn::SEQUENCE_FINAL;
}
diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp
index 57e3da0351..5ef7e26ce8 100644
--- a/src/rpc/util.cpp
+++ b/src/rpc/util.cpp
@@ -831,11 +831,14 @@ void RPCResult::ToSections(Sections& sections, const OuterType outer_type, const
}
case Type::OBJ_DYN:
case Type::OBJ: {
+ if (m_inner.empty()) {
+ sections.PushSection({indent + maybe_key + "{}", Description("empty JSON object")});
+ return;
+ }
sections.PushSection({indent + maybe_key + "{", Description("json object")});
for (const auto& i : m_inner) {
i.ToSections(sections, OuterType::OBJ, current_indent + 2);
}
- CHECK_NONFATAL(!m_inner.empty());
if (m_type == Type::OBJ_DYN && m_inner.back().m_type != Type::ELISION) {
// If the dictionary keys are dynamic, use three dots for continuation
sections.PushSection({indent_next + "...", ""});
@@ -886,6 +889,17 @@ bool RPCResult::MatchesType(const UniValue& result) const
CHECK_NONFATAL(false);
}
+void RPCResult::CheckInnerDoc() const
+{
+ if (m_type == Type::OBJ) {
+ // May or may not be empty
+ return;
+ }
+ // Everything else must either be empty or not
+ const bool inner_needed{m_type == Type::ARR || m_type == Type::ARR_FIXED || m_type == Type::OBJ_DYN};
+ CHECK_NONFATAL(inner_needed != m_inner.empty());
+}
+
std::string RPCArg::ToStringObj(const bool oneline) const
{
std::string res;
diff --git a/src/rpc/util.h b/src/rpc/util.h
index d43ee33b0f..25ebf78fa1 100644
--- a/src/rpc/util.h
+++ b/src/rpc/util.h
@@ -267,8 +267,7 @@ struct RPCResult {
m_cond{std::move(cond)}
{
CHECK_NONFATAL(!m_cond.empty());
- const bool inner_needed{type == Type::ARR || type == Type::ARR_FIXED || type == Type::OBJ || type == Type::OBJ_DYN};
- CHECK_NONFATAL(inner_needed != inner.empty());
+ CheckInnerDoc();
}
RPCResult(
@@ -292,8 +291,7 @@ struct RPCResult {
m_description{std::move(description)},
m_cond{}
{
- const bool inner_needed{type == Type::ARR || type == Type::ARR_FIXED || type == Type::OBJ || type == Type::OBJ_DYN};
- CHECK_NONFATAL(inner_needed != inner.empty());
+ CheckInnerDoc();
}
RPCResult(
@@ -311,6 +309,9 @@ struct RPCResult {
std::string ToDescriptionString() const;
/** Check whether the result JSON type matches. */
bool MatchesType(const UniValue& result) const;
+
+private:
+ void CheckInnerDoc() const;
};
struct RPCResults {
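To make the relaxed check concrete, a small sketch (illustration only, not from the patch): with CheckInnerDoc(), an OBJ result may now be documented with no inner fields, exactly as getblockfrompeer does above, while the container types still require inner documentation.

```cpp
// Illustration only; assumes <rpc/util.h>.
#include <rpc/util.h>

// An empty JSON object result is now valid documentation; ToSections() renders
// it as "{}" and annotates it as an empty JSON object.
const RPCResult empty_obj{RPCResult::Type::OBJ, "", /*optional=*/false, "", {}};

// OBJ_DYN, ARR and ARR_FIXED still need inner entries, so this would fail
// CHECK_NONFATAL in CheckInnerDoc() at construction time:
// const RPCResult bad{RPCResult::Type::OBJ_DYN, "result", "a dynamic object", {}};
```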
diff --git a/src/scheduler.cpp b/src/scheduler.cpp
index 378f866e09..0b2ad3c553 100644
--- a/src/scheduler.cpp
+++ b/src/scheduler.cpp
@@ -136,7 +136,7 @@ bool CScheduler::AreThreadsServicingQueue() const
void SingleThreadedSchedulerClient::MaybeScheduleProcessQueue()
{
{
- LOCK(m_cs_callbacks_pending);
+ LOCK(m_callbacks_mutex);
// Try to avoid scheduling too many copies here, but if we
// accidentally have two ProcessQueue's scheduled at once it's
// not a big deal.
@@ -150,7 +150,7 @@ void SingleThreadedSchedulerClient::ProcessQueue()
{
std::function<void()> callback;
{
- LOCK(m_cs_callbacks_pending);
+ LOCK(m_callbacks_mutex);
if (m_are_callbacks_running) return;
if (m_callbacks_pending.empty()) return;
m_are_callbacks_running = true;
@@ -167,7 +167,7 @@ void SingleThreadedSchedulerClient::ProcessQueue()
~RAIICallbacksRunning()
{
{
- LOCK(instance->m_cs_callbacks_pending);
+ LOCK(instance->m_callbacks_mutex);
instance->m_are_callbacks_running = false;
}
instance->MaybeScheduleProcessQueue();
@@ -182,7 +182,7 @@ void SingleThreadedSchedulerClient::AddToProcessQueue(std::function<void()> func
assert(m_pscheduler);
{
- LOCK(m_cs_callbacks_pending);
+ LOCK(m_callbacks_mutex);
m_callbacks_pending.emplace_back(std::move(func));
}
MaybeScheduleProcessQueue();
@@ -194,13 +194,13 @@ void SingleThreadedSchedulerClient::EmptyQueue()
bool should_continue = true;
while (should_continue) {
ProcessQueue();
- LOCK(m_cs_callbacks_pending);
+ LOCK(m_callbacks_mutex);
should_continue = !m_callbacks_pending.empty();
}
}
size_t SingleThreadedSchedulerClient::CallbacksPending()
{
- LOCK(m_cs_callbacks_pending);
+ LOCK(m_callbacks_mutex);
return m_callbacks_pending.size();
}
diff --git a/src/scheduler.h b/src/scheduler.h
index 5366a5989c..bb0abfbf7a 100644
--- a/src/scheduler.h
+++ b/src/scheduler.h
@@ -119,9 +119,9 @@ class SingleThreadedSchedulerClient
private:
CScheduler* m_pscheduler;
- RecursiveMutex m_cs_callbacks_pending;
- std::list<std::function<void()>> m_callbacks_pending GUARDED_BY(m_cs_callbacks_pending);
- bool m_are_callbacks_running GUARDED_BY(m_cs_callbacks_pending) = false;
+ Mutex m_callbacks_mutex;
+ std::list<std::function<void()>> m_callbacks_pending GUARDED_BY(m_callbacks_mutex);
+ bool m_are_callbacks_running GUARDED_BY(m_callbacks_mutex) = false;
void MaybeScheduleProcessQueue();
void ProcessQueue();
diff --git a/src/script/bitcoinconsensus.cpp b/src/script/bitcoinconsensus.cpp
index dd15e6104c..f7f9dfc262 100644
--- a/src/script/bitcoinconsensus.cpp
+++ b/src/script/bitcoinconsensus.cpp
@@ -22,20 +22,23 @@ public:
m_remaining(txToLen)
{}
- void read(char* pch, size_t nSize)
+ void read(Span<std::byte> dst)
{
- if (nSize > m_remaining)
+ if (dst.size() > m_remaining) {
throw std::ios_base::failure(std::string(__func__) + ": end of data");
+ }
- if (pch == nullptr)
+ if (dst.data() == nullptr) {
throw std::ios_base::failure(std::string(__func__) + ": bad destination buffer");
+ }
- if (m_data == nullptr)
+ if (m_data == nullptr) {
throw std::ios_base::failure(std::string(__func__) + ": bad source buffer");
+ }
- memcpy(pch, m_data, nSize);
- m_remaining -= nSize;
- m_data += nSize;
+ memcpy(dst.data(), m_data, dst.size());
+ m_remaining -= dst.size();
+ m_data += dst.size();
}
template<typename T>
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index 95ffe40a74..11b1a1c887 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -1303,12 +1303,12 @@ public:
it = itBegin;
while (scriptCode.GetOp(it, opcode)) {
if (opcode == OP_CODESEPARATOR) {
- s.write((char*)&itBegin[0], it-itBegin-1);
+ s.write(AsBytes(Span{&itBegin[0], size_t(it - itBegin - 1)}));
itBegin = it;
}
}
if (itBegin != scriptCode.end())
- s.write((char*)&itBegin[0], it-itBegin);
+ s.write(AsBytes(Span{&itBegin[0], size_t(it - itBegin)}));
}
/** Serialize an input of txTo */
@@ -1500,7 +1500,7 @@ static bool HandleMissingData(MissingDataBehavior mdb)
}
template<typename T>
-bool SignatureHashSchnorr(uint256& hash_out, const ScriptExecutionData& execdata, const T& tx_to, uint32_t in_pos, uint8_t hash_type, SigVersion sigversion, const PrecomputedTransactionData& cache, MissingDataBehavior mdb)
+bool SignatureHashSchnorr(uint256& hash_out, ScriptExecutionData& execdata, const T& tx_to, uint32_t in_pos, uint8_t hash_type, SigVersion sigversion, const PrecomputedTransactionData& cache, MissingDataBehavior mdb)
{
uint8_t ext_flag, key_version;
switch (sigversion) {
@@ -1568,9 +1568,12 @@ bool SignatureHashSchnorr(uint256& hash_out, const ScriptExecutionData& execdata
// Data about the output (if only one).
if (output_type == SIGHASH_SINGLE) {
if (in_pos >= tx_to.vout.size()) return false;
- CHashWriter sha_single_output(SER_GETHASH, 0);
- sha_single_output << tx_to.vout[in_pos];
- ss << sha_single_output.GetSHA256();
+ if (!execdata.m_output_hash) {
+ CHashWriter sha_single_output(SER_GETHASH, 0);
+ sha_single_output << tx_to.vout[in_pos];
+ execdata.m_output_hash = sha_single_output.GetSHA256();
+ }
+ ss << execdata.m_output_hash.value();
}
// Additional data for BIP 342 signatures
@@ -1692,7 +1695,7 @@ bool GenericTransactionSignatureChecker<T>::CheckECDSASignature(const std::vecto
}
template <class T>
-bool GenericTransactionSignatureChecker<T>::CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey_in, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror) const
+bool GenericTransactionSignatureChecker<T>::CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey_in, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror) const
{
assert(sigversion == SigVersion::TAPROOT || sigversion == SigVersion::TAPSCRIPT);
// Schnorr signatures have 32-byte public keys. The caller is responsible for enforcing this.
diff --git a/src/script/interpreter.h b/src/script/interpreter.h
index 2a28f1a2d3..cf1953ad22 100644
--- a/src/script/interpreter.h
+++ b/src/script/interpreter.h
@@ -11,6 +11,7 @@
#include <span.h>
#include <primitives/transaction.h>
+#include <optional>
#include <vector>
#include <stdint.h>
@@ -215,6 +216,9 @@ struct ScriptExecutionData
bool m_validation_weight_left_init = false;
//! How much validation weight is left (decremented for every successful non-empty signature check).
int64_t m_validation_weight_left;
+
+ //! The hash of the corresponding output
+ std::optional<uint256> m_output_hash;
};
/** Signature hash sizes */
@@ -244,7 +248,7 @@ public:
return false;
}
- virtual bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror = nullptr) const
+ virtual bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror = nullptr) const
{
return false;
}
@@ -272,7 +276,7 @@ enum class MissingDataBehavior
};
template<typename T>
-bool SignatureHashSchnorr(uint256& hash_out, const ScriptExecutionData& execdata, const T& tx_to, uint32_t in_pos, uint8_t hash_type, SigVersion sigversion, const PrecomputedTransactionData& cache, MissingDataBehavior mdb);
+bool SignatureHashSchnorr(uint256& hash_out, ScriptExecutionData& execdata, const T& tx_to, uint32_t in_pos, uint8_t hash_type, SigVersion sigversion, const PrecomputedTransactionData& cache, MissingDataBehavior mdb);
template <class T>
class GenericTransactionSignatureChecker : public BaseSignatureChecker
@@ -292,7 +296,7 @@ public:
GenericTransactionSignatureChecker(const T* txToIn, unsigned int nInIn, const CAmount& amountIn, MissingDataBehavior mdb) : txTo(txToIn), m_mdb(mdb), nIn(nInIn), amount(amountIn), txdata(nullptr) {}
GenericTransactionSignatureChecker(const T* txToIn, unsigned int nInIn, const CAmount& amountIn, const PrecomputedTransactionData& txdataIn, MissingDataBehavior mdb) : txTo(txToIn), m_mdb(mdb), nIn(nInIn), amount(amountIn), txdata(&txdataIn) {}
bool CheckECDSASignature(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override;
- bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror = nullptr) const override;
+ bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror = nullptr) const override;
bool CheckLockTime(const CScriptNum& nLockTime) const override;
bool CheckSequence(const CScriptNum& nSequence) const override;
};
@@ -313,7 +317,7 @@ public:
return m_checker.CheckECDSASignature(scriptSig, vchPubKey, scriptCode, sigversion);
}
- bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror = nullptr) const override
+ bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror = nullptr) const override
{
return m_checker.CheckSchnorrSignature(sig, pubkey, sigversion, execdata, serror);
}
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index 8e08448480..371a937bc8 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -542,7 +542,7 @@ class DummySignatureChecker final : public BaseSignatureChecker
public:
DummySignatureChecker() {}
bool CheckECDSASignature(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override { return true; }
- bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror) const override { return true; }
+ bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror) const override { return true; }
};
const DummySignatureChecker DUMMY_CHECKER;
diff --git a/src/serialize.h b/src/serialize.h
index 4cc4b0338c..44bb471f25 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -47,79 +47,72 @@ static const unsigned int MAX_VECTOR_ALLOCATE = 5000000;
struct deserialize_type {};
constexpr deserialize_type deserialize {};
-//! Safely convert odd char pointer types to standard ones.
-inline char* CharCast(char* c) { return c; }
-inline char* CharCast(unsigned char* c) { return (char*)c; }
-inline const char* CharCast(const char* c) { return c; }
-inline const char* CharCast(const unsigned char* c) { return (const char*)c; }
-
/*
* Lowest-level serialization and conversion.
- * @note Sizes of these types are verified in the tests
*/
template<typename Stream> inline void ser_writedata8(Stream &s, uint8_t obj)
{
- s.write((char*)&obj, 1);
+ s.write(AsBytes(Span{&obj, 1}));
}
template<typename Stream> inline void ser_writedata16(Stream &s, uint16_t obj)
{
obj = htole16(obj);
- s.write((char*)&obj, 2);
+ s.write(AsBytes(Span{&obj, 1}));
}
template<typename Stream> inline void ser_writedata16be(Stream &s, uint16_t obj)
{
obj = htobe16(obj);
- s.write((char*)&obj, 2);
+ s.write(AsBytes(Span{&obj, 1}));
}
template<typename Stream> inline void ser_writedata32(Stream &s, uint32_t obj)
{
obj = htole32(obj);
- s.write((char*)&obj, 4);
+ s.write(AsBytes(Span{&obj, 1}));
}
template<typename Stream> inline void ser_writedata32be(Stream &s, uint32_t obj)
{
obj = htobe32(obj);
- s.write((char*)&obj, 4);
+ s.write(AsBytes(Span{&obj, 1}));
}
template<typename Stream> inline void ser_writedata64(Stream &s, uint64_t obj)
{
obj = htole64(obj);
- s.write((char*)&obj, 8);
+ s.write(AsBytes(Span{&obj, 1}));
}
template<typename Stream> inline uint8_t ser_readdata8(Stream &s)
{
uint8_t obj;
- s.read((char*)&obj, 1);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return obj;
}
template<typename Stream> inline uint16_t ser_readdata16(Stream &s)
{
uint16_t obj;
- s.read((char*)&obj, 2);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return le16toh(obj);
}
template<typename Stream> inline uint16_t ser_readdata16be(Stream &s)
{
uint16_t obj;
- s.read((char*)&obj, 2);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return be16toh(obj);
}
template<typename Stream> inline uint32_t ser_readdata32(Stream &s)
{
uint32_t obj;
- s.read((char*)&obj, 4);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return le32toh(obj);
}
template<typename Stream> inline uint32_t ser_readdata32be(Stream &s)
{
uint32_t obj;
- s.read((char*)&obj, 4);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return be32toh(obj);
}
template<typename Stream> inline uint64_t ser_readdata64(Stream &s)
{
uint64_t obj;
- s.read((char*)&obj, 8);
+ s.read(AsWritableBytes(Span{&obj, 1}));
return le64toh(obj);
}
@@ -127,7 +120,7 @@ template<typename Stream> inline uint64_t ser_readdata64(Stream &s)
/////////////////////////////////////////////////////////////////
//
// Templates for serializing to anything that looks like a stream,
-// i.e. anything that supports .read(char*, size_t) and .write(char*, size_t)
+// i.e. anything that supports .read(Span<std::byte>) and .write(Span<const std::byte>)
//
class CSizeComputer;
@@ -196,7 +189,7 @@ template<typename X> const X& ReadWriteAsHelper(const X& x) { return x; }
FORMATTER_METHODS(cls, obj)
#ifndef CHAR_EQUALS_INT8
-template<typename Stream> inline void Serialize(Stream& s, char a ) { ser_writedata8(s, a); } // TODO Get rid of bare char
+template <typename Stream> void Serialize(Stream&, char) = delete; // char serialization forbidden. Use uint8_t or int8_t
#endif
template<typename Stream> inline void Serialize(Stream& s, int8_t a ) { ser_writedata8(s, a); }
template<typename Stream> inline void Serialize(Stream& s, uint8_t a ) { ser_writedata8(s, a); }
@@ -206,13 +199,13 @@ template<typename Stream> inline void Serialize(Stream& s, int32_t a ) { ser_wri
template<typename Stream> inline void Serialize(Stream& s, uint32_t a) { ser_writedata32(s, a); }
template<typename Stream> inline void Serialize(Stream& s, int64_t a ) { ser_writedata64(s, a); }
template<typename Stream> inline void Serialize(Stream& s, uint64_t a) { ser_writedata64(s, a); }
-template<typename Stream, int N> inline void Serialize(Stream& s, const char (&a)[N]) { s.write(a, N); }
-template<typename Stream, int N> inline void Serialize(Stream& s, const unsigned char (&a)[N]) { s.write(CharCast(a), N); }
-template<typename Stream> inline void Serialize(Stream& s, const Span<const unsigned char>& span) { s.write(CharCast(span.data()), span.size()); }
-template<typename Stream> inline void Serialize(Stream& s, const Span<unsigned char>& span) { s.write(CharCast(span.data()), span.size()); }
+template<typename Stream, int N> inline void Serialize(Stream& s, const char (&a)[N]) { s.write(MakeByteSpan(a)); }
+template<typename Stream, int N> inline void Serialize(Stream& s, const unsigned char (&a)[N]) { s.write(MakeByteSpan(a)); }
+template<typename Stream> inline void Serialize(Stream& s, const Span<const unsigned char>& span) { s.write(AsBytes(span)); }
+template<typename Stream> inline void Serialize(Stream& s, const Span<unsigned char>& span) { s.write(AsBytes(span)); }
#ifndef CHAR_EQUALS_INT8
-template<typename Stream> inline void Unserialize(Stream& s, char& a ) { a = ser_readdata8(s); } // TODO Get rid of bare char
+template <typename Stream> void Unserialize(Stream&, char) = delete; // char serialization forbidden. Use uint8_t or int8_t
#endif
template<typename Stream> inline void Unserialize(Stream& s, int8_t& a ) { a = ser_readdata8(s); }
template<typename Stream> inline void Unserialize(Stream& s, uint8_t& a ) { a = ser_readdata8(s); }
@@ -222,9 +215,9 @@ template<typename Stream> inline void Unserialize(Stream& s, int32_t& a ) { a =
template<typename Stream> inline void Unserialize(Stream& s, uint32_t& a) { a = ser_readdata32(s); }
template<typename Stream> inline void Unserialize(Stream& s, int64_t& a ) { a = ser_readdata64(s); }
template<typename Stream> inline void Unserialize(Stream& s, uint64_t& a) { a = ser_readdata64(s); }
-template<typename Stream, int N> inline void Unserialize(Stream& s, char (&a)[N]) { s.read(a, N); }
-template<typename Stream, int N> inline void Unserialize(Stream& s, unsigned char (&a)[N]) { s.read(CharCast(a), N); }
-template<typename Stream> inline void Unserialize(Stream& s, Span<unsigned char>& span) { s.read(CharCast(span.data()), span.size()); }
+template<typename Stream, int N> inline void Unserialize(Stream& s, char (&a)[N]) { s.read(MakeWritableByteSpan(a)); }
+template<typename Stream, int N> inline void Unserialize(Stream& s, unsigned char (&a)[N]) { s.read(MakeWritableByteSpan(a)); }
+template<typename Stream> inline void Unserialize(Stream& s, Span<unsigned char>& span) { s.read(AsWritableBytes(span)); }
template <typename Stream> inline void Serialize(Stream& s, bool a) { uint8_t f = a; ser_writedata8(s, f); }
template <typename Stream> inline void Unserialize(Stream& s, bool& a) { uint8_t f = ser_readdata8(s); a = f; }
@@ -479,10 +472,10 @@ struct CustomUintFormatter
if (v < 0 || v > MAX) throw std::ios_base::failure("CustomUintFormatter value out of range");
if (BigEndian) {
uint64_t raw = htobe64(v);
- s.write(((const char*)&raw) + 8 - Bytes, Bytes);
+ s.write({BytePtr(&raw) + 8 - Bytes, Bytes});
} else {
uint64_t raw = htole64(v);
- s.write((const char*)&raw, Bytes);
+ s.write({BytePtr(&raw), Bytes});
}
}
@@ -492,10 +485,10 @@ struct CustomUintFormatter
static_assert(std::numeric_limits<U>::max() >= MAX && std::numeric_limits<U>::min() <= 0, "Assigned type too small");
uint64_t raw = 0;
if (BigEndian) {
- s.read(((char*)&raw) + 8 - Bytes, Bytes);
+ s.read({BytePtr(&raw) + 8 - Bytes, Bytes});
v = static_cast<I>(be64toh(raw));
} else {
- s.read((char*)&raw, Bytes);
+ s.read({BytePtr(&raw), Bytes});
v = static_cast<I>(le64toh(raw));
}
}
@@ -551,7 +544,7 @@ struct LimitedStringFormatter
throw std::ios_base::failure("String length limit exceeded");
}
v.resize(size);
- if (size != 0) s.read((char*)v.data(), size);
+ if (size != 0) s.read(MakeWritableByteSpan(v));
}
template<typename Stream>
@@ -715,7 +708,7 @@ void Serialize(Stream& os, const std::basic_string<C>& str)
{
WriteCompactSize(os, str.size());
if (!str.empty())
- os.write((char*)str.data(), str.size() * sizeof(C));
+ os.write(MakeByteSpan(str));
}
template<typename Stream, typename C>
@@ -724,7 +717,7 @@ void Unserialize(Stream& is, std::basic_string<C>& str)
unsigned int nSize = ReadCompactSize(is);
str.resize(nSize);
if (nSize != 0)
- is.read((char*)str.data(), nSize * sizeof(C));
+ is.read(MakeWritableByteSpan(str));
}
@@ -737,7 +730,7 @@ void Serialize_impl(Stream& os, const prevector<N, T>& v, const unsigned char&)
{
WriteCompactSize(os, v.size());
if (!v.empty())
- os.write((char*)v.data(), v.size() * sizeof(T));
+ os.write(MakeByteSpan(v));
}
template<typename Stream, unsigned int N, typename T, typename V>
@@ -764,7 +757,7 @@ void Unserialize_impl(Stream& is, prevector<N, T>& v, const unsigned char&)
{
unsigned int blk = std::min(nSize - i, (unsigned int)(1 + 4999999 / sizeof(T)));
v.resize_uninitialized(i + blk);
- is.read((char*)&v[i], blk * sizeof(T));
+ is.read(AsWritableBytes(Span{&v[i], blk}));
i += blk;
}
}
@@ -791,7 +784,7 @@ void Serialize_impl(Stream& os, const std::vector<T, A>& v, const unsigned char&
{
WriteCompactSize(os, v.size());
if (!v.empty())
- os.write((char*)v.data(), v.size() * sizeof(T));
+ os.write(MakeByteSpan(v));
}
template<typename Stream, typename T, typename A>
@@ -830,7 +823,7 @@ void Unserialize_impl(Stream& is, std::vector<T, A>& v, const unsigned char&)
{
unsigned int blk = std::min(nSize - i, (unsigned int)(1 + 4999999 / sizeof(T)));
v.resize(i + blk);
- is.read((char*)&v[i], blk * sizeof(T));
+ is.read(AsWritableBytes(Span{&v[i], blk}));
i += blk;
}
}
@@ -995,9 +988,9 @@ protected:
public:
explicit CSizeComputer(int nVersionIn) : nSize(0), nVersion(nVersionIn) {}
- void write(const char *psz, size_t _nSize)
+ void write(Span<const std::byte> src)
{
- this->nSize += _nSize;
+ this->nSize += src.size();
}
/** Pretend _nSize bytes are written, without specifying them. */
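For orientation, a minimal sketch (illustration only, not part of the patch) of a type that satisfies the Span-based stream contract described in the comment above; the class name is made up:

```cpp
// Hypothetical minimal stream, illustration only.
#include <serialize.h>
#include <span.h>

#include <algorithm>
#include <cstddef>
#include <ios>
#include <vector>

class ByteVectorStream
{
    std::vector<std::byte> m_buf;
    size_t m_read_pos{0};

public:
    void write(Span<const std::byte> src)
    {
        m_buf.insert(m_buf.end(), src.begin(), src.end());
    }
    void read(Span<std::byte> dst)
    {
        if (m_read_pos + dst.size() > m_buf.size()) {
            throw std::ios_base::failure("ByteVectorStream::read: end of data");
        }
        std::copy_n(m_buf.begin() + m_read_pos, dst.size(), dst.begin());
        m_read_pos += dst.size();
    }
};

// Usage: the low-level helpers above only require read()/write(), e.g.
//   ByteVectorStream s;
//   ser_writedata32(s, 42);
//   uint32_t x{ser_readdata32(s)};
```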
diff --git a/src/span.h b/src/span.h
index 47390a5bb2..b627b993c2 100644
--- a/src/span.h
+++ b/src/span.h
@@ -243,16 +243,21 @@ T& SpanPopBack(Span<T>& span)
return back;
}
+//! Convert a data pointer to a std::byte data pointer.
+//! Where possible, please use the safer AsBytes helpers.
+inline const std::byte* BytePtr(const void* data) { return reinterpret_cast<const std::byte*>(data); }
+inline std::byte* BytePtr(void* data) { return reinterpret_cast<std::byte*>(data); }
+
// From C++20 as_bytes and as_writeable_bytes
template <typename T>
Span<const std::byte> AsBytes(Span<T> s) noexcept
{
- return {reinterpret_cast<const std::byte*>(s.data()), s.size_bytes()};
+ return {BytePtr(s.data()), s.size_bytes()};
}
template <typename T>
Span<std::byte> AsWritableBytes(Span<T> s) noexcept
{
- return {reinterpret_cast<std::byte*>(s.data()), s.size_bytes()};
+ return {BytePtr(s.data()), s.size_bytes()};
}
template <typename V>
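As a quick illustration (not from the patch) of when each helper applies: BytePtr() is for bare objects where the caller must supply the length by hand, while the AsBytes()/MakeByteSpan() family derives the length from an existing span or container.

```cpp
// Illustration only.
#include <span.h>

#include <cstdint>
#include <vector>

void Example()
{
    uint64_t raw{0};
    std::vector<unsigned char> buf(16);

    Span<const std::byte> a{BytePtr(&raw), sizeof(raw)}; // bare object: size supplied manually
    Span<const std::byte> b{MakeByteSpan(buf)};          // container: size taken from the container
    (void)a;
    (void)b;
}
```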
diff --git a/src/streams.h b/src/streams.h
index 384bb632b0..2f26be6dd8 100644
--- a/src/streams.h
+++ b/src/streams.h
@@ -49,14 +49,14 @@ public:
return (*this);
}
- void write(const char* pch, size_t nSize)
+ void write(Span<const std::byte> src)
{
- stream->write(pch, nSize);
+ stream->write(src);
}
- void read(char* pch, size_t nSize)
+ void read(Span<std::byte> dst)
{
- stream->read(pch, nSize);
+ stream->read(dst);
}
int GetVersion() const { return nVersion; }
@@ -94,17 +94,17 @@ class CVectorWriter
{
::SerializeMany(*this, std::forward<Args>(args)...);
}
- void write(const char* pch, size_t nSize)
+ void write(Span<const std::byte> src)
{
assert(nPos <= vchData.size());
- size_t nOverwrite = std::min(nSize, vchData.size() - nPos);
+ size_t nOverwrite = std::min(src.size(), vchData.size() - nPos);
if (nOverwrite) {
- memcpy(vchData.data() + nPos, reinterpret_cast<const unsigned char*>(pch), nOverwrite);
+ memcpy(vchData.data() + nPos, src.data(), nOverwrite);
}
- if (nOverwrite < nSize) {
- vchData.insert(vchData.end(), reinterpret_cast<const unsigned char*>(pch) + nOverwrite, reinterpret_cast<const unsigned char*>(pch) + nSize);
+ if (nOverwrite < src.size()) {
+ vchData.insert(vchData.end(), UCharCast(src.data()) + nOverwrite, UCharCast(src.end()));
}
- nPos += nSize;
+ nPos += src.size();
}
template<typename T>
CVectorWriter& operator<<(const T& obj)
@@ -161,18 +161,18 @@ public:
size_t size() const { return m_data.size(); }
bool empty() const { return m_data.empty(); }
- void read(char* dst, size_t n)
+ void read(Span<std::byte> dst)
{
- if (n == 0) {
+ if (dst.size() == 0) {
return;
}
// Read from the beginning of the buffer
- if (n > m_data.size()) {
+ if (dst.size() > m_data.size()) {
throw std::ios_base::failure("SpanReader::read(): end of data");
}
- memcpy(dst, m_data.data(), n);
- m_data = m_data.subspan(n);
+ memcpy(dst.data(), m_data.data(), dst.size());
+ m_data = m_data.subspan(dst.size());
}
};
@@ -206,6 +206,7 @@ public:
: nType{nTypeIn},
nVersion{nVersionIn} {}
+ explicit CDataStream(Span<const uint8_t> sp, int type, int version) : CDataStream{AsBytes(sp), type, version} {}
explicit CDataStream(Span<const value_type> sp, int nTypeIn, int nVersionIn)
: vch(sp.data(), sp.data() + sp.size()),
nType{nTypeIn},
@@ -221,7 +222,7 @@ public:
std::string str() const
{
- return (std::string(begin(), end()));
+ return std::string{UCharCast(data()), UCharCast(data() + size())};
}
@@ -342,16 +343,16 @@ public:
void SetVersion(int n) { nVersion = n; }
int GetVersion() const { return nVersion; }
- void read(char* pch, size_t nSize)
+ void read(Span<value_type> dst)
{
- if (nSize == 0) return;
+ if (dst.size() == 0) return;
// Read from the beginning of the buffer
- unsigned int nReadPosNext = nReadPos + nSize;
+ unsigned int nReadPosNext = nReadPos + dst.size();
if (nReadPosNext > vch.size()) {
throw std::ios_base::failure("CDataStream::read(): end of data");
}
- memcpy(pch, &vch[nReadPos], nSize);
+ memcpy(dst.data(), &vch[nReadPos], dst.size());
if (nReadPosNext == vch.size())
{
nReadPos = 0;
@@ -379,10 +380,10 @@ public:
nReadPos = nReadPosNext;
}
- void write(const char* pch, size_t nSize)
+ void write(Span<const value_type> src)
{
// Write to the end of the buffer
- vch.insert(vch.end(), pch, pch + nSize);
+ vch.insert(vch.end(), src.begin(), src.end());
}
template<typename Stream>
@@ -390,7 +391,7 @@ public:
{
// Special case: stream << stream concatenates like stream += stream
if (!vch.empty())
- s.write((char*)vch.data(), vch.size() * sizeof(value_type));
+ s.write(MakeByteSpan(vch));
}
template<typename T>
@@ -421,7 +422,7 @@ public:
}
for (size_type i = 0, j = 0; i != size(); i++) {
- vch[i] ^= key[j++];
+ vch[i] ^= std::byte{key[j++]};
// This potentially acts on very many bytes of data, so it's
// important that we calculate `j`, i.e. the `key` index in this
@@ -594,12 +595,13 @@ public:
int GetType() const { return nType; }
int GetVersion() const { return nVersion; }
- void read(char* pch, size_t nSize)
+ void read(Span<std::byte> dst)
{
if (!file)
throw std::ios_base::failure("CAutoFile::read: file handle is nullptr");
- if (fread(pch, 1, nSize, file) != nSize)
+ if (fread(dst.data(), 1, dst.size(), file) != dst.size()) {
throw std::ios_base::failure(feof(file) ? "CAutoFile::read: end of file" : "CAutoFile::read: fread failed");
+ }
}
void ignore(size_t nSize)
@@ -615,12 +617,13 @@ public:
}
}
- void write(const char* pch, size_t nSize)
+ void write(Span<const std::byte> src)
{
if (!file)
throw std::ios_base::failure("CAutoFile::write: file handle is nullptr");
- if (fwrite(pch, 1, nSize, file) != nSize)
+ if (fwrite(src.data(), 1, src.size(), file) != src.size()) {
throw std::ios_base::failure("CAutoFile::write: write failed");
+ }
}
template<typename T>
@@ -661,7 +664,7 @@ private:
uint64_t nReadPos; //!< how many bytes have been read from this
uint64_t nReadLimit; //!< up to which position we're allowed to read
uint64_t nRewind; //!< how many bytes we guarantee to rewind
- std::vector<char> vchBuf; //!< the buffer
+ std::vector<std::byte> vchBuf; //!< the buffer
protected:
//! read data from the source to fill the buffer
@@ -682,8 +685,8 @@ protected:
}
public:
- CBufferedFile(FILE *fileIn, uint64_t nBufSize, uint64_t nRewindIn, int nTypeIn, int nVersionIn) :
- nType(nTypeIn), nVersion(nVersionIn), nSrcPos(0), nReadPos(0), nReadLimit(std::numeric_limits<uint64_t>::max()), nRewind(nRewindIn), vchBuf(nBufSize, 0)
+ CBufferedFile(FILE* fileIn, uint64_t nBufSize, uint64_t nRewindIn, int nTypeIn, int nVersionIn)
+ : nType(nTypeIn), nVersion(nVersionIn), nSrcPos(0), nReadPos(0), nReadLimit(std::numeric_limits<uint64_t>::max()), nRewind(nRewindIn), vchBuf(nBufSize, std::byte{0})
{
if (nRewindIn >= nBufSize)
throw std::ios_base::failure("Rewind limit must be less than buffer size");
@@ -716,22 +719,23 @@ public:
}
//! read a number of bytes
- void read(char *pch, size_t nSize) {
- if (nSize + nReadPos > nReadLimit)
+ void read(Span<std::byte> dst)
+ {
+ if (dst.size() + nReadPos > nReadLimit) {
throw std::ios_base::failure("Read attempted past buffer limit");
- while (nSize > 0) {
+ }
+ while (dst.size() > 0) {
if (nReadPos == nSrcPos)
Fill();
unsigned int pos = nReadPos % vchBuf.size();
- size_t nNow = nSize;
+ size_t nNow = dst.size();
if (nNow + pos > vchBuf.size())
nNow = vchBuf.size() - pos;
if (nNow + nReadPos > nSrcPos)
nNow = nSrcPos - nReadPos;
- memcpy(pch, &vchBuf[pos], nNow);
+ memcpy(dst.data(), &vchBuf[pos], nNow);
nReadPos += nNow;
- pch += nNow;
- nSize -= nNow;
+ dst = dst.subspan(nNow);
}
}
@@ -774,12 +778,14 @@ public:
}
//! search for a given byte in the stream, and remain positioned on it
- void FindByte(char ch) {
+ void FindByte(uint8_t ch)
+ {
while (true) {
if (nReadPos == nSrcPos)
Fill();
- if (vchBuf[nReadPos % vchBuf.size()] == ch)
+ if (vchBuf[nReadPos % vchBuf.size()] == std::byte{ch}) {
break;
+ }
nReadPos++;
}
}
diff --git a/src/support/allocators/zeroafterfree.h b/src/support/allocators/zeroafterfree.h
index bc9c95eb53..0befe0ffcd 100644
--- a/src/support/allocators/zeroafterfree.h
+++ b/src/support/allocators/zeroafterfree.h
@@ -41,6 +41,6 @@ struct zero_after_free_allocator : public std::allocator<T> {
};
/** Byte-vector that clears its contents before deletion. */
-using SerializeData = std::vector<uint8_t, zero_after_free_allocator<uint8_t>>;
+using SerializeData = std::vector<std::byte, zero_after_free_allocator<std::byte>>;
#endif // BITCOIN_SUPPORT_ALLOCATORS_ZEROAFTERFREE_H
diff --git a/src/test/README.md b/src/test/README.md
index d03411c3ed..90d0e7102d 100644
--- a/src/test/README.md
+++ b/src/test/README.md
@@ -33,19 +33,31 @@ the `src/qt/test/test_main.cpp` file.
### Running individual tests
-`test_bitcoin` has some built-in command-line arguments; for
-example, to run just the `getarg_tests` verbosely:
+`test_bitcoin` accepts command line arguments from the Boost framework.
+For example, to run just the `getarg_tests` suite of tests:
- test_bitcoin --log_level=all --run_test=getarg_tests -- DEBUG_LOG_OUT
+```bash
+test_bitcoin --log_level=all --run_test=getarg_tests
+```
`log_level` controls the verbosity of the test framework, which logs when a
-test case is entered, for example. The `DEBUG_LOG_OUT` after the two dashes
-redirects the debug log, which would normally go to a file in the test datadir
+test case is entered, for example. `test_bitcoin` also accepts the command
+line arguments accepted by `bitcoind`. Use `--` to separate both types of
+arguments:
+
+```bash
+test_bitcoin --log_level=all --run_test=getarg_tests -- -printtoconsole=1
+```
+
+The `-printtoconsole=1` after the two dashes redirects the debug log, which
+would normally go to a file in the test datadir
(`BasicTestingSetup::m_path_root`), to the standard terminal output.
... or to run just the doubledash test:
- test_bitcoin --run_test=getarg_tests/doubledash
+```bash
+test_bitcoin --run_test=getarg_tests/doubledash
+```
Run `test_bitcoin --help` for the full list.
@@ -68,7 +80,7 @@ on failure. For running individual tests verbosely, refer to the section
To write to logs from unit tests you need to use specific message methods
provided by Boost. The simplest is `BOOST_TEST_MESSAGE`.
-For debugging you can launch the `test_bitcoin` executable with `gdb`or `lldb` and
+For debugging you can launch the `test_bitcoin` executable with `gdb` or `lldb` and
start debugging, just like you would with any other program:
```bash
@@ -95,7 +107,7 @@ Running the tests and hitting a segmentation fault should now produce a file cal
`/proc/sys/kernel/core_pattern`).
You can then explore the core dump using
-``` bash
+```bash
gdb src/test/test_bitcoin core
(gdb) bt # produce a backtrace for where a segfault occurred
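
As a concrete illustration of the argument forwarding documented above, the addrman unit tests changed below read the `-checkaddrman` option through `BasicTestingSetup`. A hypothetical invocation (paths as in the gdb example) would be:

```bash
# Illustrative sketch only: options after `--` are forwarded to the test's
# ArgsManager; here they raise the addrman consistency-check frequency and
# redirect the debug log to the terminal.
src/test/test_bitcoin --run_test=addrman_tests -- -checkaddrman=1 -printtoconsole=1
```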
diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp
index 752bd0af9e..efc30b6822 100644
--- a/src/test/addrman_tests.cpp
+++ b/src/test/addrman_tests.cpp
@@ -21,6 +21,15 @@
#include <string>
using namespace std::literals;
+using node::NodeContext;
+
+static const std::vector<bool> EMPTY_ASMAP;
+static const bool DETERMINISTIC{true};
+
+static int32_t GetCheckRatio(const NodeContext& node_ctx)
+{
+ return std::clamp<int32_t>(node_ctx.args->GetIntArg("-checkaddrman", 100), 0, 1000000);
+}
static CNetAddr ResolveIP(const std::string& ip)
{
@@ -49,17 +58,11 @@ static std::vector<bool> FromBytes(const unsigned char* source, int vector_size)
return result;
}
-/* Utility function to create a deterministic addrman, as used in most tests */
-static std::unique_ptr<AddrMan> TestAddrMan(std::vector<bool> asmap = std::vector<bool>())
-{
- return std::make_unique<AddrMan>(asmap, /*deterministic=*/true, /*consistency_check_ratio=*/100);
-}
-
BOOST_FIXTURE_TEST_SUITE(addrman_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(addrman_simple)
{
- auto addrman = TestAddrMan();
+ auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node));
CNetAddr source = ResolveIP("252.2.2.2");
@@ -93,7 +96,7 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
BOOST_CHECK(addrman->size() >= 1);
// Test: reset addrman and test AddrMan::Add multiple addresses works as expected
- addrman = TestAddrMan();
+ addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node));
std::vector<CAddress> vAddr;
vAddr.push_back(CAddress(ResolveService("250.1.1.3", 8333), NODE_NONE));
vAddr.push_back(CAddress(ResolveService("250.1.1.4", 8333), NODE_NONE));
@@ -103,7 +106,7 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
BOOST_AUTO_TEST_CASE(addrman_ports)
{
- auto addrman = TestAddrMan();
+ auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node));
CNetAddr source = ResolveIP("252.2.2.2");
@@ -132,7 +135,7 @@ BOOST_AUTO_TEST_CASE(addrman_ports)
BOOST_AUTO_TEST_CASE(addrman_select)
{
- auto addrman = TestAddrMan();
+ auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node));
CNetAddr source = ResolveIP("252.2.2.2");
@@ -191,7 +194,7 @@ BOOST_AUTO_TEST_CASE(addrman_select)
BOOST_AUTO_TEST_CASE(addrman_new_collisions)
{
- auto addrman = TestAddrMan();
+ auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node));
CNetAddr source = ResolveIP("252.2.2.2");
@@ -220,7 +223,7 @@ BOOST_AUTO_TEST_CASE(addrman_new_collisions)
BOOST_AUTO_TEST_CASE(addrman_new_multiplicity)
{
- auto addrman = TestAddrMan();
+ auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node));
CAddress addr{CAddress(ResolveService("253.3.3.3", 8333), NODE_NONE)};
int64_t start_time{GetAdjustedTime()};
addr.nTime = start_time;
@@ -252,7 +255,7 @@ BOOST_AUTO_TEST_CASE(addrman_new_multiplicity)
BOOST_AUTO_TEST_CASE(addrman_tried_collisions)
{
- auto addrman = TestAddrMan();
+ auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node));
CNetAddr source = ResolveIP("252.2.2.2");
@@ -283,7 +286,7 @@ BOOST_AUTO_TEST_CASE(addrman_tried_collisions)
BOOST_AUTO_TEST_CASE(addrman_getaddr)
{
- auto addrman = TestAddrMan();
+ auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node));
// Test: Sanity check, GetAddr should never return anything if addrman
// is empty.
@@ -604,9 +607,11 @@ BOOST_AUTO_TEST_CASE(addrman_serialization)
{
std::vector<bool> asmap1 = FromBytes(asmap_raw, sizeof(asmap_raw) * 8);
- auto addrman_asmap1 = TestAddrMan(asmap1);
- auto addrman_asmap1_dup = TestAddrMan(asmap1);
- auto addrman_noasmap = TestAddrMan();
+ const auto ratio = GetCheckRatio(m_node);
+ auto addrman_asmap1 = std::make_unique<AddrMan>(asmap1, DETERMINISTIC, ratio);
+ auto addrman_asmap1_dup = std::make_unique<AddrMan>(asmap1, DETERMINISTIC, ratio);
+ auto addrman_noasmap = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, ratio);
+
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
CAddress addr = CAddress(ResolveService("250.1.1.1"), NODE_NONE);
@@ -634,8 +639,8 @@ BOOST_AUTO_TEST_CASE(addrman_serialization)
BOOST_CHECK(addr_pos1.position != addr_pos3.position);
// deserializing non-asmaped peers.dat to asmaped addrman
- addrman_asmap1 = TestAddrMan(asmap1);
- addrman_noasmap = TestAddrMan();
+ addrman_asmap1 = std::make_unique<AddrMan>(asmap1, DETERMINISTIC, ratio);
+ addrman_noasmap = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, ratio);
addrman_noasmap->Add({addr}, default_source);
stream << *addrman_noasmap;
stream >> *addrman_asmap1;
@@ -646,8 +651,8 @@ BOOST_AUTO_TEST_CASE(addrman_serialization)
BOOST_CHECK(addr_pos4 == addr_pos2);
// used to map to different buckets, now maps to the same bucket.
- addrman_asmap1 = TestAddrMan(asmap1);
- addrman_noasmap = TestAddrMan();
+ addrman_asmap1 = std::make_unique<AddrMan>(asmap1, DETERMINISTIC, ratio);
+ addrman_noasmap = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, ratio);
CAddress addr1 = CAddress(ResolveService("250.1.1.1"), NODE_NONE);
CAddress addr2 = CAddress(ResolveService("250.2.1.1"), NODE_NONE);
addrman_noasmap->Add({addr, addr2}, default_source);
@@ -666,7 +671,7 @@ BOOST_AUTO_TEST_CASE(remove_invalid)
{
// Confirm that invalid addresses are ignored in unserialization.
- auto addrman = TestAddrMan();
+ auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node));
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
const CAddress new1{ResolveService("5.5.5.5"), NODE_NONE};
@@ -698,14 +703,14 @@ BOOST_AUTO_TEST_CASE(remove_invalid)
BOOST_REQUIRE(pos + sizeof(tried2_raw_replacement) <= stream.size());
memcpy(stream.data() + pos, tried2_raw_replacement, sizeof(tried2_raw_replacement));
- addrman = TestAddrMan();
+ addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node));
stream >> *addrman;
BOOST_CHECK_EQUAL(addrman->size(), 2);
}
BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision)
{
- auto addrman = TestAddrMan();
+ auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node));
BOOST_CHECK(addrman->size() == 0);
@@ -738,7 +743,7 @@ BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision)
BOOST_AUTO_TEST_CASE(addrman_noevict)
{
- auto addrman = TestAddrMan();
+ auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node));
// Add 35 addresses.
CNetAddr source = ResolveIP("252.2.2.2");
@@ -790,7 +795,7 @@ BOOST_AUTO_TEST_CASE(addrman_noevict)
BOOST_AUTO_TEST_CASE(addrman_evictionworks)
{
- auto addrman = TestAddrMan();
+ auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node));
BOOST_CHECK(addrman->size() == 0);
@@ -860,8 +865,7 @@ static CDataStream AddrmanToStream(const AddrMan& addrman)
BOOST_AUTO_TEST_CASE(load_addrman)
{
- AddrMan addrman{/*asmap=*/ std::vector<bool>(), /*deterministic=*/ true,
- /*consistency_check_ratio=*/ 100};
+ AddrMan addrman{EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)};
CService addr1, addr2, addr3;
BOOST_CHECK(Lookup("250.7.1.1", addr1, 8333, false));
@@ -880,7 +884,7 @@ BOOST_AUTO_TEST_CASE(load_addrman)
// Test that the de-serialization does not throw an exception.
CDataStream ssPeers1 = AddrmanToStream(addrman);
bool exceptionThrown = false;
- AddrMan addrman1(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/100);
+ AddrMan addrman1{EMPTY_ASMAP, !DETERMINISTIC, GetCheckRatio(m_node)};
BOOST_CHECK(addrman1.size() == 0);
try {
@@ -897,7 +901,7 @@ BOOST_AUTO_TEST_CASE(load_addrman)
// Test that ReadFromStream creates an addrman with the correct number of addrs.
CDataStream ssPeers2 = AddrmanToStream(addrman);
- AddrMan addrman2(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/100);
+ AddrMan addrman2{EMPTY_ASMAP, !DETERMINISTIC, GetCheckRatio(m_node)};
BOOST_CHECK(addrman2.size() == 0);
ReadFromStream(addrman2, ssPeers2);
BOOST_CHECK(addrman2.size() == 3);
@@ -935,7 +939,7 @@ BOOST_AUTO_TEST_CASE(load_addrman_corrupted)
// Test that the de-serialization of corrupted peers.dat throws an exception.
CDataStream ssPeers1 = MakeCorruptPeersDat();
bool exceptionThrown = false;
- AddrMan addrman1(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/100);
+ AddrMan addrman1{EMPTY_ASMAP, !DETERMINISTIC, GetCheckRatio(m_node)};
BOOST_CHECK(addrman1.size() == 0);
try {
unsigned char pchMsgTmp[4];
@@ -951,7 +955,7 @@ BOOST_AUTO_TEST_CASE(load_addrman_corrupted)
// Test that ReadFromStream fails if peers.dat is corrupt
CDataStream ssPeers2 = MakeCorruptPeersDat();
- AddrMan addrman2(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/100);
+ AddrMan addrman2{EMPTY_ASMAP, !DETERMINISTIC, GetCheckRatio(m_node)};
BOOST_CHECK(addrman2.size() == 0);
BOOST_CHECK_THROW(ReadFromStream(addrman2, ssPeers2), std::ios_base::failure);
}
@@ -959,7 +963,7 @@ BOOST_AUTO_TEST_CASE(load_addrman_corrupted)
BOOST_AUTO_TEST_CASE(addrman_update_address)
{
// Tests updating nTime via Connected() and nServices via SetServices()
- auto addrman = TestAddrMan();
+ auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node));
CNetAddr source{ResolveIP("252.2.2.2")};
CAddress addr{CAddress(ResolveService("250.1.1.1", 8333), NODE_NONE)};
diff --git a/src/test/arith_uint256_tests.cpp b/src/test/arith_uint256_tests.cpp
index a7494be882..a923d38467 100644
--- a/src/test/arith_uint256_tests.cpp
+++ b/src/test/arith_uint256_tests.cpp
@@ -129,11 +129,11 @@ static void shiftArrayRight(unsigned char* to, const unsigned char* from, unsign
{
unsigned int F = (T+bitsToShift/8);
if (F < arrayLength)
- to[T] = from[F] >> (bitsToShift%8);
+ to[T] = uint8_t(from[F] >> (bitsToShift % 8));
else
to[T] = 0;
if (F + 1 < arrayLength)
- to[T] |= from[(F+1)] << (8-bitsToShift%8);
+ to[T] |= uint8_t(from[(F + 1)] << (8 - bitsToShift % 8));
}
}
@@ -144,9 +144,9 @@ static void shiftArrayLeft(unsigned char* to, const unsigned char* from, unsigne
if (T >= bitsToShift/8)
{
unsigned int F = T-bitsToShift/8;
- to[T] = from[F] << (bitsToShift%8);
+ to[T] = uint8_t(from[F] << (bitsToShift % 8));
if (T >= bitsToShift/8+1)
- to[T] |= from[F-1] >> (8-bitsToShift%8);
+ to[T] |= uint8_t(from[F - 1] >> (8 - bitsToShift % 8));
}
else {
to[T] = 0;
@@ -202,7 +202,7 @@ BOOST_AUTO_TEST_CASE( unaryOperators ) // ! ~ -
BOOST_CHECK(~ZeroL == MaxL);
unsigned char TmpArray[32];
- for (unsigned int i = 0; i < 32; ++i) { TmpArray[i] = ~R1Array[i]; }
+ for (unsigned int i = 0; i < 32; ++i) { TmpArray[i] = uint8_t(~R1Array[i]); }
BOOST_CHECK(arith_uint256V(std::vector<unsigned char>(TmpArray,TmpArray+32)) == (~R1L));
BOOST_CHECK(-ZeroL == ZeroL);
@@ -215,7 +215,7 @@ BOOST_AUTO_TEST_CASE( unaryOperators ) // ! ~ -
// Check if doing _A_ _OP_ _B_ results in the same as applying _OP_ onto each
// element of Aarray and Barray, and then converting the result into an arith_uint256.
#define CHECKBITWISEOPERATOR(_A_,_B_,_OP_) \
- for (unsigned int i = 0; i < 32; ++i) { TmpArray[i] = _A_##Array[i] _OP_ _B_##Array[i]; } \
+ for (unsigned int i = 0; i < 32; ++i) { TmpArray[i] = uint8_t(_A_##Array[i] _OP_ _B_##Array[i]); } \
BOOST_CHECK(arith_uint256V(std::vector<unsigned char>(TmpArray,TmpArray+32)) == (_A_##L _OP_ _B_##L));
#define CHECKASSIGNMENTOPERATOR(_A_,_B_,_OP_) \
diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp
index bd579db205..35c4108caa 100644
--- a/src/test/bloom_tests.cpp
+++ b/src/test/bloom_tests.cpp
@@ -43,8 +43,9 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize)
stream << filter;
std::vector<uint8_t> expected = ParseHex("03614e9b050000000000000001");
+ auto result{MakeUCharSpan(stream)};
- BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(), expected.begin(), expected.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(result.begin(), result.end(), expected.begin(), expected.end());
BOOST_CHECK_MESSAGE( filter.contains(ParseHex("99108ad8ed9bb6274d3980bab5a85c048f0950c8")), "Bloom filter doesn't contain just-inserted object!");
}
@@ -69,8 +70,9 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize_with_tweak)
stream << filter;
std::vector<uint8_t> expected = ParseHex("03ce4299050000000100008001");
+ auto result{MakeUCharSpan(stream)};
- BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(), expected.begin(), expected.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(result.begin(), result.end(), expected.begin(), expected.end());
}
BOOST_AUTO_TEST_CASE(bloom_create_insert_key)
@@ -89,8 +91,9 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_key)
stream << filter;
std::vector<unsigned char> expected = ParseHex("038fc16b080000000000000001");
+ auto result{MakeUCharSpan(stream)};
- BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(), expected.begin(), expected.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(result.begin(), result.end(), expected.begin(), expected.end());
}
BOOST_AUTO_TEST_CASE(bloom_match)
@@ -341,8 +344,9 @@ BOOST_AUTO_TEST_CASE(merkle_block_3_and_serialize)
merkleStream << merkleBlock;
std::vector<uint8_t> expected = ParseHex("0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b4b8b0000000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f196367291b4d4c86041b8fa45d630100000001b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f19630101");
+ auto result{MakeUCharSpan(merkleStream)};
- BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), merkleStream.begin(), merkleStream.end());
+ BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), result.begin(), result.end());
}
BOOST_AUTO_TEST_CASE(merkle_block_4)
diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp
index 3921a9d2d1..922fd8e513 100644
--- a/src/test/coins_tests.cpp
+++ b/src/test/coins_tests.cpp
@@ -393,11 +393,11 @@ BOOST_AUTO_TEST_CASE(updatecoins_simulation_test)
// Update the expected result to know about the new output coins
assert(tx.vout.size() == 1);
const COutPoint outpoint(tx.GetHash(), 0);
- result[outpoint] = Coin(tx.vout[0], height, CTransaction(tx).IsCoinBase());
+ result[outpoint] = Coin{tx.vout[0], int(height), CTransaction(tx).IsCoinBase()};
// Call UpdateCoins on the top cache
CTxUndo undo;
- UpdateCoins(CTransaction(tx), *(stack.back()), undo, height);
+ UpdateCoins(CTransaction(tx), *(stack.back()), undo, int(height));
// Update the utxo set for future spends
utxoset.insert(outpoint);
diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp
index 9b58711eb8..5a3e382c3f 100644
--- a/src/test/descriptor_tests.cpp
+++ b/src/test/descriptor_tests.cpp
@@ -42,6 +42,7 @@ constexpr int HARDENED = 2; // Derivation needs access to private keys
constexpr int UNSOLVABLE = 4; // This descriptor is not expected to be solvable
constexpr int SIGNABLE = 8; // We can sign with this descriptor (this is not true when actual BIP32 derivation is used, as that's not integrated in our signing code)
constexpr int DERIVE_HARDENED = 16; // The final derivation is hardened, i.e. ends with *' or *h
+constexpr int MIXED_PUBKEYS = 32;
/** Compare two descriptors. If only one of them has a checksum, the checksum is ignored. */
bool EqualDescriptor(std::string a, std::string b)
@@ -73,6 +74,18 @@ std::string UseHInsteadOfApostrophe(const std::string& desc)
return ret;
}
+// Count the number of times the string "xpub" appears in a descriptor string
+static size_t CountXpubs(const std::string& desc)
+{
+ size_t count = 0;
+ size_t p = desc.find("xpub", 0);
+ while (p != std::string::npos) {
+ count++;
+ p = desc.find("xpub", p + 1);
+ }
+ return count;
+}
+
const std::set<std::vector<uint32_t>> ONLY_EMPTY{{}};
void DoCheck(const std::string& prv, const std::string& pub, const std::string& norm_prv, const std::string& norm_pub, int flags, const std::vector<std::vector<std::string>>& scripts, const std::optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY,
@@ -171,7 +184,8 @@ void DoCheck(const std::string& prv, const std::string& pub, const std::string&
// Check whether keys are in the cache
const auto& der_xpub_cache = desc_cache.GetCachedDerivedExtPubKeys();
const auto& parent_xpub_cache = desc_cache.GetCachedParentExtPubKeys();
- if ((flags & RANGE) && !(flags & DERIVE_HARDENED)) {
+ const size_t num_xpubs = CountXpubs(pub1);
+ if ((flags & RANGE) && !(flags & DERIVE_HARDENED)) {
// For ranged, unhardened derivation, None of the keys in origins should appear in the cache but the cache should have parent keys
// But we can derive one level from each of those parent keys and find them all
BOOST_CHECK(der_xpub_cache.empty());
@@ -183,13 +197,22 @@ void DoCheck(const std::string& prv, const std::string& pub, const std::string&
xpub.Derive(der, i);
pubkeys.insert(der.pubkey);
}
+ int count_pks = 0;
for (const auto& origin_pair : script_provider_cached.origins) {
const CPubKey& pk = origin_pair.second.first;
- BOOST_CHECK(pubkeys.count(pk) > 0);
+ count_pks += pubkeys.count(pk);
}
- } else if (pub1.find("xpub") != std::string::npos) {
+ if (flags & MIXED_PUBKEYS) {
+ BOOST_CHECK_EQUAL(num_xpubs, count_pks);
+ } else {
+ BOOST_CHECK_EQUAL(script_provider_cached.origins.size(), count_pks);
+ }
+ } else if (num_xpubs > 0) {
// For ranged, hardened derivation, or not ranged, but has an xpub, all of the keys should appear in the cache
- BOOST_CHECK(der_xpub_cache.size() + parent_xpub_cache.size() == script_provider_cached.origins.size());
+ BOOST_CHECK(der_xpub_cache.size() + parent_xpub_cache.size() == num_xpubs);
+ if (!(flags & MIXED_PUBKEYS)) {
+ BOOST_CHECK(num_xpubs == script_provider_cached.origins.size());
+ }
// Get all of the derived pubkeys
std::set<CPubKey> pubkeys;
for (const auto& xpub_map_pair : der_xpub_cache) {
@@ -206,12 +229,18 @@ void DoCheck(const std::string& prv, const std::string& pub, const std::string&
xpub.Derive(der, i);
pubkeys.insert(der.pubkey);
}
+ int count_pks = 0;
for (const auto& origin_pair : script_provider_cached.origins) {
const CPubKey& pk = origin_pair.second.first;
- BOOST_CHECK(pubkeys.count(pk) > 0);
+ count_pks += pubkeys.count(pk);
}
- } else {
- // No xpub, nothing should be cached
+ if (flags & MIXED_PUBKEYS) {
+ BOOST_CHECK_EQUAL(num_xpubs, count_pks);
+ } else {
+ BOOST_CHECK_EQUAL(script_provider_cached.origins.size(), count_pks);
+ }
+ } else if (!(flags & MIXED_PUBKEYS)) {
+ // Only const pubkeys, nothing should be cached
BOOST_CHECK(der_xpub_cache.empty());
BOOST_CHECK(parent_xpub_cache.empty());
}
@@ -333,6 +362,11 @@ BOOST_AUTO_TEST_CASE(descriptor_test)
Check("wpkh([ffffffff/13']xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*)", "wpkh([ffffffff/13']xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)", "wpkh([ffffffff/13']xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*)", "wpkh([ffffffff/13']xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)", RANGE, {{"0014326b2249e3a25d5dc60935f044ee835d090ba859"},{"0014af0bd98abc2f2cae66e36896a39ffe2d32984fb7"},{"00141fa798efd1cbf95cebf912c031b8a4a6e9fb9f27"}}, OutputType::BECH32, {{0x8000000DUL, 1, 2, 0}, {0x8000000DUL, 1, 2, 1}, {0x8000000DUL, 1, 2, 2}});
Check("sh(wpkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "sh(wpkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", "sh(wpkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "sh(wpkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", RANGE | HARDENED | DERIVE_HARDENED, {{"a9149a4d9901d6af519b2a23d4a2f51650fcba87ce7b87"},{"a914bed59fc0024fae941d6e20a3b44a109ae740129287"},{"a9148483aa1116eb9c05c482a72bada4b1db24af654387"}}, OutputType::P2SH_SEGWIT, {{10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}});
Check("combo(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*)", "combo(xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*)", "combo(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*)", "combo(xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*)", RANGE, {{"2102df12b7035bdac8e3bab862a3a83d06ea6b17b6753d52edecba9be46f5d09e076ac","76a914f90e3178ca25f2c808dc76624032d352fdbdfaf288ac","0014f90e3178ca25f2c808dc76624032d352fdbdfaf2","a91408f3ea8c68d4a7585bf9e8bda226723f70e445f087"},{"21032869a233c9adff9a994e4966e5b821fd5bac066da6c3112488dc52383b4a98ecac","76a914a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b788ac","0014a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b7","a91473e39884cb71ae4e5ac9739e9225026c99763e6687"}}, std::nullopt, {{0}, {1}});
+ // Mixed xpubs and const pubkeys
+ Check("wsh(multi(1,xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/0,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))","wsh(multi(1,xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/0,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))","wsh(multi(1,xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/0,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))","wsh(multi(1,xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/0,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", MIXED_PUBKEYS, {{"0020cb155486048b23a6da976d4c6fe071a2dbc8a7b57aaf225b8955f2e2a27b5f00"}},OutputType::BECH32,{{0},{}});
+ // Mixed range xpubs and const pubkeys
+ Check("multi(1,xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)","multi(1,xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)","multi(1,xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)","multi(1,xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", RANGE | MIXED_PUBKEYS, {{"512102df12b7035bdac8e3bab862a3a83d06ea6b17b6753d52edecba9be46f5d09e0762103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd52ae"},{"5121032869a233c9adff9a994e4966e5b821fd5bac066da6c3112488dc52383b4a98ec2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd52ae"},{"5121035d30b6c66dc1e036c45369da8287518cf7e0d6ed1e2b905171c605708f14ca032103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd52ae"}}, std::nullopt,{{2},{1},{0},{}});
+
CheckUnparsable("combo([012345678]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([012345678]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", "Fingerprint is not 4 bytes (9 characters instead of 8 characters)"); // Too long key fingerprint
CheckUnparsable("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483648)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483648)", "Key path value 2147483648 is out of range"); // BIP 32 path element overflow
CheckUnparsable("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/1aa)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/1aa)", "Key path value '1aa' is not a valid uint32"); // Path is not valid uint
diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp
index 9c85c20e2b..3699abb597 100644
--- a/src/test/fuzz/addrman.cpp
+++ b/src/test/fuzz/addrman.cpp
@@ -11,8 +11,10 @@
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
+#include <test/util/setup_common.h>
#include <time.h>
#include <util/asmap.h>
+#include <util/system.h>
#include <cassert>
#include <cstdint>
@@ -20,16 +22,26 @@
#include <string>
#include <vector>
+namespace {
+const BasicTestingSetup* g_setup;
+
+int32_t GetCheckRatio()
+{
+ return std::clamp<int32_t>(g_setup->m_node.args->GetIntArg("-checkaddrman", 0), 0, 1000000);
+}
+} // namespace
+
void initialize_addrman()
{
- SelectParams(CBaseChainParams::REGTEST);
+ static const auto testing_setup = MakeNoLogFileContext<>(CBaseChainParams::REGTEST);
+ g_setup = testing_setup.get();
}
FUZZ_TARGET_INIT(data_stream_addr_man, initialize_addrman)
{
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
CDataStream data_stream = ConsumeDataStream(fuzzed_data_provider);
- AddrMan addr_man(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/0);
+ AddrMan addr_man{/*asmap=*/std::vector<bool>(), /*deterministic=*/false, GetCheckRatio()};
try {
ReadFromStream(addr_man, data_stream);
} catch (const std::exception&) {
@@ -113,7 +125,7 @@ class AddrManDeterministic : public AddrMan
{
public:
explicit AddrManDeterministic(std::vector<bool> asmap, FuzzedDataProvider& fuzzed_data_provider)
- : AddrMan(std::move(asmap), /*deterministic=*/true, /*consistency_check_ratio=*/0)
+ : AddrMan{std::move(asmap), /*deterministic=*/true, GetCheckRatio()}
{
WITH_LOCK(m_impl->cs, m_impl->insecure_rand = FastRandomContext{ConsumeUInt256(fuzzed_data_provider)});
}
diff --git a/src/test/fuzz/autofile.cpp b/src/test/fuzz/autofile.cpp
index 0cc2d12d29..3b410930ed 100644
--- a/src/test/fuzz/autofile.cpp
+++ b/src/test/fuzz/autofile.cpp
@@ -23,16 +23,16 @@ FUZZ_TARGET(autofile)
CallOneOf(
fuzzed_data_provider,
[&] {
- std::array<uint8_t, 4096> arr{};
+ std::array<std::byte, 4096> arr{};
try {
- auto_file.read((char*)arr.data(), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096));
+ auto_file.read({arr.data(), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096)});
} catch (const std::ios_base::failure&) {
}
},
[&] {
- const std::array<uint8_t, 4096> arr{};
+ const std::array<std::byte, 4096> arr{};
try {
- auto_file.write((const char*)arr.data(), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096));
+ auto_file.write({arr.data(), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096)});
} catch (const std::ios_base::failure&) {
}
},
diff --git a/src/test/fuzz/buffered_file.cpp b/src/test/fuzz/buffered_file.cpp
index c3c2e4050f..a8c3318629 100644
--- a/src/test/fuzz/buffered_file.cpp
+++ b/src/test/fuzz/buffered_file.cpp
@@ -33,9 +33,9 @@ FUZZ_TARGET(buffered_file)
CallOneOf(
fuzzed_data_provider,
[&] {
- std::array<uint8_t, 4096> arr{};
+ std::array<std::byte, 4096> arr{};
try {
- opt_buffered_file->read((char*)arr.data(), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096));
+ opt_buffered_file->read({arr.data(), fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096)});
} catch (const std::ios_base::failure&) {
}
},
@@ -53,7 +53,7 @@ FUZZ_TARGET(buffered_file)
return;
}
try {
- opt_buffered_file->FindByte(fuzzed_data_provider.ConsumeIntegral<char>());
+ opt_buffered_file->FindByte(fuzzed_data_provider.ConsumeIntegral<uint8_t>());
} catch (const std::ios_base::failure&) {
}
},
diff --git a/src/test/fuzz/chain.cpp b/src/test/fuzz/chain.cpp
index 326904a811..8c0ed32d51 100644
--- a/src/test/fuzz/chain.cpp
+++ b/src/test/fuzz/chain.cpp
@@ -21,15 +21,18 @@ FUZZ_TARGET(chain)
const uint256 zero{};
disk_block_index->phashBlock = &zero;
- (void)disk_block_index->GetBlockHash();
- (void)disk_block_index->GetBlockPos();
- (void)disk_block_index->GetBlockTime();
- (void)disk_block_index->GetBlockTimeMax();
- (void)disk_block_index->GetMedianTimePast();
- (void)disk_block_index->GetUndoPos();
- (void)disk_block_index->HaveTxsDownloaded();
- (void)disk_block_index->IsValid();
- (void)disk_block_index->ToString();
+ {
+ LOCK(::cs_main);
+ (void)disk_block_index->GetBlockHash();
+ (void)disk_block_index->GetBlockPos();
+ (void)disk_block_index->GetBlockTime();
+ (void)disk_block_index->GetBlockTimeMax();
+ (void)disk_block_index->GetMedianTimePast();
+ (void)disk_block_index->GetUndoPos();
+ (void)disk_block_index->HaveTxsDownloaded();
+ (void)disk_block_index->IsValid();
+ (void)disk_block_index->ToString();
+ }
const CBlockHeader block_header = disk_block_index->GetBlockHeader();
(void)CDiskBlockIndex{*disk_block_index};
@@ -55,7 +58,7 @@ FUZZ_TARGET(chain)
if (block_status & ~BLOCK_VALID_MASK) {
continue;
}
- (void)disk_block_index->RaiseValidity(block_status);
+ WITH_LOCK(::cs_main, (void)disk_block_index->RaiseValidity(block_status));
}
CBlockIndex block_index{block_header};
diff --git a/src/test/fuzz/connman.cpp b/src/test/fuzz/connman.cpp
index f87b6f1503..a14d28f4ef 100644
--- a/src/test/fuzz/connman.cpp
+++ b/src/test/fuzz/connman.cpp
@@ -12,21 +12,29 @@
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
#include <test/util/setup_common.h>
+#include <util/system.h>
#include <util/translation.h>
#include <cstdint>
#include <vector>
+namespace {
+const BasicTestingSetup* g_setup;
+} // namespace
+
void initialize_connman()
{
static const auto testing_setup = MakeNoLogFileContext<>();
+ g_setup = testing_setup.get();
}
FUZZ_TARGET_INIT(connman, initialize_connman)
{
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
SetMockTime(ConsumeTime(fuzzed_data_provider));
- AddrMan addrman(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/0);
+ AddrMan addrman(/*asmap=*/std::vector<bool>(),
+ /*deterministic=*/false,
+ g_setup->m_node.args->GetIntArg("-checkaddrman", 0));
CConnman connman{fuzzed_data_provider.ConsumeIntegral<uint64_t>(), fuzzed_data_provider.ConsumeIntegral<uint64_t>(), addrman, fuzzed_data_provider.ConsumeBool()};
CNetAddr random_netaddr;
CNode random_node = ConsumeNode(fuzzed_data_provider);
@@ -90,12 +98,6 @@ FUZZ_TARGET_INIT(connman, initialize_connman)
(void)connman.OutboundTargetReached(fuzzed_data_provider.ConsumeBool());
},
[&] {
- // Limit now to int32_t to avoid signed integer overflow
- (void)connman.PoissonNextSendInbound(
- std::chrono::microseconds{fuzzed_data_provider.ConsumeIntegral<int32_t>()},
- std::chrono::seconds{fuzzed_data_provider.ConsumeIntegral<int>()});
- },
- [&] {
CSerializedNetMsg serialized_net_msg;
serialized_net_msg.m_type = fuzzed_data_provider.ConsumeRandomLengthString(CMessageHeader::COMMAND_SIZE);
serialized_net_msg.data = ConsumeRandomLengthByteVector(fuzzed_data_provider);
diff --git a/src/test/fuzz/deserialize.cpp b/src/test/fuzz/deserialize.cpp
index 8b4faf2f5f..ed6f172a2a 100644
--- a/src/test/fuzz/deserialize.cpp
+++ b/src/test/fuzz/deserialize.cpp
@@ -22,7 +22,9 @@
#include <pubkey.h>
#include <script/keyorigin.h>
#include <streams.h>
+#include <test/util/setup_common.h>
#include <undo.h>
+#include <util/system.h>
#include <version.h>
#include <exception>
@@ -35,8 +37,15 @@
using node::SnapshotMetadata;
+namespace {
+const BasicTestingSetup* g_setup;
+} // namespace
+
void initialize_deserialize()
{
+ static const auto testing_setup = MakeNoLogFileContext<>();
+ g_setup = testing_setup.get();
+
// Fuzzers using pubkey must hold an ECCVerifyHandle.
static const ECCVerifyHandle verify_handle;
}
@@ -191,7 +200,9 @@ FUZZ_TARGET_DESERIALIZE(blockmerkleroot, {
BlockMerkleRoot(block, &mutated);
})
FUZZ_TARGET_DESERIALIZE(addrman_deserialize, {
- AddrMan am(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/0);
+ AddrMan am(/*asmap=*/std::vector<bool>(),
+ /*deterministic=*/false,
+ g_setup->m_node.args->GetIntArg("-checkaddrman", 0));
DeserializeFromFuzzingInput(buffer, am);
})
FUZZ_TARGET_DESERIALIZE(blockheader_deserialize, {
diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp
index a33297e0ed..60c48e7c22 100644
--- a/src/test/fuzz/fuzz.cpp
+++ b/src/test/fuzz/fuzz.cpp
@@ -12,6 +12,7 @@
#include <cstdint>
#include <exception>
+#include <functional>
#include <memory>
#include <string>
#include <unistd.h>
@@ -19,6 +20,29 @@
const std::function<void(const std::string&)> G_TEST_LOG_FUN{};
+/**
+ * A copy of the command line arguments that start with `--`.
+ * First `LLVMFuzzerInitialize()` is called, which saves the arguments to `g_args`.
+ * Later, depending on the fuzz test, `G_TEST_COMMAND_LINE_ARGUMENTS()` may be
+ * called by the `BasicTestingSetup` constructor to fetch those arguments and store
+ * them in `BasicTestingSetup::m_node.args`.
+ */
+static std::vector<const char*> g_args;
+
+static void SetArgs(int argc, char** argv) {
+ for (int i = 1; i < argc; ++i) {
+ // Only take into account arguments that start with `--`. The others are for the fuzz engine:
+ // `fuzz -runs=1 fuzz_seed_corpus/address_deserialize_v2 --checkaddrman=5`
+ if (strlen(argv[i]) > 2 && argv[i][0] == '-' && argv[i][1] == '-') {
+ g_args.push_back(argv[i]);
+ }
+ }
+}
+
+const std::function<std::vector<const char*>()> G_TEST_COMMAND_LINE_ARGUMENTS = []() {
+ return g_args;
+};
+
std::map<std::string_view, std::tuple<TypeTestOneInput, TypeInitialize, TypeHidden>>& FuzzTargets()
{
static std::map<std::string_view, std::tuple<TypeTestOneInput, TypeInitialize, TypeHidden>> g_fuzz_targets;
@@ -56,7 +80,7 @@ void initialize()
}
if (const char* out_path = std::getenv("WRITE_ALL_FUZZ_TARGETS_AND_ABORT")) {
std::cout << "Writing all fuzz target names to '" << out_path << "'." << std::endl;
- std::ofstream out_stream(out_path, std::ios::binary);
+ fsbridge::ofstream out_stream{out_path, std::ios::binary};
for (const auto& t : FuzzTargets()) {
if (std::get<2>(t.second)) continue;
out_stream << t.first << std::endl;
@@ -95,6 +119,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size)
// This function is used by libFuzzer
extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv)
{
+ SetArgs(*argc, *argv);
initialize();
return 0;
}
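
Tying the pieces together: `SetArgs()` keeps only the `--`-prefixed options, `G_TEST_COMMAND_LINE_ARGUMENTS` hands them to `BasicTestingSetup`, and fuzz targets such as `addrman` then read them back (see `GetCheckRatio()` above, which queries `-checkaddrman`). Reusing the invocation from the comment in `SetArgs()` as a sketch:

```bash
# Illustrative sketch only: `-runs=1` and the corpus path go to the fuzz engine,
# while `--checkaddrman=5` is saved by SetArgs() and later fetched through
# G_TEST_COMMAND_LINE_ARGUMENTS by the fuzz test's BasicTestingSetup.
fuzz -runs=1 fuzz_seed_corpus/address_deserialize_v2 --checkaddrman=5
```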
diff --git a/src/test/fuzz/integer.cpp b/src/test/fuzz/integer.cpp
index 3087f11771..72574612a2 100644
--- a/src/test/fuzz/integer.cpp
+++ b/src/test/fuzz/integer.cpp
@@ -206,11 +206,6 @@ FUZZ_TARGET_INIT(integer, initialize_integer)
stream >> deserialized_i8;
assert(i8 == deserialized_i8 && stream.empty());
- char deserialized_ch;
- stream << ch;
- stream >> deserialized_ch;
- assert(ch == deserialized_ch && stream.empty());
-
bool deserialized_b;
stream << b;
stream >> deserialized_b;
diff --git a/src/test/fuzz/p2p_transport_serialization.cpp b/src/test/fuzz/p2p_transport_serialization.cpp
index a7b2b8bfc1..88c22ca305 100644
--- a/src/test/fuzz/p2p_transport_serialization.cpp
+++ b/src/test/fuzz/p2p_transport_serialization.cpp
@@ -70,13 +70,13 @@ FUZZ_TARGET_INIT(p2p_transport_serialization, initialize_p2p_transport_serializa
const std::chrono::microseconds m_time{std::numeric_limits<int64_t>::max()};
bool reject_message{false};
CNetMessage msg = deserializer.GetMessage(m_time, reject_message);
- assert(msg.m_command.size() <= CMessageHeader::COMMAND_SIZE);
+ assert(msg.m_type.size() <= CMessageHeader::COMMAND_SIZE);
assert(msg.m_raw_message_size <= mutable_msg_bytes.size());
assert(msg.m_raw_message_size == CMessageHeader::HEADER_SIZE + msg.m_message_size);
assert(msg.m_time == m_time);
std::vector<unsigned char> header;
- auto msg2 = CNetMsgMaker{msg.m_recv.GetVersion()}.Make(msg.m_command, MakeUCharSpan(msg.m_recv));
+ auto msg2 = CNetMsgMaker{msg.m_recv.GetVersion()}.Make(msg.m_type, MakeUCharSpan(msg.m_recv));
serializer.prepareForTransport(msg2, header);
}
}
diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp
index b6ecf1c492..03a84b697d 100644
--- a/src/test/fuzz/rpc.cpp
+++ b/src/test/fuzz/rpc.cpp
@@ -120,6 +120,7 @@ const std::vector<std::string> RPC_COMMANDS_SAFE_FOR_FUZZING{
"getchaintips",
"getchaintxstats",
"getconnectioncount",
+ "getdeploymentinfo",
"getdescriptorinfo",
"getdifficulty",
"getindexinfo",
@@ -271,7 +272,7 @@ std::string ConsumeScalarRPCArgument(FuzzedDataProvider& fuzzed_data_provider)
}
CDataStream data_stream{SER_NETWORK, PROTOCOL_VERSION};
data_stream << *opt_psbt;
- r = EncodeBase64({data_stream.begin(), data_stream.end()});
+ r = EncodeBase64(data_stream);
},
[&] {
// base58 encoded key
diff --git a/src/test/fuzz/script.cpp b/src/test/fuzz/script.cpp
index eb170aab76..14a59912db 100644
--- a/src/test/fuzz/script.cpp
+++ b/src/test/fuzz/script.cpp
@@ -102,17 +102,6 @@ FUZZ_TARGET_INIT(script, initialize_script)
(void)script.IsPushOnly();
(void)script.GetSigOpCount(/* fAccurate= */ false);
- (void)FormatScript(script);
- (void)ScriptToAsmStr(script, false);
- (void)ScriptToAsmStr(script, true);
-
- UniValue o1(UniValue::VOBJ);
- ScriptPubKeyToUniv(script, o1, true);
- UniValue o2(UniValue::VOBJ);
- ScriptPubKeyToUniv(script, o2, false);
- UniValue o3(UniValue::VOBJ);
- ScriptToUniv(script, o3);
-
{
const std::vector<uint8_t> bytes = ConsumeRandomLengthByteVector(fuzzed_data_provider);
CompressedScript compressed_script;
@@ -178,4 +167,12 @@ FUZZ_TARGET_INIT(script, initialize_script)
Assert(dest == GetScriptForDestination(tx_destination_2));
}
}
+
+ (void)FormatScript(script);
+ (void)ScriptToAsmStr(script, /*fAttemptSighashDecode=*/fuzzed_data_provider.ConsumeBool());
+
+ UniValue o1(UniValue::VOBJ);
+ ScriptPubKeyToUniv(script, o1, /*include_hex=*/fuzzed_data_provider.ConsumeBool());
+ UniValue o3(UniValue::VOBJ);
+ ScriptToUniv(script, o3);
}
diff --git a/src/test/fuzz/signature_checker.cpp b/src/test/fuzz/signature_checker.cpp
index deffe26b17..f6c591aca4 100644
--- a/src/test/fuzz/signature_checker.cpp
+++ b/src/test/fuzz/signature_checker.cpp
@@ -34,7 +34,7 @@ public:
return m_fuzzed_data_provider.ConsumeBool();
}
- bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror = nullptr) const override
+ bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror = nullptr) const override
{
return m_fuzzed_data_provider.ConsumeBool();
}
diff --git a/src/test/fuzz/util.cpp b/src/test/fuzz/util.cpp
index 47c2be3faa..2514636d6e 100644
--- a/src/test/fuzz/util.cpp
+++ b/src/test/fuzz/util.cpp
@@ -408,7 +408,7 @@ uint32_t ConsumeSequence(FuzzedDataProvider& fuzzed_data_provider) noexcept
return fuzzed_data_provider.ConsumeBool() ?
fuzzed_data_provider.PickValueInArray({
CTxIn::SEQUENCE_FINAL,
- CTxIn::SEQUENCE_FINAL - 1,
+ CTxIn::MAX_SEQUENCE_NONFINAL,
MAX_BIP125_RBF_SEQUENCE,
}) :
fuzzed_data_provider.ConsumeIntegral<uint32_t>();
diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h
index fd7f40c01d..3bc62878bd 100644
--- a/src/test/fuzz/util.h
+++ b/src/test/fuzz/util.h
@@ -328,7 +328,6 @@ void WriteToStream(FuzzedDataProvider& fuzzed_data_provider, Stream& stream) noe
CallOneOf(
fuzzed_data_provider,
WRITE_TO_STREAM_CASE(bool, fuzzed_data_provider.ConsumeBool()),
- WRITE_TO_STREAM_CASE(char, fuzzed_data_provider.ConsumeIntegral<char>()),
WRITE_TO_STREAM_CASE(int8_t, fuzzed_data_provider.ConsumeIntegral<int8_t>()),
WRITE_TO_STREAM_CASE(uint8_t, fuzzed_data_provider.ConsumeIntegral<uint8_t>()),
WRITE_TO_STREAM_CASE(int16_t, fuzzed_data_provider.ConsumeIntegral<int16_t>()),
@@ -338,7 +337,7 @@ void WriteToStream(FuzzedDataProvider& fuzzed_data_provider, Stream& stream) noe
WRITE_TO_STREAM_CASE(int64_t, fuzzed_data_provider.ConsumeIntegral<int64_t>()),
WRITE_TO_STREAM_CASE(uint64_t, fuzzed_data_provider.ConsumeIntegral<uint64_t>()),
WRITE_TO_STREAM_CASE(std::string, fuzzed_data_provider.ConsumeRandomLengthString(32)),
- WRITE_TO_STREAM_CASE(std::vector<char>, ConsumeRandomLengthIntegralVector<char>(fuzzed_data_provider)));
+ WRITE_TO_STREAM_CASE(std::vector<uint8_t>, ConsumeRandomLengthIntegralVector<uint8_t>(fuzzed_data_provider)));
} catch (const std::ios_base::failure&) {
break;
}
@@ -358,7 +357,6 @@ void ReadFromStream(FuzzedDataProvider& fuzzed_data_provider, Stream& stream) no
CallOneOf(
fuzzed_data_provider,
READ_FROM_STREAM_CASE(bool),
- READ_FROM_STREAM_CASE(char),
READ_FROM_STREAM_CASE(int8_t),
READ_FROM_STREAM_CASE(uint8_t),
READ_FROM_STREAM_CASE(int16_t),
@@ -368,7 +366,7 @@ void ReadFromStream(FuzzedDataProvider& fuzzed_data_provider, Stream& stream) no
READ_FROM_STREAM_CASE(int64_t),
READ_FROM_STREAM_CASE(uint64_t),
READ_FROM_STREAM_CASE(std::string),
- READ_FROM_STREAM_CASE(std::vector<char>));
+ READ_FROM_STREAM_CASE(std::vector<uint8_t>));
} catch (const std::ios_base::failure&) {
break;
}
diff --git a/src/test/fuzz/versionbits.cpp b/src/test/fuzz/versionbits.cpp
index cf95c0b9bf..95eb71099d 100644
--- a/src/test/fuzz/versionbits.cpp
+++ b/src/test/fuzz/versionbits.cpp
@@ -51,7 +51,7 @@ public:
ThresholdState GetStateFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateFor(pindexPrev, dummy_params, m_cache); }
int GetStateSinceHeightFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateSinceHeightFor(pindexPrev, dummy_params, m_cache); }
- BIP9Stats GetStateStatisticsFor(const CBlockIndex* pindexPrev) const { return AbstractThresholdConditionChecker::GetStateStatisticsFor(pindexPrev, dummy_params); }
+ BIP9Stats GetStateStatisticsFor(const CBlockIndex* pindex, std::vector<bool>* signals=nullptr) const { return AbstractThresholdConditionChecker::GetStateStatisticsFor(pindex, dummy_params, signals); }
bool Condition(int32_t version) const
{
@@ -220,7 +220,14 @@ FUZZ_TARGET_INIT(versionbits, initialize)
CBlockIndex* prev = blocks.tip();
const int exp_since = checker.GetStateSinceHeightFor(prev);
const ThresholdState exp_state = checker.GetStateFor(prev);
- BIP9Stats last_stats = checker.GetStateStatisticsFor(prev);
+
+ // get statistics from end of previous period, then reset
+ BIP9Stats last_stats;
+ last_stats.period = period;
+ last_stats.threshold = threshold;
+ last_stats.count = last_stats.elapsed = 0;
+ last_stats.possible = (period >= threshold);
+ std::vector<bool> last_signals{};
int prev_next_height = (prev == nullptr ? 0 : prev->nHeight + 1);
assert(exp_since <= prev_next_height);
@@ -241,17 +248,25 @@ FUZZ_TARGET_INIT(versionbits, initialize)
assert(state == exp_state);
assert(since == exp_since);
- // GetStateStatistics may crash when state is not STARTED
- if (state != ThresholdState::STARTED) continue;
-
// check that after mining this block stats change as expected
- const BIP9Stats stats = checker.GetStateStatisticsFor(current_block);
+ std::vector<bool> signals;
+ const BIP9Stats stats = checker.GetStateStatisticsFor(current_block, &signals);
+ const BIP9Stats stats_no_signals = checker.GetStateStatisticsFor(current_block);
+ assert(stats.period == stats_no_signals.period && stats.threshold == stats_no_signals.threshold
+ && stats.elapsed == stats_no_signals.elapsed && stats.count == stats_no_signals.count
+ && stats.possible == stats_no_signals.possible);
+
assert(stats.period == period);
assert(stats.threshold == threshold);
assert(stats.elapsed == b);
assert(stats.count == last_stats.count + (signal ? 1 : 0));
assert(stats.possible == (stats.count + period >= stats.elapsed + threshold));
last_stats = stats;
+
+ assert(signals.size() == last_signals.size() + 1);
+ assert(signals.back() == signal);
+ last_signals.push_back(signal);
+ assert(signals == last_signals);
}
if (exp_state == ThresholdState::STARTED) {
@@ -265,14 +280,12 @@ FUZZ_TARGET_INIT(versionbits, initialize)
CBlockIndex* current_block = blocks.mine_block(signal);
assert(checker.Condition(current_block) == signal);
- // GetStateStatistics is safe on a period boundary
- // and has progressed to a new period
const BIP9Stats stats = checker.GetStateStatisticsFor(current_block);
assert(stats.period == period);
assert(stats.threshold == threshold);
- assert(stats.elapsed == 0);
- assert(stats.count == 0);
- assert(stats.possible == true);
+ assert(stats.elapsed == period);
+ assert(stats.count == blocks_sig);
+ assert(stats.possible == (stats.count + period >= stats.elapsed + threshold));
// More interesting is whether the state changed.
const ThresholdState state = checker.GetStateFor(current_block);
diff --git a/src/test/interfaces_tests.cpp b/src/test/interfaces_tests.cpp
index f4bf6ff8c9..49b7d2003b 100644
--- a/src/test/interfaces_tests.cpp
+++ b/src/test/interfaces_tests.cpp
@@ -123,6 +123,7 @@ BOOST_AUTO_TEST_CASE(findCommonAncestor)
BOOST_AUTO_TEST_CASE(hasBlocks)
{
+ LOCK(::cs_main);
auto& chain = m_node.chain;
const CChain& active = Assert(m_node.chainman)->ActiveChain();
diff --git a/src/test/main.cpp b/src/test/main.cpp
index 5885564074..1ad8fcce3a 100644
--- a/src/test/main.cpp
+++ b/src/test/main.cpp
@@ -11,6 +11,7 @@
#include <test/util/setup_common.h>
+#include <functional>
#include <iostream>
/** Redirect debug log to unit_test.log files */
@@ -24,3 +25,17 @@ const std::function<void(const std::string&)> G_TEST_LOG_FUN = [](const std::str
if (!should_log) return;
std::cout << s;
};
+
+/**
+ * Retrieve the command line arguments from boost.
+ * Allows usage like:
+ * `test_bitcoin --run_test="net_tests/cnode_listen_port" -- -checkaddrman=1 -printtoconsole=1`
+ * which would return `["-checkaddrman=1", "-printtoconsole=1"]`.
+ */
+const std::function<std::vector<const char*>()> G_TEST_COMMAND_LINE_ARGUMENTS = []() {
+ std::vector<const char*> args;
+ for (int i = 1; i < boost::unit_test::framework::master_test_suite().argc; ++i) {
+ args.push_back(boost::unit_test::framework::master_test_suite().argv[i]);
+ }
+ return args;
+};
diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp
index e07eb95856..c453dae701 100644
--- a/src/test/miner_tests.cpp
+++ b/src/test/miner_tests.cpp
@@ -459,7 +459,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity)
// absolute height locked
tx.vin[0].prevout.hash = txFirst[2]->GetHash();
- tx.vin[0].nSequence = CTxIn::SEQUENCE_FINAL - 1;
+ tx.vin[0].nSequence = CTxIn::MAX_SEQUENCE_NONFINAL;
prevheights[0] = baseheight + 3;
tx.nLockTime = m_node.chainman->ActiveChain().Tip()->nHeight + 1;
hash = tx.GetHash();
diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp
index 86786af450..b0befe2f58 100644
--- a/src/test/net_tests.cpp
+++ b/src/test/net_tests.cpp
@@ -593,7 +593,7 @@ BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test)
// that a normal IPv4 address is among the entries, but if this address is
// !IsRoutable the undefined behavior is easier to trigger deterministically
{
- LOCK(cs_mapLocalHost);
+ LOCK(g_maplocalhost_mutex);
in_addr ipv4AddrLocal;
ipv4AddrLocal.s_addr = 0x0100007f;
CNetAddr addr = CNetAddr(ipv4AddrLocal);
diff --git a/src/test/pow_tests.cpp b/src/test/pow_tests.cpp
index d5a4d3fd80..2f43ae52f7 100644
--- a/src/test/pow_tests.cpp
+++ b/src/test/pow_tests.cpp
@@ -73,7 +73,7 @@ BOOST_AUTO_TEST_CASE(CheckProofOfWork_test_overflow_target)
{
const auto consensus = CreateChainParams(*m_node.args, CBaseChainParams::MAIN)->GetConsensus();
uint256 hash;
- unsigned int nBits = ~0x00800000;
+ unsigned int nBits{~0x00800000U};
hash.SetHex("0x1");
BOOST_CHECK(!CheckProofOfWork(hash, nBits, consensus));
}
diff --git a/src/test/prevector_tests.cpp b/src/test/prevector_tests.cpp
index 12c5848eaf..89814748fe 100644
--- a/src/test/prevector_tests.cpp
+++ b/src/test/prevector_tests.cpp
@@ -220,7 +220,7 @@ BOOST_AUTO_TEST_CASE(PrevectorTestInt)
prevector_tester<8, int> test;
for (int i = 0; i < 2048; i++) {
if (InsecureRandBits(2) == 0) {
- test.insert(InsecureRandRange(test.size() + 1), InsecureRand32());
+ test.insert(InsecureRandRange(test.size() + 1), int(InsecureRand32()));
}
if (test.size() > 0 && InsecureRandBits(2) == 1) {
test.erase(InsecureRandRange(test.size()));
@@ -230,7 +230,7 @@ BOOST_AUTO_TEST_CASE(PrevectorTestInt)
test.resize(new_size);
}
if (InsecureRandBits(3) == 3) {
- test.insert(InsecureRandRange(test.size() + 1), 1 + InsecureRandBool(), InsecureRand32());
+ test.insert(InsecureRandRange(test.size() + 1), 1 + InsecureRandBool(), int(InsecureRand32()));
}
if (InsecureRandBits(3) == 4) {
int del = std::min<int>(test.size(), 1 + (InsecureRandBool()));
@@ -238,7 +238,7 @@ BOOST_AUTO_TEST_CASE(PrevectorTestInt)
test.erase(beg, beg + del);
}
if (InsecureRandBits(4) == 5) {
- test.push_back(InsecureRand32());
+ test.push_back(int(InsecureRand32()));
}
if (test.size() > 0 && InsecureRandBits(4) == 6) {
test.pop_back();
@@ -247,7 +247,7 @@ BOOST_AUTO_TEST_CASE(PrevectorTestInt)
int values[4];
int num = 1 + (InsecureRandBits(2));
for (int k = 0; k < num; k++) {
- values[k] = InsecureRand32();
+ values[k] = int(InsecureRand32());
}
test.insert_range(InsecureRandRange(test.size() + 1), values, values + num);
}
@@ -263,13 +263,13 @@ BOOST_AUTO_TEST_CASE(PrevectorTestInt)
test.shrink_to_fit();
}
if (test.size() > 0) {
- test.update(InsecureRandRange(test.size()), InsecureRand32());
+ test.update(InsecureRandRange(test.size()), int(InsecureRand32()));
}
if (InsecureRandBits(10) == 11) {
test.clear();
}
if (InsecureRandBits(9) == 12) {
- test.assign(InsecureRandBits(5), InsecureRand32());
+ test.assign(InsecureRandBits(5), int(InsecureRand32()));
}
if (InsecureRandBits(3) == 3) {
test.swap();
@@ -283,8 +283,8 @@ BOOST_AUTO_TEST_CASE(PrevectorTestInt)
if (InsecureRandBits(5) == 19) {
unsigned int num = 1 + (InsecureRandBits(4));
std::vector<int> values(num);
- for (auto &v : values) {
- v = InsecureRand32();
+ for (int& v : values) {
+ v = int(InsecureRand32());
}
test.resize_uninitialized(values);
}
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index eacd7ae894..4906bd2386 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -155,10 +155,10 @@ void DoTest(const CScript& scriptPubKey, const CScript& scriptSig, const CScript
if (libconsensus_flags == flags) {
int expectedSuccessCode = expect ? 1 : 0;
if (flags & bitcoinconsensus_SCRIPT_FLAGS_VERIFY_WITNESS) {
- BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), txCredit.vout[0].nValue, stream.data(), stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
+ BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), txCredit.vout[0].nValue, UCharCast(stream.data()), stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
} else {
- BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), 0, stream.data(), stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
- BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
+ BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), 0, UCharCast(stream.data()), stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
+ BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()), stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
}
}
#endif
@@ -923,7 +923,7 @@ BOOST_AUTO_TEST_CASE(script_build)
}
#ifdef UPDATE_JSON_TESTS
- FILE* file = fopen("script_tests.json.gen", "w");
+ FILE* file = fsbridge::fopen("script_tests.json.gen", "w");
fputs(strGen.c_str(), file);
fclose(file);
#endif
@@ -1520,7 +1520,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_returns_true)
stream << spendTx;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()), stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 1);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_OK);
}
@@ -1543,7 +1543,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_tx_index_err)
stream << spendTx;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()), stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_TX_INDEX);
}
@@ -1566,7 +1566,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_tx_size)
stream << spendTx;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size() * 2, nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()), stream.size() * 2, nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_TX_SIZE_MISMATCH);
}
@@ -1589,7 +1589,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_tx_serialization)
stream << 0xffffffff;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()), stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_TX_DESERIALIZE);
}
@@ -1612,7 +1612,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_amount_required_err)
stream << spendTx;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()), stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_AMOUNT_REQUIRED);
}
@@ -1635,7 +1635,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_invalid_flags)
stream << spendTx;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), UCharCast(stream.data()), stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_INVALID_FLAGS);
}
diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp
index e91c203c26..8b8133b689 100644
--- a/src/test/serialize_tests.cpp
+++ b/src/test/serialize_tests.cpp
@@ -61,7 +61,7 @@ public:
BOOST_AUTO_TEST_CASE(sizes)
{
- BOOST_CHECK_EQUAL(sizeof(char), GetSerializeSize(char(0), 0));
+ BOOST_CHECK_EQUAL(sizeof(unsigned char), GetSerializeSize((unsigned char)0, 0));
BOOST_CHECK_EQUAL(sizeof(int8_t), GetSerializeSize(int8_t(0), 0));
BOOST_CHECK_EQUAL(sizeof(uint8_t), GetSerializeSize(uint8_t(0), 0));
BOOST_CHECK_EQUAL(sizeof(int16_t), GetSerializeSize(int16_t(0), 0));
@@ -74,7 +74,7 @@ BOOST_AUTO_TEST_CASE(sizes)
BOOST_CHECK_EQUAL(sizeof(uint8_t), GetSerializeSize(bool(0), 0));
// Sanity-check GetSerializeSize and c++ type matching
- BOOST_CHECK_EQUAL(GetSerializeSize(char(0), 0), 1U);
+ BOOST_CHECK_EQUAL(GetSerializeSize((unsigned char)0, 0), 1U);
BOOST_CHECK_EQUAL(GetSerializeSize(int8_t(0), 0), 1U);
BOOST_CHECK_EQUAL(GetSerializeSize(uint8_t(0), 0), 1U);
BOOST_CHECK_EQUAL(GetSerializeSize(int16_t(0), 0), 2U);
@@ -186,76 +186,78 @@ BOOST_AUTO_TEST_CASE(noncanonical)
std::vector<char>::size_type n;
// zero encoded with three bytes:
- ss.write("\xfd\x00\x00", 3);
+ ss.write(MakeByteSpan("\xfd\x00\x00").first(3));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);
// 0xfc encoded with three bytes:
- ss.write("\xfd\xfc\x00", 3);
+ ss.write(MakeByteSpan("\xfd\xfc\x00").first(3));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);
// 0xfd encoded with three bytes is OK:
- ss.write("\xfd\xfd\x00", 3);
+ ss.write(MakeByteSpan("\xfd\xfd\x00").first(3));
n = ReadCompactSize(ss);
BOOST_CHECK(n == 0xfd);
// zero encoded with five bytes:
- ss.write("\xfe\x00\x00\x00\x00", 5);
+ ss.write(MakeByteSpan("\xfe\x00\x00\x00\x00").first(5));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);
// 0xffff encoded with five bytes:
- ss.write("\xfe\xff\xff\x00\x00", 5);
+ ss.write(MakeByteSpan("\xfe\xff\xff\x00\x00").first(5));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);
// zero encoded with nine bytes:
- ss.write("\xff\x00\x00\x00\x00\x00\x00\x00\x00", 9);
+ ss.write(MakeByteSpan("\xff\x00\x00\x00\x00\x00\x00\x00\x00").first(9));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);
// 0x01ffffff encoded with nine bytes:
- ss.write("\xff\xff\xff\xff\x01\x00\x00\x00\x00", 9);
+ ss.write(MakeByteSpan("\xff\xff\xff\xff\x01\x00\x00\x00\x00").first(9));
BOOST_CHECK_EXCEPTION(ReadCompactSize(ss), std::ios_base::failure, isCanonicalException);
}
BOOST_AUTO_TEST_CASE(insert_delete)
{
+ constexpr auto B2I{[](std::byte b) { return std::to_integer<uint8_t>(b); }};
+
// Test inserting/deleting bytes.
CDataStream ss(SER_DISK, 0);
BOOST_CHECK_EQUAL(ss.size(), 0U);
- ss.write("\x00\x01\x02\xff", 4);
+ ss.write(MakeByteSpan("\x00\x01\x02\xff").first(4));
BOOST_CHECK_EQUAL(ss.size(), 4U);
- char c = (char)11;
+ uint8_t c{11};
// Inserting at beginning/end/middle:
- ss.insert(ss.begin(), c);
+ ss.insert(ss.begin(), std::byte{c});
BOOST_CHECK_EQUAL(ss.size(), 5U);
- BOOST_CHECK_EQUAL(ss[0], c);
- BOOST_CHECK_EQUAL(ss[1], 0);
+ BOOST_CHECK_EQUAL(B2I(ss[0]), c);
+ BOOST_CHECK_EQUAL(B2I(ss[1]), 0);
- ss.insert(ss.end(), c);
+ ss.insert(ss.end(), std::byte{c});
BOOST_CHECK_EQUAL(ss.size(), 6U);
- BOOST_CHECK_EQUAL(ss[4], 0xff);
- BOOST_CHECK_EQUAL(ss[5], c);
+ BOOST_CHECK_EQUAL(B2I(ss[4]), 0xff);
+ BOOST_CHECK_EQUAL(B2I(ss[5]), c);
- ss.insert(ss.begin()+2, c);
+ ss.insert(ss.begin() + 2, std::byte{c});
BOOST_CHECK_EQUAL(ss.size(), 7U);
- BOOST_CHECK_EQUAL(ss[2], c);
+ BOOST_CHECK_EQUAL(B2I(ss[2]), c);
// Delete at beginning/end/middle
ss.erase(ss.begin());
BOOST_CHECK_EQUAL(ss.size(), 6U);
- BOOST_CHECK_EQUAL(ss[0], 0);
+ BOOST_CHECK_EQUAL(B2I(ss[0]), 0);
ss.erase(ss.begin()+ss.size()-1);
BOOST_CHECK_EQUAL(ss.size(), 5U);
- BOOST_CHECK_EQUAL(ss[4], 0xff);
+ BOOST_CHECK_EQUAL(B2I(ss[4]), 0xff);
ss.erase(ss.begin()+1);
BOOST_CHECK_EQUAL(ss.size(), 4U);
- BOOST_CHECK_EQUAL(ss[0], 0);
- BOOST_CHECK_EQUAL(ss[1], 1);
- BOOST_CHECK_EQUAL(ss[2], 2);
- BOOST_CHECK_EQUAL(ss[3], 0xff);
+ BOOST_CHECK_EQUAL(B2I(ss[0]), 0);
+ BOOST_CHECK_EQUAL(B2I(ss[1]), 1);
+ BOOST_CHECK_EQUAL(B2I(ss[2]), 2);
+ BOOST_CHECK_EQUAL(B2I(ss[3]), 0xff);
}
BOOST_AUTO_TEST_CASE(class_methods)
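
The B2I lambda introduced in insert_delete is needed because std::byte deliberately provides no implicit conversions, arithmetic, or stream output, so BOOST_CHECK_EQUAL cannot compare or print it against an integer literal. A tiny stand-alone illustration of the same conversion (plain assert instead of Boost):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main()
    {
        const std::byte b{0xff};
        // std::to_integer performs the explicit conversion that std::byte requires.
        assert(std::to_integer<uint8_t>(b) == 0xff);
    }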
diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp
index c16087bc1b..1601b02356 100644
--- a/src/test/sighash_tests.cpp
+++ b/src/test/sighash_tests.cpp
@@ -91,8 +91,9 @@ void static RandomScript(CScript &script) {
script << oplist[InsecureRandRange(std::size(oplist))];
}
-void static RandomTransaction(CMutableTransaction &tx, bool fSingle) {
- tx.nVersion = InsecureRand32();
+void static RandomTransaction(CMutableTransaction& tx, bool fSingle)
+{
+ tx.nVersion = int(InsecureRand32());
tx.vin.clear();
tx.vout.clear();
tx.nLockTime = (InsecureRandBool()) ? InsecureRand32() : 0;
@@ -126,7 +127,7 @@ BOOST_AUTO_TEST_CASE(sighash_test)
int nRandomTests = 50000;
#endif
for (int i=0; i<nRandomTests; i++) {
- int nHashType = InsecureRand32();
+ int nHashType{int(InsecureRand32())};
CMutableTransaction txTo;
RandomTransaction(txTo, (nHashType & 0x1f) == SIGHASH_SINGLE);
CScript scriptCode;
diff --git a/src/test/skiplist_tests.cpp b/src/test/skiplist_tests.cpp
index 7ede79279f..6dadf09176 100644
--- a/src/test/skiplist_tests.cpp
+++ b/src/test/skiplist_tests.cpp
@@ -114,8 +114,8 @@ BOOST_AUTO_TEST_CASE(findearliestatleast_test)
} else {
// randomly choose something in the range [MTP, MTP*2]
int64_t medianTimePast = vBlocksMain[i].GetMedianTimePast();
- int r = InsecureRandRange(medianTimePast);
- vBlocksMain[i].nTime = r + medianTimePast;
+ int r{int(InsecureRandRange(medianTimePast))};
+ vBlocksMain[i].nTime = uint32_t(r + medianTimePast);
vBlocksMain[i].nTimeMax = std::max(vBlocksMain[i].nTime, vBlocksMain[i-1].nTimeMax);
}
}
diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp
index 3571927397..af0f86274e 100644
--- a/src/test/streams_tests.cpp
+++ b/src/test/streams_tests.cpp
@@ -160,22 +160,18 @@ BOOST_AUTO_TEST_CASE(bitstream_reader_writer)
BOOST_AUTO_TEST_CASE(streams_serializedata_xor)
{
- std::vector<uint8_t> in;
+ std::vector<std::byte> in;
std::vector<char> expected_xor;
- std::vector<unsigned char> key;
CDataStream ds(in, 0, 0);
// Degenerate case
-
- key.push_back('\x00');
- key.push_back('\x00');
- ds.Xor(key);
+ ds.Xor({0x00, 0x00});
BOOST_CHECK_EQUAL(
std::string(expected_xor.begin(), expected_xor.end()),
ds.str());
- in.push_back('\x0f');
- in.push_back('\xf0');
+ in.push_back(std::byte{0x0f});
+ in.push_back(std::byte{0xf0});
expected_xor.push_back('\xf0');
expected_xor.push_back('\x0f');
@@ -183,10 +179,8 @@ BOOST_AUTO_TEST_CASE(streams_serializedata_xor)
ds.clear();
ds.insert(ds.begin(), in.begin(), in.end());
- key.clear();
- key.push_back('\xff');
- ds.Xor(key);
+ ds.Xor({0xff});
BOOST_CHECK_EQUAL(
std::string(expected_xor.begin(), expected_xor.end()),
ds.str());
@@ -195,19 +189,15 @@ BOOST_AUTO_TEST_CASE(streams_serializedata_xor)
in.clear();
expected_xor.clear();
- in.push_back('\xf0');
- in.push_back('\x0f');
+ in.push_back(std::byte{0xf0});
+ in.push_back(std::byte{0x0f});
expected_xor.push_back('\x0f');
expected_xor.push_back('\x00');
ds.clear();
ds.insert(ds.begin(), in.begin(), in.end());
- key.clear();
- key.push_back('\xff');
- key.push_back('\x0f');
-
- ds.Xor(key);
+ ds.Xor({0xff, 0x0f});
BOOST_CHECK_EQUAL(
std::string(expected_xor.begin(), expected_xor.end()),
ds.str());
@@ -421,7 +411,7 @@ BOOST_AUTO_TEST_CASE(streams_buffered_file_rand)
size_t find = currentPos + InsecureRandRange(8);
if (find >= fileSize)
find = fileSize - 1;
- bf.FindByte(static_cast<char>(find));
+ bf.FindByte(uint8_t(find));
// The value at each offset is the offset.
BOOST_CHECK_EQUAL(bf.GetPos(), find);
currentPos = find;
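
For context on the streams_serializedata_xor changes: Xor() applies a rolling-key XOR over the stream contents, and the test now feeds it byte literals directly instead of building a separate key vector. A minimal sketch of the operation that the expected_xor vectors encode, written against a plain std::vector<std::byte>:

    #include <cstddef>
    #include <vector>

    // XOR data in place with the key repeated cyclically; an all-zero key is a no-op.
    void XorWithKey(std::vector<std::byte>& data, const std::vector<std::byte>& key)
    {
        if (key.empty()) return;
        for (size_t i = 0, j = 0; i < data.size(); ++i) {
            data[i] ^= key[j++];
            if (j == key.size()) j = 0;
        }
    }

For the vectors above, {0x0f, 0xf0} XORed with the single-byte key {0xff} yields {0xf0, 0x0f}, which is exactly what expected_xor checks.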
diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp
index 1fe51fadd4..4fb7f9c405 100644
--- a/src/test/transaction_tests.cpp
+++ b/src/test/transaction_tests.cpp
@@ -220,7 +220,7 @@ BOOST_AUTO_TEST_CASE(tx_valid)
fValid = false;
break;
}
- COutPoint outpoint(uint256S(vinput[0].get_str()), vinput[1].get_int());
+ COutPoint outpoint{uint256S(vinput[0].get_str()), uint32_t(vinput[1].get_int())};
mapprevOutScriptPubKeys[outpoint] = ParseScript(vinput[2].get_str());
if (vinput.size() >= 4)
{
@@ -308,7 +308,7 @@ BOOST_AUTO_TEST_CASE(tx_invalid)
fValid = false;
break;
}
- COutPoint outpoint(uint256S(vinput[0].get_str()), vinput[1].get_int());
+ COutPoint outpoint{uint256S(vinput[0].get_str()), uint32_t(vinput[1].get_int())};
mapprevOutScriptPubKeys[outpoint] = ParseScript(vinput[2].get_str());
if (vinput.size() >= 4)
{
diff --git a/src/test/txpackage_tests.cpp b/src/test/txpackage_tests.cpp
index 6f78b43826..560efb6b42 100644
--- a/src/test/txpackage_tests.cpp
+++ b/src/test/txpackage_tests.cpp
@@ -327,4 +327,236 @@ BOOST_FIXTURE_TEST_CASE(package_submission_tests, TestChain100Setup)
BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(tx_child->GetHash())));
}
}
+
+// Tests for packages containing transactions that have same-txid-different-witness equivalents in
+// the mempool.
+BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup)
+{
+ // Mine blocks to mature coinbases.
+ mineBlocks(5);
+ LOCK(cs_main);
+
+ // Transactions with a same-txid-different-witness transaction in the mempool should be ignored,
+ // and the mempool entry's wtxid returned.
+ CScript witnessScript = CScript() << OP_DROP << OP_TRUE;
+ CScript scriptPubKey = GetScriptForDestination(WitnessV0ScriptHash(witnessScript));
+ auto mtx_parent = CreateValidMempoolTransaction(/*input_transaction=*/ m_coinbase_txns[0], /*vout=*/ 0,
+ /*input_height=*/ 0, /*input_signing_key=*/ coinbaseKey,
+ /*output_destination=*/ scriptPubKey,
+ /*output_amount=*/ CAmount(49 * COIN), /*submit=*/ false);
+ CTransactionRef ptx_parent = MakeTransactionRef(mtx_parent);
+
+ // Make two children with the same txid but different witnesses.
+ CScriptWitness witness1;
+ witness1.stack.push_back(std::vector<unsigned char>(1));
+ witness1.stack.push_back(std::vector<unsigned char>(witnessScript.begin(), witnessScript.end()));
+
+ CScriptWitness witness2(witness1);
+ witness2.stack.push_back(std::vector<unsigned char>(2));
+ witness2.stack.push_back(std::vector<unsigned char>(witnessScript.begin(), witnessScript.end()));
+
+ CKey child_key;
+ child_key.MakeNewKey(true);
+ CScript child_locking_script = GetScriptForDestination(WitnessV0KeyHash(child_key.GetPubKey()));
+ CMutableTransaction mtx_child1;
+ mtx_child1.nVersion = 1;
+ mtx_child1.vin.resize(1);
+ mtx_child1.vin[0].prevout.hash = ptx_parent->GetHash();
+ mtx_child1.vin[0].prevout.n = 0;
+ mtx_child1.vin[0].scriptSig = CScript();
+ mtx_child1.vin[0].scriptWitness = witness1;
+ mtx_child1.vout.resize(1);
+ mtx_child1.vout[0].nValue = CAmount(48 * COIN);
+ mtx_child1.vout[0].scriptPubKey = child_locking_script;
+
+ CMutableTransaction mtx_child2{mtx_child1};
+ mtx_child2.vin[0].scriptWitness = witness2;
+
+ CTransactionRef ptx_child1 = MakeTransactionRef(mtx_child1);
+ CTransactionRef ptx_child2 = MakeTransactionRef(mtx_child2);
+
+ // child1 and child2 have the same txid
+ BOOST_CHECK_EQUAL(ptx_child1->GetHash(), ptx_child2->GetHash());
+ // child1 and child2 have different wtxids
+ BOOST_CHECK(ptx_child1->GetWitnessHash() != ptx_child2->GetWitnessHash());
+
+ // Try submitting Package1{parent, child1} and Package2{parent, child2} where the children are
+ // same-txid-different-witness.
+ {
+ const auto submit_witness1 = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
+ {ptx_parent, ptx_child1}, /*test_accept=*/ false);
+ BOOST_CHECK_MESSAGE(submit_witness1.m_state.IsValid(),
+ "Package validation unexpectedly failed: " << submit_witness1.m_state.GetRejectReason());
+ auto it_parent1 = submit_witness1.m_tx_results.find(ptx_parent->GetWitnessHash());
+ auto it_child1 = submit_witness1.m_tx_results.find(ptx_child1->GetWitnessHash());
+ BOOST_CHECK(it_parent1 != submit_witness1.m_tx_results.end());
+ BOOST_CHECK_MESSAGE(it_parent1->second.m_state.IsValid(),
+ "Transaction unexpectedly failed: " << it_parent1->second.m_state.GetRejectReason());
+ BOOST_CHECK(it_child1 != submit_witness1.m_tx_results.end());
+ BOOST_CHECK_MESSAGE(it_child1->second.m_state.IsValid(),
+ "Transaction unexpectedly failed: " << it_child1->second.m_state.GetRejectReason());
+
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_parent->GetHash())));
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_child1->GetHash())));
+
+ const auto submit_witness2 = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
+ {ptx_parent, ptx_child2}, /*test_accept=*/ false);
+ BOOST_CHECK_MESSAGE(submit_witness2.m_state.IsValid(),
+ "Package validation unexpectedly failed: " << submit_witness2.m_state.GetRejectReason());
+ auto it_parent2_deduped = submit_witness2.m_tx_results.find(ptx_parent->GetWitnessHash());
+ auto it_child2 = submit_witness2.m_tx_results.find(ptx_child2->GetWitnessHash());
+ BOOST_CHECK(it_parent2_deduped != submit_witness2.m_tx_results.end());
+ BOOST_CHECK(it_parent2_deduped->second.m_result_type == MempoolAcceptResult::ResultType::MEMPOOL_ENTRY);
+ BOOST_CHECK(it_child2 != submit_witness2.m_tx_results.end());
+ BOOST_CHECK(it_child2->second.m_result_type == MempoolAcceptResult::ResultType::DIFFERENT_WITNESS);
+ BOOST_CHECK_EQUAL(ptx_child1->GetWitnessHash(), it_child2->second.m_other_wtxid.value());
+
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_child2->GetHash())));
+ BOOST_CHECK(!m_node.mempool->exists(GenTxid::Wtxid(ptx_child2->GetWitnessHash())));
+ }
+
+ // Try submitting a new package {child2, grandchild} where child2 is same-txid-different-witness as
+ // the in-mempool transaction, child1. Since child1 exists in the mempool and its outputs are
+ // available, child2 should be ignored and grandchild should be accepted.
+ //
+ // This tests a potential censorship vector in which an attacker broadcasts a competing package
+ // where a parent's witness is mutated. The honest package should be accepted despite the fact
+ // that we don't allow witness replacement.
+ CKey grandchild_key;
+ grandchild_key.MakeNewKey(true);
+ CScript grandchild_locking_script = GetScriptForDestination(WitnessV0KeyHash(grandchild_key.GetPubKey()));
+ auto mtx_grandchild = CreateValidMempoolTransaction(/*input_transaction=*/ ptx_child2, /*vout=*/ 0,
+ /*input_height=*/ 0, /*input_signing_key=*/ child_key,
+ /*output_destination=*/ grandchild_locking_script,
+ /*output_amount=*/ CAmount(47 * COIN), /*submit=*/ false);
+ CTransactionRef ptx_grandchild = MakeTransactionRef(mtx_grandchild);
+
+ // We already submitted child1 above.
+ {
+ const auto submit_spend_ignored = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool,
+ {ptx_child2, ptx_grandchild}, /*test_accept=*/ false);
+ BOOST_CHECK_MESSAGE(submit_spend_ignored.m_state.IsValid(),
+ "Package validation unexpectedly failed: " << submit_spend_ignored.m_state.GetRejectReason());
+ auto it_child2_ignored = submit_spend_ignored.m_tx_results.find(ptx_child2->GetWitnessHash());
+ auto it_grandchild = submit_spend_ignored.m_tx_results.find(ptx_grandchild->GetWitnessHash());
+ BOOST_CHECK(it_child2_ignored != submit_spend_ignored.m_tx_results.end());
+ BOOST_CHECK(it_child2_ignored->second.m_result_type == MempoolAcceptResult::ResultType::DIFFERENT_WITNESS);
+ BOOST_CHECK(it_grandchild != submit_spend_ignored.m_tx_results.end());
+ BOOST_CHECK(it_grandchild->second.m_result_type == MempoolAcceptResult::ResultType::VALID);
+
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_child2->GetHash())));
+ BOOST_CHECK(!m_node.mempool->exists(GenTxid::Wtxid(ptx_child2->GetWitnessHash())));
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Wtxid(ptx_grandchild->GetWitnessHash())));
+ }
+
+ // A package Package{parent1, parent2, parent3, child} where the parents are a mixture of
+ // identical-tx-in-mempool, same-txid-different-witness-in-mempool, and new transactions.
+ Package package_mixed;
+
+ // Give all the parents anyone-can-spend scripts so we don't have to deal with signing the child.
+ CScript acs_script = CScript() << OP_TRUE;
+ CScript acs_spk = GetScriptForDestination(WitnessV0ScriptHash(acs_script));
+ CScriptWitness acs_witness;
+ acs_witness.stack.push_back(std::vector<unsigned char>(acs_script.begin(), acs_script.end()));
+
+ // parent1 will already be in the mempool
+ auto mtx_parent1 = CreateValidMempoolTransaction(/*input_transaction=*/ m_coinbase_txns[1], /*vout=*/ 0,
+ /*input_height=*/ 0, /*input_signing_key=*/ coinbaseKey,
+ /*output_destination=*/ acs_spk,
+ /*output_amount=*/ CAmount(49 * COIN), /*submit=*/ true);
+ CTransactionRef ptx_parent1 = MakeTransactionRef(mtx_parent1);
+ package_mixed.push_back(ptx_parent1);
+
+ // parent2 will have a same-txid-different-witness tx already in the mempool
+ CScript grandparent2_script = CScript() << OP_DROP << OP_TRUE;
+ CScript grandparent2_spk = GetScriptForDestination(WitnessV0ScriptHash(grandparent2_script));
+ CScriptWitness parent2_witness1;
+ parent2_witness1.stack.push_back(std::vector<unsigned char>(1));
+ parent2_witness1.stack.push_back(std::vector<unsigned char>(grandparent2_script.begin(), grandparent2_script.end()));
+ CScriptWitness parent2_witness2;
+ parent2_witness2.stack.push_back(std::vector<unsigned char>(2));
+ parent2_witness2.stack.push_back(std::vector<unsigned char>(grandparent2_script.begin(), grandparent2_script.end()));
+
+ // Create grandparent2 creating an output with multiple spending paths. Submit to mempool.
+ auto mtx_grandparent2 = CreateValidMempoolTransaction(/*input_transaction=*/ m_coinbase_txns[2], /*vout=*/ 0,
+ /*input_height=*/ 0, /*input_signing_key=*/ coinbaseKey,
+ /*output_destination=*/ grandparent2_spk,
+ /*output_amount=*/ CAmount(49 * COIN), /*submit=*/ true);
+ CTransactionRef ptx_grandparent2 = MakeTransactionRef(mtx_grandparent2);
+
+ CMutableTransaction mtx_parent2_v1;
+ mtx_parent2_v1.nVersion = 1;
+ mtx_parent2_v1.vin.resize(1);
+ mtx_parent2_v1.vin[0].prevout.hash = ptx_grandparent2->GetHash();
+ mtx_parent2_v1.vin[0].prevout.n = 0;
+ mtx_parent2_v1.vin[0].scriptSig = CScript();
+ mtx_parent2_v1.vin[0].scriptWitness = parent2_witness1;
+ mtx_parent2_v1.vout.resize(1);
+ mtx_parent2_v1.vout[0].nValue = CAmount(48 * COIN);
+ mtx_parent2_v1.vout[0].scriptPubKey = acs_spk;
+
+ CMutableTransaction mtx_parent2_v2{mtx_parent2_v1};
+ mtx_parent2_v2.vin[0].scriptWitness = parent2_witness2;
+
+ CTransactionRef ptx_parent2_v1 = MakeTransactionRef(mtx_parent2_v1);
+ CTransactionRef ptx_parent2_v2 = MakeTransactionRef(mtx_parent2_v2);
+ // Put parent2_v1 in the package, submit parent2_v2 to the mempool.
+ const MempoolAcceptResult parent2_v2_result = m_node.chainman->ProcessTransaction(ptx_parent2_v2);
+ BOOST_CHECK(parent2_v2_result.m_result_type == MempoolAcceptResult::ResultType::VALID);
+ package_mixed.push_back(ptx_parent2_v1);
+
+ // parent3 will be a new transaction
+ auto mtx_parent3 = CreateValidMempoolTransaction(/*input_transaction=*/ m_coinbase_txns[3], /*vout=*/ 0,
+ /*input_height=*/ 0, /*input_signing_key=*/ coinbaseKey,
+ /*output_destination=*/ acs_spk,
+ /*output_amount=*/ CAmount(49 * COIN), /*submit=*/ false);
+ CTransactionRef ptx_parent3 = MakeTransactionRef(mtx_parent3);
+ package_mixed.push_back(ptx_parent3);
+
+ // child spends parent1, parent2, and parent3
+ CKey mixed_grandchild_key;
+ mixed_grandchild_key.MakeNewKey(true);
+ CScript mixed_child_spk = GetScriptForDestination(WitnessV0KeyHash(mixed_grandchild_key.GetPubKey()));
+
+ CMutableTransaction mtx_mixed_child;
+ mtx_mixed_child.vin.push_back(CTxIn(COutPoint(ptx_parent1->GetHash(), 0)));
+ mtx_mixed_child.vin.push_back(CTxIn(COutPoint(ptx_parent2_v1->GetHash(), 0)));
+ mtx_mixed_child.vin.push_back(CTxIn(COutPoint(ptx_parent3->GetHash(), 0)));
+ mtx_mixed_child.vin[0].scriptWitness = acs_witness;
+ mtx_mixed_child.vin[1].scriptWitness = acs_witness;
+ mtx_mixed_child.vin[2].scriptWitness = acs_witness;
+ mtx_mixed_child.vout.push_back(CTxOut(145 * COIN, mixed_child_spk));
+ CTransactionRef ptx_mixed_child = MakeTransactionRef(mtx_mixed_child);
+ package_mixed.push_back(ptx_mixed_child);
+
+ // Submit package:
+ // parent1 should be ignored
+ // parent2_v1 should be ignored (and v2 wtxid returned)
+ // parent3 should be accepted
+ // child should be accepted
+ {
+ const auto mixed_result = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package_mixed, false);
+ BOOST_CHECK_MESSAGE(mixed_result.m_state.IsValid(), mixed_result.m_state.GetRejectReason());
+ auto it_parent1 = mixed_result.m_tx_results.find(ptx_parent1->GetWitnessHash());
+ auto it_parent2 = mixed_result.m_tx_results.find(ptx_parent2_v1->GetWitnessHash());
+ auto it_parent3 = mixed_result.m_tx_results.find(ptx_parent3->GetWitnessHash());
+ auto it_child = mixed_result.m_tx_results.find(ptx_mixed_child->GetWitnessHash());
+ BOOST_CHECK(it_parent1 != mixed_result.m_tx_results.end());
+ BOOST_CHECK(it_parent2 != mixed_result.m_tx_results.end());
+ BOOST_CHECK(it_parent3 != mixed_result.m_tx_results.end());
+ BOOST_CHECK(it_child != mixed_result.m_tx_results.end());
+
+ BOOST_CHECK(it_parent1->second.m_result_type == MempoolAcceptResult::ResultType::MEMPOOL_ENTRY);
+ BOOST_CHECK(it_parent2->second.m_result_type == MempoolAcceptResult::ResultType::DIFFERENT_WITNESS);
+ BOOST_CHECK(it_parent3->second.m_result_type == MempoolAcceptResult::ResultType::VALID);
+ BOOST_CHECK(it_child->second.m_result_type == MempoolAcceptResult::ResultType::VALID);
+ BOOST_CHECK_EQUAL(ptx_parent2_v2->GetWitnessHash(), it_parent2->second.m_other_wtxid.value());
+
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_parent1->GetHash())));
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_parent2_v1->GetHash())));
+ BOOST_CHECK(!m_node.mempool->exists(GenTxid::Wtxid(ptx_parent2_v1->GetWitnessHash())));
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_parent3->GetHash())));
+ BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_mixed_child->GetHash())));
+ }
+}
BOOST_AUTO_TEST_SUITE_END()
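
The new package_witness_swap_tests rely on the fact that a transaction's txid commits only to its non-witness serialization while the wtxid also commits to the witness, so two transactions can share a txid and differ only in witness data. A small helper expressing the invariant the test asserts, using the existing CTransaction accessors (shown only as an illustration):

    #include <primitives/transaction.h>

    // True if the two transactions are identical except for witness data:
    // GetHash() is the txid (non-witness serialization), GetWitnessHash() the wtxid.
    bool SameTxidDifferentWitness(const CTransaction& a, const CTransaction& b)
    {
        return a.GetHash() == b.GetHash() && a.GetWitnessHash() != b.GetWitnessHash();
    }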
diff --git a/src/test/util/blockfilter.cpp b/src/test/util/blockfilter.cpp
index 538981ce36..3ae22921b9 100644
--- a/src/test/util/blockfilter.cpp
+++ b/src/test/util/blockfilter.cpp
@@ -13,6 +13,8 @@ using node::UndoReadFromDisk;
bool ComputeFilter(BlockFilterType filter_type, const CBlockIndex* block_index, BlockFilter& filter)
{
+ LOCK(::cs_main);
+
CBlock block;
if (!ReadBlockFromDisk(block, block_index->GetBlockPos(), Params().GetConsensus())) {
return false;
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 87546f45f2..c968e4d124 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -42,6 +42,7 @@
#include <walletinitinterface.h>
#include <functional>
+#include <stdexcept>
using node::BlockAssembler;
using node::CalculateCacheSizes;
@@ -88,7 +89,7 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve
m_args{}
{
m_node.args = &gArgs;
- const std::vector<const char*> arguments = Cat(
+ std::vector<const char*> arguments = Cat(
{
"dummy",
"-printtoconsole=0",
@@ -100,6 +101,9 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve
"-debugexclude=leveldb",
},
extra_args);
+ if (G_TEST_COMMAND_LINE_ARGUMENTS) {
+ arguments = Cat(arguments, G_TEST_COMMAND_LINE_ARGUMENTS());
+ }
util::ThreadRename("test");
fs::create_directories(m_path_root);
m_args.ForceSetArg("-datadir", fs::PathToString(m_path_root));
@@ -108,9 +112,10 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve
{
SetupServerArgs(*m_node.args);
std::string error;
- const bool success{m_node.args->ParseParameters(arguments.size(), arguments.data(), error)};
- assert(success);
- assert(error.empty());
+ if (!m_node.args->ParseParameters(arguments.size(), arguments.data(), error)) {
+ m_node.args->ClearArgs();
+ throw std::runtime_error{error};
+ }
}
SelectParams(chainName);
SeedInsecureRand();
@@ -218,7 +223,9 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const
throw std::runtime_error(strprintf("ActivateBestChain failed. (%s)", state.ToString()));
}
- m_node.addrman = std::make_unique<AddrMan>(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/0);
+ m_node.addrman = std::make_unique<AddrMan>(/*asmap=*/std::vector<bool>(),
+ /*deterministic=*/false,
+ m_node.args->GetIntArg("-checkaddrman", 0));
m_node.banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME);
m_node.connman = std::make_unique<CConnman>(0x1337, 0x1337, *m_node.addrman); // Deterministic randomness for tests.
m_node.peerman = PeerManager::make(chainparams, *m_node.connman, *m_node.addrman,
diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h
index 58ffd77995..a1b7525cf4 100644
--- a/src/test/util/setup_common.h
+++ b/src/test/util/setup_common.h
@@ -19,12 +19,16 @@
#include <util/string.h>
#include <util/vector.h>
+#include <functional>
#include <type_traits>
#include <vector>
/** This is connected to the logger. Can be used to redirect logs to any other log */
extern const std::function<void(const std::string&)> G_TEST_LOG_FUN;
+/** Retrieve the command line arguments. */
+extern const std::function<std::vector<const char*>()> G_TEST_COMMAND_LINE_ARGUMENTS;
+
// Enable BOOST_CHECK_EQUAL for enum class types
namespace std {
template <typename T>
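
G_TEST_COMMAND_LINE_ARGUMENTS lets each test binary forward extra command-line options (such as the -checkaddrman level consumed in setup_common.cpp above) into BasicTestingSetup. How the arguments are captured is up to each harness; a hypothetical definition in a test binary's main translation unit could look like the following, where g_captured_args is an illustrative name rather than anything from the tree:

    #include <test/util/setup_common.h>

    #include <string>
    #include <vector>

    // Hypothetical storage filled from the real argv before the tests run.
    static std::vector<std::string> g_captured_args;

    const std::function<std::vector<const char*>()> G_TEST_COMMAND_LINE_ARGUMENTS = [] {
        std::vector<const char*> args;
        args.reserve(g_captured_args.size());
        for (const auto& a : g_captured_args) args.push_back(a.c_str());
        return args;
    };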
diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp
index f5742b65a1..26392e690d 100644
--- a/src/test/validation_chainstatemanager_tests.cpp
+++ b/src/test/validation_chainstatemanager_tests.cpp
@@ -235,7 +235,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_activate_snapshot, TestChain100Setup)
*chainman.SnapshotBlockhash());
// Ensure that the genesis block was not marked assumed-valid.
- BOOST_CHECK(!chainman.ActiveChain().Genesis()->IsAssumedValid());
+ BOOST_CHECK(WITH_LOCK(::cs_main, return !chainman.ActiveChain().Genesis()->IsAssumedValid()));
const AssumeutxoData& au_data = *ExpectedAssumeutxo(snapshot_height, ::Params());
const CBlockIndex* tip = chainman.ActiveTip();
@@ -356,6 +356,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_loadblockindex, TestChain100Setup)
// Mark some region of the chain assumed-valid.
for (int i = 0; i <= cs1.m_chain.Height(); ++i) {
+ LOCK(::cs_main);
auto index = cs1.m_chain[i];
if (i < last_assumed_valid_idx && i >= assumed_valid_start_idx) {
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 85eea888cc..5e4379a60a 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -296,8 +296,8 @@ bool CBlockTreeDB::ReadFlag(const std::string &name, bool &fValue) {
bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex)
{
+ AssertLockHeld(::cs_main);
std::unique_ptr<CDBIterator> pcursor(NewIterator());
-
pcursor->Seek(std::make_pair(DB_BLOCK_INDEX, uint256()));
// Load m_block_index
@@ -322,8 +322,9 @@ bool CBlockTreeDB::LoadBlockIndexGuts(const Consensus::Params& consensusParams,
pindexNew->nStatus = diskindex.nStatus;
pindexNew->nTx = diskindex.nTx;
- if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams))
+ if (!CheckProofOfWork(pindexNew->GetBlockHash(), pindexNew->nBits, consensusParams)) {
return error("%s: CheckProofOfWork failed: %s", __func__, pindexNew->ToString());
+ }
pcursor->Next();
} else {
diff --git a/src/txdb.h b/src/txdb.h
index d1f47719c4..e70f3cd1f2 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -86,7 +86,8 @@ public:
void ReadReindexing(bool &fReindexing);
bool WriteFlag(const std::string &name, bool fValue);
bool ReadFlag(const std::string &name, bool &fValue);
- bool LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex);
+ bool LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex)
+ EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
};
std::optional<bilingual_str> CheckLegacyTxindex(CBlockTreeDB& block_tree_db);
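
The txdb change pairs a compile-time requirement (EXCLUSIVE_LOCKS_REQUIRED(::cs_main) on the declaration) with a runtime AssertLockHeld in the definition, the usual belt-and-braces pattern for lock contracts in this codebase. A generic sketch of the pattern, assuming the macros from sync.h:

    #include <sync.h>

    Mutex g_state_mutex;
    int g_state GUARDED_BY(g_state_mutex){0};

    // Callers must already hold g_state_mutex: clang's thread-safety analysis
    // verifies the annotation at compile time, AssertLockHeld() re-checks at run time.
    void BumpState() EXCLUSIVE_LOCKS_REQUIRED(g_state_mutex)
    {
        AssertLockHeld(g_state_mutex);
        ++g_state;
    }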
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index dc2769b81e..fb5652d0a0 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -116,10 +116,9 @@ size_t CTxMemPoolEntry::GetTxSize() const
return GetVirtualTransactionSize(nTxWeight, sigOpCost);
}
-// Update the given tx for any in-mempool descendants.
-// Assumes that CTxMemPool::m_children is correct for the given tx and all
-// descendants.
-void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set<uint256> &setExclude)
+void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendants,
+ const std::set<uint256>& setExclude, std::set<uint256>& descendants_to_remove,
+ uint64_t ancestor_size_limit, uint64_t ancestor_count_limit)
{
CTxMemPoolEntry::Children stageEntries, descendants;
stageEntries = updateIt->GetMemPoolChildrenConst();
@@ -156,17 +155,18 @@ void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendan
cachedDescendants[updateIt].insert(mapTx.iterator_to(descendant));
// Update ancestor state for each descendant
mapTx.modify(mapTx.iterator_to(descendant), update_ancestor_state(updateIt->GetTxSize(), updateIt->GetModifiedFee(), 1, updateIt->GetSigOpCost()));
+ // Don't directly remove the transaction here -- doing so would
+ // invalidate iterators in cachedDescendants. Mark it for removal
+ // by inserting into descendants_to_remove.
+ if (descendant.GetCountWithAncestors() > ancestor_count_limit || descendant.GetSizeWithAncestors() > ancestor_size_limit) {
+ descendants_to_remove.insert(descendant.GetTx().GetHash());
+ }
}
}
mapTx.modify(updateIt, update_descendant_state(modifySize, modifyFee, modifyCount));
}
-// vHashesToUpdate is the set of transaction hashes from a disconnected block
-// which has been re-added to the mempool.
-// for each entry, look for descendants that are outside vHashesToUpdate, and
-// add fee/size information for such descendants to the parent.
-// for each such descendant, also update the ancestor state to include the parent.
-void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashesToUpdate)
+void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashesToUpdate, uint64_t ancestor_size_limit, uint64_t ancestor_count_limit)
{
AssertLockHeld(cs);
// For each entry in vHashesToUpdate, store the set of in-mempool, but not
@@ -178,6 +178,8 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes
// accounted for in the state of their ancestors)
std::set<uint256> setAlreadyIncluded(vHashesToUpdate.begin(), vHashesToUpdate.end());
+ std::set<uint256> descendants_to_remove;
+
// Iterate in reverse, so that whenever we are looking at a transaction
// we are sure that all in-mempool descendants have already been processed.
// This maximizes the benefit of the descendant cache and guarantees that
@@ -207,7 +209,15 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes
}
}
} // release epoch guard for UpdateForDescendants
- UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded);
+ UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded, descendants_to_remove, ancestor_size_limit, ancestor_count_limit);
+ }
+
+ for (const auto& txid : descendants_to_remove) {
+ // This txid may have been removed already in a prior call to removeRecursive.
+ // Therefore we check that it has not already been removed.
+ if (const std::optional<txiter> txiter = GetIter(txid)) {
+ removeRecursive((*txiter)->GetTx(), MemPoolRemovalReason::SIZELIMIT);
+ }
}
}
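
The descendants_to_remove indirection above is the usual collect-then-erase pattern: calling removeRecursive while cachedDescendants still holds iterators into mapTx would invalidate them, so offending txids are recorded during the scan and removed in a second pass. A generic sketch of the same pattern on a plain container:

    #include <map>
    #include <set>
    #include <string>

    // Erase entries matching a condition without mutating the container while
    // other bookkeeping may still reference its elements during the scan.
    void EraseAfterScan(std::map<std::string, int>& pool)
    {
        std::set<std::string> to_remove;
        for (const auto& [key, value] : pool) {
            if (value < 0) to_remove.insert(key); // mark only; no erase mid-scan
        }
        for (const auto& key : to_remove) {
            pool.erase(key); // second pass: safe to mutate now
        }
    }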
diff --git a/src/txmempool.h b/src/txmempool.h
index b8c508fd90..e7e5a3c402 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -312,16 +312,6 @@ public:
}
};
-struct update_lock_points
-{
- explicit update_lock_points(const LockPoints& _lp) : lp(_lp) { }
-
- void operator() (CTxMemPoolEntry &e) { e.UpdateLockPoints(lp); }
-
-private:
- const LockPoints& lp;
-};
-
// Multi_index tag names
struct descendant_score {};
struct entry_time {};
@@ -599,10 +589,14 @@ public:
void addUnchecked(const CTxMemPoolEntry& entry, setEntries& setAncestors, bool validFeeEstimate = true) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main);
void removeRecursive(const CTransaction& tx, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs);
- /** After reorg, check if mempool entries are now non-final, premature coinbase spends, or have
- * invalid lockpoints. Update lockpoints and remove entries (and descendants of entries) that
- * are no longer valid. */
- void removeForReorg(CChain& chain, std::function<bool(txiter)> check_final_and_mature) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main);
+ /** After reorg, filter the entries that would no longer be valid in the next block, and update
+ * the entries' cached LockPoints if needed. The mempool does not have any knowledge of
+ * consensus rules. It just applies the callable function and removes the ones for which it
+ * returns true.
+ * @param[in] filter_final_and_mature Predicate that checks the relevant validation rules
+ * and updates an entry's LockPoints.
+ * */
+ void removeForReorg(CChain& chain, std::function<bool(txiter)> filter_final_and_mature) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main);
void removeConflicts(const CTransaction& tx) EXCLUSIVE_LOCKS_REQUIRED(cs);
void removeForBlock(const std::vector<CTransactionRef>& vtx, unsigned int nBlockHeight) EXCLUSIVE_LOCKS_REQUIRED(cs);
@@ -642,16 +636,25 @@ public:
*/
void RemoveStaged(setEntries& stage, bool updateDescendants, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs);
- /** When adding transactions from a disconnected block back to the mempool,
- * new mempool entries may have children in the mempool (which is generally
- * not the case when otherwise adding transactions).
- * UpdateTransactionsFromBlock() will find child transactions and update the
- * descendant state for each transaction in vHashesToUpdate (excluding any
- * child transactions present in vHashesToUpdate, which are already accounted
- * for). Note: vHashesToUpdate should be the set of transactions from the
- * disconnected block that have been accepted back into the mempool.
+ /** UpdateTransactionsFromBlock is called when adding transactions from a
+ * disconnected block back to the mempool; in that case, new mempool entries may have
+ * children in the mempool (which is generally not the case when otherwise
+ * adding transactions).
+ * @post updated descendant state for descendants of each transaction in
+ * vHashesToUpdate (excluding any child transactions present in
+ * vHashesToUpdate, which are already accounted for). The updated state
+ * includes the fee/size information of such descendants added to the
+ * parent, and the descendants' ancestor state updated to include the parent.
+ *
+ * @param[in] vHashesToUpdate The set of txids from the
+ * disconnected block that have been accepted back into the mempool.
+ * @param[in] ancestor_size_limit The maximum allowed size in virtual
+ * bytes of an entry and its ancestors
+ * @param[in] ancestor_count_limit The maximum allowed number of
+ * transactions including the entry and its ancestors.
*/
- void UpdateTransactionsFromBlock(const std::vector<uint256>& vHashesToUpdate) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main) LOCKS_EXCLUDED(m_epoch);
+ void UpdateTransactionsFromBlock(const std::vector<uint256>& vHashesToUpdate,
+ uint64_t ancestor_size_limit, uint64_t ancestor_count_limit) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main) LOCKS_EXCLUDED(m_epoch);
/** Try to calculate all in-mempool ancestors of entry.
* (these are all calculated including the tx itself)
@@ -800,19 +803,38 @@ private:
/** UpdateForDescendants is used by UpdateTransactionsFromBlock to update
* the descendants for a single transaction that has been added to the
* mempool but may have child transactions in the mempool, eg during a
- * chain reorg. setExclude is the set of descendant transactions in the
- * mempool that must not be accounted for (because any descendants in
- * setExclude were added to the mempool after the transaction being
- * updated and hence their state is already reflected in the parent
- * state).
+ * chain reorg.
+ *
+ * @pre CTxMemPool::m_children is correct for the given tx and all
+ * descendants.
+ * @pre cachedDescendants is an accurate cache where each entry has all
+ * descendants of the corresponding key, including those that should
+ * be removed for violation of ancestor limits.
+ * @post if updateIt has any non-excluded descendants, cachedDescendants has
+ * a new cache line for updateIt.
+ * @post descendants_to_remove has a new entry for any descendant which exceeded
+ * ancestor limits relative to updateIt.
*
- * cachedDescendants will be updated with the descendants of the transaction
- * being updated, so that future invocations don't need to walk the
- * same transaction again, if encountered in another transaction chain.
+ * @param[in] updateIt the entry to update for its descendants
+ * @param[in,out] cachedDescendants a cache where each line maps a transaction to
+ * all of its descendants. It will be updated with the descendants of the transaction
+ * being updated, so that future invocations don't need to walk the same
+ * transaction again, if encountered in another transaction chain.
+ * @param[in] setExclude the set of descendant transactions in the mempool
+ * that must not be accounted for (because any descendants in setExclude
+ * were added to the mempool after the transaction being updated and hence
+ * their state is already reflected in the parent state).
+ * @param[out] descendants_to_remove Populated with the txids of entries that
+ * exceed ancestor limits. It's the responsibility of the caller to
+ * removeRecursive them.
+ * @param[in] ancestor_size_limit the max number of ancestral bytes allowed
+ * for any descendant
+ * @param[in] ancestor_count_limit the max number of ancestor transactions
+ * allowed for any descendant
*/
- void UpdateForDescendants(txiter updateIt,
- cacheMap &cachedDescendants,
- const std::set<uint256> &setExclude) EXCLUSIVE_LOCKS_REQUIRED(cs);
+ void UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendants,
+ const std::set<uint256>& setExclude, std::set<uint256>& descendants_to_remove,
+ uint64_t ancestor_size_limit, uint64_t ancestor_count_limit) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Update ancestors of hash to add/remove it as a descendant transaction. */
void UpdateAncestorsOf(bool add, txiter hash, setEntries &setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs);
/** Set ancestor state for an entry */
diff --git a/src/uint256.h b/src/uint256.h
index 72681d09c9..5c3a2f5409 100644
--- a/src/uint256.h
+++ b/src/uint256.h
@@ -6,6 +6,8 @@
#ifndef BITCOIN_UINT256_H
#define BITCOIN_UINT256_H
+#include <span.h>
+
#include <assert.h>
#include <cstring>
#include <stdint.h>
@@ -96,13 +98,13 @@ public:
template<typename Stream>
void Serialize(Stream& s) const
{
- s.write((char*)m_data, sizeof(m_data));
+ s.write(MakeByteSpan(m_data));
}
template<typename Stream>
void Unserialize(Stream& s)
{
- s.read((char*)m_data, sizeof(m_data));
+ s.read(MakeWritableByteSpan(m_data));
}
};
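
With the stream interface moving to spans of std::byte, fixed-size blobs such as base_blob's m_data serialize by wrapping the array in a byte span instead of casting to char*. The same pattern works for any trivially-copyable array member; a minimal sketch, where Stream stands for any class providing the span-based read/write shown in the streams.h changes:

    #include <span.h>

    #include <cstdint>

    class FixedBlob16
    {
        uint8_t m_data[16]{};

    public:
        template <typename Stream>
        void Serialize(Stream& s) const
        {
            s.write(MakeByteSpan(m_data)); // writes all 16 bytes, no pointer casts
        }

        template <typename Stream>
        void Unserialize(Stream& s)
        {
            s.read(MakeWritableByteSpan(m_data)); // reads exactly sizeof(m_data) bytes
        }
    };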
diff --git a/src/util/settings.cpp b/src/util/settings.cpp
index 442a55ffb0..683b7ae652 100644
--- a/src/util/settings.cpp
+++ b/src/util/settings.cpp
@@ -112,7 +112,7 @@ bool WriteSettings(const fs::path& path,
errors.emplace_back(strprintf("Error: Unable to open settings file %s for writing", fs::PathToString(path)));
return false;
}
- file << out.write(/* prettyIndent= */ 1, /* indentLevel= */ 4) << std::endl;
+ file << out.write(/* prettyIndent= */ 4, /* indentLevel= */ 1) << std::endl;
file.close();
return true;
}
diff --git a/src/util/system.cpp b/src/util/system.cpp
index 8f35b7b6c6..19de08d1ea 100644
--- a/src/util/system.cpp
+++ b/src/util/system.cpp
@@ -6,11 +6,6 @@
#include <util/system.h>
#ifdef ENABLE_EXTERNAL_SIGNER
-#if defined(WIN32) && !defined(__kernel_entry)
-// A workaround for boost 1.71 incompatibility with mingw-w64 compiler.
-// For details see https://github.com/bitcoin/bitcoin/pull/22348.
-#define __kernel_entry
-#endif
#include <boost/process.hpp>
#endif // ENABLE_EXTERNAL_SIGNER
@@ -151,7 +146,7 @@ bool CheckDiskSpace(const fs::path& dir, uint64_t additional_bytes)
}
std::streampos GetFileSize(const char* path, std::streamsize max) {
- std::ifstream file(path, std::ios::binary);
+ fsbridge::ifstream file{path, std::ios::binary};
file.ignore(max);
return file.gcount();
}
diff --git a/src/validation.cpp b/src/validation.cpp
index d34ba1d635..c12dc9e8b6 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -347,43 +347,59 @@ void CChainState::MaybeUpdateMempoolForReorg(
// previously-confirmed transactions back to the mempool.
// UpdateTransactionsFromBlock finds descendants of any transactions in
// the disconnectpool that were added back and cleans up the mempool state.
- m_mempool->UpdateTransactionsFromBlock(vHashUpdate);
-
- const auto check_final_and_mature = [this, flags=STANDARD_LOCKTIME_VERIFY_FLAGS](CTxMemPool::txiter it)
+ const uint64_t ancestor_count_limit = gArgs.GetIntArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT);
+ const uint64_t ancestor_size_limit = gArgs.GetIntArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * 1000;
+ m_mempool->UpdateTransactionsFromBlock(vHashUpdate, ancestor_size_limit, ancestor_count_limit);
+
+ // Predicate to use for filtering transactions in removeForReorg.
+ // Checks whether the transaction is still final and, if it spends a coinbase output, mature.
+ // Also updates valid entries' cached LockPoints if needed.
+ // If the predicate returns false, the tx is still valid and its lockpoints are updated.
+ // If it returns true, the tx would be invalid in the next block; remove this entry and all of its descendants.
+ const auto filter_final_and_mature = [this, flags=STANDARD_LOCKTIME_VERIFY_FLAGS](CTxMemPool::txiter it)
EXCLUSIVE_LOCKS_REQUIRED(m_mempool->cs, ::cs_main) {
- bool should_remove = false;
AssertLockHeld(m_mempool->cs);
AssertLockHeld(::cs_main);
const CTransaction& tx = it->GetTx();
+
+ // The transaction must be final.
+ if (!CheckFinalTx(m_chain.Tip(), tx, flags)) return true;
LockPoints lp = it->GetLockPoints();
const bool validLP{TestLockPointValidity(m_chain, lp)};
CCoinsViewMemPool view_mempool(&CoinsTip(), *m_mempool);
- if (!CheckFinalTx(m_chain.Tip(), tx, flags)
- || !CheckSequenceLocks(m_chain.Tip(), view_mempool, tx, flags, &lp, validLP)) {
- // Note if CheckSequenceLocks fails the LockPoints may still be invalid
- // So it's critical that we remove the tx and not depend on the LockPoints.
- should_remove = true;
- } else if (it->GetSpendsCoinbase()) {
+ // CheckSequenceLocks checks if the transaction will be final in the next block to be
+ // created on top of the new chain. We use useExistingLockPoints=false so that, instead of
+ // using the information in lp (which might now refer to a block that no longer exists in
+ // the chain), it will update lp to contain LockPoints relevant to the new chain.
+ if (!CheckSequenceLocks(m_chain.Tip(), view_mempool, tx, flags, &lp, validLP)) {
+ // If CheckSequenceLocks fails, remove the tx and don't depend on the LockPoints.
+ return true;
+ } else if (!validLP) {
+ // If CheckSequenceLocks succeeded, it also updated the LockPoints.
+ // Now update the mempool entry lockpoints as well.
+ m_mempool->mapTx.modify(it, [&lp](CTxMemPoolEntry& e) { e.UpdateLockPoints(lp); });
+ }
+
+ // If the transaction spends any coinbase outputs, it must be mature.
+ if (it->GetSpendsCoinbase()) {
for (const CTxIn& txin : tx.vin) {
auto it2 = m_mempool->mapTx.find(txin.prevout.hash);
if (it2 != m_mempool->mapTx.end())
continue;
- const Coin &coin = CoinsTip().AccessCoin(txin.prevout);
+ const Coin& coin{CoinsTip().AccessCoin(txin.prevout)};
assert(!coin.IsSpent());
const auto mempool_spend_height{m_chain.Tip()->nHeight + 1};
- if (coin.IsSpent() || (coin.IsCoinBase() && mempool_spend_height - coin.nHeight < COINBASE_MATURITY)) {
- should_remove = true;
- break;
+ if (coin.IsCoinBase() && mempool_spend_height - coin.nHeight < COINBASE_MATURITY) {
+ return true;
}
}
}
- // CheckSequenceLocks updates lp. Update the mempool entry LockPoints.
- if (!validLP) m_mempool->mapTx.modify(it, update_lock_points(lp));
- return should_remove;
+ // Transaction is still valid and cached LockPoints are updated.
+ return false;
};
// We also need to remove any now-immature transactions
- m_mempool->removeForReorg(m_chain, check_final_and_mature);
+ m_mempool->removeForReorg(m_chain, filter_final_and_mature);
// Re-limit mempool size, in case we added any transactions
LimitMempoolSize(
*m_mempool,
@@ -605,10 +621,10 @@ private:
// Submit all transactions to the mempool and call ConsensusScriptChecks to add to the script
// cache - should only be called after successful validation of all transactions in the package.
- // The package may end up partially-submitted after size limitting; returns true if all
+ // The package may end up partially-submitted after size limiting; returns true if all
// transactions are successfully added to the mempool, false otherwise.
- bool FinalizePackage(const ATMPArgs& args, std::vector<Workspace>& workspaces, PackageValidationState& package_state,
- std::map<const uint256, const MempoolAcceptResult>& results)
+ bool SubmitPackage(const ATMPArgs& args, std::vector<Workspace>& workspaces, PackageValidationState& package_state,
+ std::map<const uint256, const MempoolAcceptResult>& results)
EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
// Compare a package's feerate against minimum allowed.
@@ -1047,12 +1063,17 @@ bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws)
return true;
}
-bool MemPoolAccept::FinalizePackage(const ATMPArgs& args, std::vector<Workspace>& workspaces,
- PackageValidationState& package_state,
- std::map<const uint256, const MempoolAcceptResult>& results)
+bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>& workspaces,
+ PackageValidationState& package_state,
+ std::map<const uint256, const MempoolAcceptResult>& results)
{
AssertLockHeld(cs_main);
AssertLockHeld(m_pool.cs);
+ // Sanity check: none of the transactions should be in the mempool, and none of the transactions
+ // should have a same-txid-different-witness equivalent in the mempool.
+ assert(std::all_of(workspaces.cbegin(), workspaces.cend(), [this](const auto& ws){
+ return !m_pool.exists(GenTxid::Txid(ws.m_ptx->GetHash())); }));
+
bool all_submitted = true;
// ConsensusScriptChecks adds to the script cache and is therefore consensus-critical;
// CheckInputsFromMempoolAndCache asserts that transactions only spend coins available from the
@@ -1062,18 +1083,24 @@ bool MemPoolAccept::FinalizePackage(const ATMPArgs& args, std::vector<Workspace>
if (!ConsensusScriptChecks(args, ws)) {
results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
// Since PolicyScriptChecks() passed, this should never fail.
- all_submitted = Assume(false);
+ all_submitted = false;
+ package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
+ strprintf("BUG! PolicyScriptChecks succeeded but ConsensusScriptChecks failed: %s",
+ ws.m_ptx->GetHash().ToString()));
}
// Re-calculate mempool ancestors to call addUnchecked(). They may have changed since the
// last calculation done in PreChecks, since package ancestors have already been submitted.
- std::string err_string;
+ std::string unused_err_string;
if(!m_pool.CalculateMemPoolAncestors(*ws.m_entry, ws.m_ancestors, m_limit_ancestors,
m_limit_ancestor_size, m_limit_descendants,
- m_limit_descendant_size, err_string)) {
+ m_limit_descendant_size, unused_err_string)) {
results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
// Since PreChecks() and PackageMempoolChecks() both enforce limits, this should never fail.
- all_submitted = Assume(false);
+ all_submitted = false;
+ package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
+ strprintf("BUG! Mempool ancestors or descendants were underestimated: %s",
+ ws.m_ptx->GetHash().ToString()));
}
// If we call LimitMempoolSize() for each individual Finalize(), the mempool will not take
// the transaction's descendant feerate into account because it hasn't seen them yet. Also,
@@ -1083,7 +1110,9 @@ bool MemPoolAccept::FinalizePackage(const ATMPArgs& args, std::vector<Workspace>
if (!Finalize(args, ws)) {
results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state));
// Since LimitMempoolSize() won't be called, this should never fail.
- all_submitted = Assume(false);
+ all_submitted = false;
+ package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR,
+ strprintf("BUG! Adding to mempool failed: %s", ws.m_ptx->GetHash().ToString()));
}
}
@@ -1092,7 +1121,6 @@ bool MemPoolAccept::FinalizePackage(const ATMPArgs& args, std::vector<Workspace>
LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip(),
gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000,
std::chrono::hours{gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)});
- if (!all_submitted) return false;
// Find the wtxids of the transactions that made it into the mempool. Allow partial submission,
// but don't report success unless they all made it into the mempool.
@@ -1197,8 +1225,8 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::
if (args.m_test_accept) return PackageMempoolAcceptResult(package_state, std::move(results));
- if (!FinalizePackage(args, workspaces, package_state, results)) {
- package_state.Invalid(PackageValidationResult::PCKG_TX, "submission failed");
+ if (!SubmitPackage(args, workspaces, package_state, results)) {
+ // PackageValidationState filled in by SubmitPackage().
return PackageMempoolAcceptResult(package_state, std::move(results));
}
@@ -1223,11 +1251,13 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package,
return PackageMempoolAcceptResult(package_state, {});
}
- const auto& child = package[package.size() - 1];
+ // IsChildWithParents() guarantees the package contains more than one transaction.
+ assert(package.size() > 1);
// The package must be 1 child with all of its unconfirmed parents. The package is expected to
// be sorted, so the last transaction is the child.
+ const auto& child = package.back();
std::unordered_set<uint256, SaltedTxidHasher> unconfirmed_parent_txids;
- std::transform(package.cbegin(), package.end() - 1,
+ std::transform(package.cbegin(), package.cend() - 1,
std::inserter(unconfirmed_parent_txids, unconfirmed_parent_txids.end()),
[](const auto& tx) { return tx->GetHash(); });
@@ -1259,10 +1289,14 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package,
LOCK(m_pool.cs);
std::map<const uint256, const MempoolAcceptResult> results;
- // As node operators are free to set their mempool policies however they please, it's possible
- // for package transaction(s) to already be in the mempool, and we don't want to reject the
- // entire package in that case (as that could be a censorship vector). Filter the transactions
- // that are already in mempool and add their information to results, since we already have them.
+ // Node operators are free to set their mempool policies however they please, nodes may receive
+ // transactions in different orders, and malicious counterparties may try to take advantage of
+ // policy differences to pin or delay propagation of transactions. As such, it's possible for
+ // some package transaction(s) to already be in the mempool, and we don't want to reject the
+ // entire package in that case (as that could be a censorship vector). De-duplicate the
+ // transactions that are already in the mempool, and only call AcceptMultipleTransactions() with
+ // the new transactions. This ensures we don't double-count transaction counts and sizes when
+ // checking ancestor/descendant limits, or double-count transaction fees for fee-related policy.
std::vector<CTransactionRef> txns_new;
for (const auto& tx : package) {
const auto& wtxid = tx->GetWitnessHash();
@@ -1283,9 +1317,10 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package,
// transaction for the mempool one. Note that we are ignoring the validity of the
// package transaction passed in.
// TODO: allow witness replacement in packages.
- auto iter = m_pool.GetIter(wtxid);
+ auto iter = m_pool.GetIter(txid);
assert(iter != std::nullopt);
- results.emplace(txid, MempoolAcceptResult::MempoolTx(iter.value()->GetTxSize(), iter.value()->GetFee()));
+ // Provide the wtxid of the mempool tx so that the caller can look it up in the mempool.
+ results.emplace(wtxid, MempoolAcceptResult::MempoolTxDifferentWitness(iter.value()->GetTx().GetWitnessHash()));
} else {
// Transaction does not already exist in the mempool.
txns_new.push_back(tx);
@@ -1352,12 +1387,12 @@ PackageMempoolAcceptResult ProcessNewPackage(CChainState& active_chainstate, CTx
}();
// Uncache coins pertaining to transactions that were not submitted to the mempool.
- // Ensure the coins cache is still within limits.
if (test_accept || result.m_state.IsInvalid()) {
for (const COutPoint& hashTx : coins_to_uncache) {
active_chainstate.CoinsTip().Uncache(hashTx);
}
}
+ // Ensure the coins cache is still within limits.
BlockValidationState state_dummy;
active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC);
return result;
@@ -2812,6 +2847,8 @@ static void LimitValidationInterfaceQueue() LOCKS_EXCLUDED(cs_main) {
bool CChainState::ActivateBestChain(BlockValidationState& state, std::shared_ptr<const CBlock> pblock)
{
+ AssertLockNotHeld(m_chainstate_mutex);
+
// Note that while we're often called here from ProcessNewBlock, this is
// far from a guarantee. Things in the P2P/RPC will often end up calling
// us in the middle of ProcessNewBlock - do not assume pblock is set
@@ -2821,8 +2858,8 @@ bool CChainState::ActivateBestChain(BlockValidationState& state, std::shared_ptr
// ABC maintains a fair degree of expensive-to-calculate internal state
// because this function periodically releases cs_main so that it does not lock up other threads for too long
// during large connects - and to allow for e.g. the callback queue to drain
- // we use m_cs_chainstate to enforce mutual exclusion so that only one caller may execute this function at a time
- LOCK(m_cs_chainstate);
+ // we use m_chainstate_mutex to enforce mutual exclusion so that only one caller may execute this function at a time
+ LOCK(m_chainstate_mutex);
CBlockIndex *pindexMostWork = nullptr;
CBlockIndex *pindexNewTip = nullptr;
@@ -2941,6 +2978,8 @@ bool CChainState::PreciousBlock(BlockValidationState& state, CBlockIndex* pindex
bool CChainState::InvalidateBlock(BlockValidationState& state, CBlockIndex* pindex)
{
+ AssertLockNotHeld(m_chainstate_mutex);
+
// Genesis block can't be invalidated
assert(pindex);
if (pindex->nHeight == 0) return false;
@@ -2952,7 +2991,7 @@ bool CChainState::InvalidateBlock(BlockValidationState& state, CBlockIndex* pind
// We do not allow ActivateBestChain() to run while InvalidateBlock() is
// running, as that could cause the tip to change while we disconnect
// blocks.
- LOCK(m_cs_chainstate);
+ LOCK(m_chainstate_mutex);
// We'll be acquiring and releasing cs_main below, to allow the validation
// callbacks to run. However, we should keep the block index in a
@@ -4069,7 +4108,7 @@ void CChainState::LoadExternalBlockFile(FILE* fileIn, FlatFilePos* dbp)
try {
// locate a header
unsigned char buf[CMessageHeader::MESSAGE_START_SIZE];
- blkdat.FindByte(char(m_params.MessageStart()[0]));
+ blkdat.FindByte(m_params.MessageStart()[0]);
nRewind = blkdat.GetPos() + 1;
blkdat >> buf;
if (memcmp(buf, m_params.MessageStart(), CMessageHeader::MESSAGE_START_SIZE)) {
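
The coinbase-maturity branch of filter_final_and_mature above reduces to simple arithmetic: a coinbase created at height H is spendable from height H + COINBASE_MATURITY (100) onward, and the check uses the height of the next block on the new chain. A worked instance with hypothetical heights:

    #include <cassert>

    int main()
    {
        constexpr int COINBASE_MATURITY{100};           // consensus constant
        const int coin_height{50};                      // hypothetical: height the coinbase was created at
        const int tip_height{148};                      // hypothetical: chain tip after the reorg
        const int mempool_spend_height{tip_height + 1}; // the next block to be mined

        // 149 - 50 = 99 < 100: still immature, so the spending tx is evicted.
        assert(mempool_spend_height - coin_height < COINBASE_MATURITY);
    }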
diff --git a/src/validation.h b/src/validation.h
index edbd68f783..fb258005f1 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -163,8 +163,12 @@ struct MempoolAcceptResult {
VALID, //!> Fully validated, valid.
INVALID, //!> Invalid.
MEMPOOL_ENTRY, //!> Valid, transaction was already in the mempool.
+ DIFFERENT_WITNESS, //!> Not validated. A same-txid-different-witness tx (see m_other_wtxid) already exists in the mempool and was not replaced.
};
+ /** Result type. Present in all MempoolAcceptResults. */
const ResultType m_result_type;
+
+ /** Contains information about why the transaction failed. */
const TxValidationState m_state;
// The following fields are only present when m_result_type = ResultType::VALID or MEMPOOL_ENTRY
@@ -175,6 +179,10 @@ struct MempoolAcceptResult {
/** Raw base fees in satoshis. */
const std::optional<CAmount> m_base_fees;
+ // The following field is only present when m_result_type = ResultType::DIFFERENT_WITNESS
+ /** The wtxid of the transaction in the mempool which has the same txid but different witness. */
+ const std::optional<uint256> m_other_wtxid;
+
static MempoolAcceptResult Failure(TxValidationState state) {
return MempoolAcceptResult(state);
}
@@ -187,6 +195,10 @@ struct MempoolAcceptResult {
return MempoolAcceptResult(vsize, fees);
}
+ static MempoolAcceptResult MempoolTxDifferentWitness(const uint256& other_wtxid) {
+ return MempoolAcceptResult(other_wtxid);
+ }
+
// Private constructors. Use static methods MempoolAcceptResult::Success, etc. to construct.
private:
/** Constructor for failure case */
@@ -203,6 +215,10 @@ private:
/** Constructor for already-in-mempool case. It wouldn't replace any transactions. */
explicit MempoolAcceptResult(int64_t vsize, CAmount fees)
: m_result_type(ResultType::MEMPOOL_ENTRY), m_vsize{vsize}, m_base_fees(fees) {}
+
+ /** Constructor for witness-swapped case. */
+ explicit MempoolAcceptResult(const uint256& other_wtxid)
+ : m_result_type(ResultType::DIFFERENT_WITNESS), m_other_wtxid(other_wtxid) {}
};
/**
@@ -212,7 +228,7 @@ struct PackageMempoolAcceptResult
{
const PackageValidationState m_state;
/**
- * Map from (w)txid to finished MempoolAcceptResults. The client is responsible
+ * Map from wtxid to finished MempoolAcceptResults. The client is responsible
* for keeping track of the transaction objects themselves. If a result is not
* present, it means validation was unfinished for that transaction. If there
* was a package-wide error (see result in m_state), m_tx_results will be empty.
@@ -455,10 +471,11 @@ protected:
arith_uint256 nLastPreciousChainwork = 0;
/**
- * the ChainState CriticalSection
- * A lock that must be held when modifying this ChainState - held in ActivateBestChain()
+ * The ChainState Mutex
+ * A lock that must be held when modifying this ChainState - held in ActivateBestChain() and
+ * InvalidateBlock()
*/
- RecursiveMutex m_cs_chainstate;
+ Mutex m_chainstate_mutex;
/**
* Whether this chainstate is undergoing initial block download.
@@ -622,7 +639,7 @@ public:
*/
bool ActivateBestChain(
BlockValidationState& state,
- std::shared_ptr<const CBlock> pblock = nullptr) LOCKS_EXCLUDED(cs_main);
+ std::shared_ptr<const CBlock> pblock = nullptr) LOCKS_EXCLUDED(m_chainstate_mutex, cs_main);
bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -642,7 +659,7 @@ public:
*/
bool PreciousBlock(BlockValidationState& state, CBlockIndex* pindex) LOCKS_EXCLUDED(cs_main);
/** Mark a block as invalid. */
- bool InvalidateBlock(BlockValidationState& state, CBlockIndex* pindex) LOCKS_EXCLUDED(cs_main);
+ bool InvalidateBlock(BlockValidationState& state, CBlockIndex* pindex) LOCKS_EXCLUDED(m_chainstate_mutex, cs_main);
/** Remove invalidity status from a block and its descendants. */
void ResetBlockFailureFlags(CBlockIndex* pindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
diff --git a/src/versionbits.cpp b/src/versionbits.cpp
index a9264e6116..36815fba17 100644
--- a/src/versionbits.cpp
+++ b/src/versionbits.cpp
@@ -98,29 +98,38 @@ ThresholdState AbstractThresholdConditionChecker::GetStateFor(const CBlockIndex*
return state;
}
-BIP9Stats AbstractThresholdConditionChecker::GetStateStatisticsFor(const CBlockIndex* pindex, const Consensus::Params& params) const
+BIP9Stats AbstractThresholdConditionChecker::GetStateStatisticsFor(const CBlockIndex* pindex, const Consensus::Params& params, std::vector<bool>* signalling_blocks) const
{
BIP9Stats stats = {};
stats.period = Period(params);
stats.threshold = Threshold(params);
- if (pindex == nullptr)
- return stats;
+ if (pindex == nullptr) return stats;
// Find beginning of period
- const CBlockIndex* pindexEndOfPrevPeriod = pindex->GetAncestor(pindex->nHeight - ((pindex->nHeight + 1) % stats.period));
- stats.elapsed = pindex->nHeight - pindexEndOfPrevPeriod->nHeight;
+ int blocks_in_period = 1 + (pindex->nHeight % stats.period);
+
+ // Reset signalling_blocks
+ if (signalling_blocks) {
+ signalling_blocks->assign(blocks_in_period, false);
+ }
// Count from current block to beginning of period
+ int elapsed = 0;
int count = 0;
const CBlockIndex* currentIndex = pindex;
- while (pindexEndOfPrevPeriod->nHeight != currentIndex->nHeight){
- if (Condition(currentIndex, params))
- count++;
+ do {
+ ++elapsed;
+ --blocks_in_period;
+ if (Condition(currentIndex, params)) {
+ ++count;
+ if (signalling_blocks) signalling_blocks->at(blocks_in_period) = true;
+ }
currentIndex = currentIndex->pprev;
- }
+ } while(blocks_in_period > 0);
+ stats.elapsed = elapsed;
stats.count = count;
stats.possible = (stats.period - stats.threshold ) >= (stats.elapsed - count);
@@ -196,9 +205,9 @@ ThresholdState VersionBitsCache::State(const CBlockIndex* pindexPrev, const Cons
return VersionBitsConditionChecker(pos).GetStateFor(pindexPrev, params, m_caches[pos]);
}
-BIP9Stats VersionBitsCache::Statistics(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos)
+BIP9Stats VersionBitsCache::Statistics(const CBlockIndex* pindex, const Consensus::Params& params, Consensus::DeploymentPos pos, std::vector<bool>* signalling_blocks)
{
- return VersionBitsConditionChecker(pos).GetStateStatisticsFor(pindexPrev, params);
+ return VersionBitsConditionChecker(pos).GetStateStatisticsFor(pindex, params, signalling_blocks);
}
int VersionBitsCache::StateSinceHeight(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos)
diff --git a/src/versionbits.h b/src/versionbits.h
index 25ddd6fa5d..1b3fa11e61 100644
--- a/src/versionbits.h
+++ b/src/versionbits.h
@@ -64,8 +64,10 @@ protected:
virtual int Threshold(const Consensus::Params& params) const =0;
public:
- /** Returns the numerical statistics of an in-progress BIP9 softfork in the current period */
- BIP9Stats GetStateStatisticsFor(const CBlockIndex* pindex, const Consensus::Params& params) const;
+ /** Returns the numerical statistics of an in-progress BIP9 softfork in the period including pindex
+ * If provided, signalling_blocks is set to true/false based on whether each block in the period signalled
+ */
+ BIP9Stats GetStateStatisticsFor(const CBlockIndex* pindex, const Consensus::Params& params, std::vector<bool>* signalling_blocks = nullptr) const;
/** Returns the state for pindex A based on parent pindexPrev B. Applies any state transition if conditions are present.
* Caches state from first block of period. */
ThresholdState GetStateFor(const CBlockIndex* pindexPrev, const Consensus::Params& params, ThresholdConditionCache& cache) const;
@@ -82,8 +84,10 @@ private:
ThresholdConditionCache m_caches[Consensus::MAX_VERSION_BITS_DEPLOYMENTS] GUARDED_BY(m_mutex);
public:
- /** Get the numerical statistics for a given deployment for the signalling period that includes the block after pindexPrev. */
- static BIP9Stats Statistics(const CBlockIndex* pindexPrev, const Consensus::Params& params, Consensus::DeploymentPos pos);
+ /** Get the numerical statistics for a given deployment for the signalling period that includes pindex.
+ * If provided, signalling_blocks is set to true/false based on whether each block in the period signalled
+ */
+ static BIP9Stats Statistics(const CBlockIndex* pindex, const Consensus::Params& params, Consensus::DeploymentPos pos, std::vector<bool>* signalling_blocks = nullptr);
static uint32_t Mask(const Consensus::Params& params, Consensus::DeploymentPos pos);
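For intuition, here is a minimal Python sketch (not part of the diff) of the reworked statistics loop in versionbits.cpp above: it walks back from the given block to the start of its period, counts signalling blocks, and fills the optional per-block vector so that index 0 corresponds to the first block of the period. The function name and the plain list-of-booleans input are illustrative assumptions.

    def period_stats(signals, period=144, threshold=108):
        # `signals` holds one bool per block from genesis to the tip; an
        # illustrative stand-in for walking CBlockIndex::pprev and Condition().
        height = len(signals) - 1
        blocks_in_period = 1 + (height % period)
        signalling_blocks = [False] * blocks_in_period
        elapsed = count = 0
        idx = height
        while blocks_in_period > 0:
            elapsed += 1
            blocks_in_period -= 1
            if signals[idx]:
                count += 1
                signalling_blocks[blocks_in_period] = True  # index 0 = first block of period
            idx -= 1
        possible = (period - threshold) >= (elapsed - count)
        return elapsed, count, possible, signalling_blocks

With 150 blocks that all signal (tip height 149), this returns elapsed == count == 6, matching the height - 143 expressions asserted in the rpc_blockchain.py test further down.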
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
index e0be914a2b..cea120a81e 100644
--- a/src/wallet/bdb.cpp
+++ b/src/wallet/bdb.cpp
@@ -681,10 +681,10 @@ bool BerkeleyBatch::ReadAtCursor(CDataStream& ssKey, CDataStream& ssValue, bool&
// Convert to streams
ssKey.SetType(SER_DISK);
ssKey.clear();
- ssKey.write((char*)datKey.get_data(), datKey.get_size());
+ ssKey.write({BytePtr(datKey.get_data()), datKey.get_size()});
ssValue.SetType(SER_DISK);
ssValue.clear();
- ssValue.write((char*)datValue.get_data(), datValue.get_size());
+ ssValue.write({BytePtr(datValue.get_data()), datValue.get_size()});
return true;
}
@@ -756,7 +756,7 @@ bool BerkeleyBatch::ReadKey(CDataStream&& key, CDataStream& value)
SafeDbt datValue;
int ret = pdb->get(activeTxn, datKey, datValue, 0);
if (ret == 0 && datValue.get_data() != nullptr) {
- value.write((char*)datValue.get_data(), datValue.get_size());
+ value.write({BytePtr(datValue.get_data()), datValue.get_size()});
return true;
}
return false;
diff --git a/src/wallet/coincontrol.h b/src/wallet/coincontrol.h
index 5ef2295c88..65a5c83366 100644
--- a/src/wallet/coincontrol.h
+++ b/src/wallet/coincontrol.h
@@ -115,9 +115,28 @@ public:
vOutpoints.assign(setSelected.begin(), setSelected.end());
}
+ void SetInputWeight(const COutPoint& outpoint, int64_t weight)
+ {
+ m_input_weights[outpoint] = weight;
+ }
+
+ bool HasInputWeight(const COutPoint& outpoint) const
+ {
+ return m_input_weights.count(outpoint) > 0;
+ }
+
+ int64_t GetInputWeight(const COutPoint& outpoint) const
+ {
+ auto it = m_input_weights.find(outpoint);
+ assert(it != m_input_weights.end());
+ return it->second;
+ }
+
private:
std::set<COutPoint> setSelected;
std::map<COutPoint, CTxOut> m_external_txouts;
+ //! Map of COutPoints to the maximum weight for that input
+ std::map<COutPoint, int64_t> m_input_weights;
};
} // namespace wallet
diff --git a/src/wallet/dump.cpp b/src/wallet/dump.cpp
index 7dfb1d8839..3e34a2f776 100644
--- a/src/wallet/dump.cpp
+++ b/src/wallet/dump.cpp
@@ -47,12 +47,12 @@ bool DumpWallet(CWallet& wallet, bilingual_str& error)
// Write out a magic string with version
std::string line = strprintf("%s,%u\n", DUMP_MAGIC, DUMP_VERSION);
dump_file.write(line.data(), line.size());
- hasher.write(line.data(), line.size());
+ hasher.write(MakeByteSpan(line));
// Write out the file format
line = strprintf("%s,%s\n", "format", db.Format());
dump_file.write(line.data(), line.size());
- hasher.write(line.data(), line.size());
+ hasher.write(MakeByteSpan(line));
if (ret) {
@@ -73,7 +73,7 @@ bool DumpWallet(CWallet& wallet, bilingual_str& error)
std::string value_str = HexStr(ss_value);
line = strprintf("%s,%s\n", key_str, value_str);
dump_file.write(line.data(), line.size());
- hasher.write(line.data(), line.size());
+ hasher.write(MakeByteSpan(line));
}
}
@@ -150,7 +150,7 @@ bool CreateFromDump(const std::string& name, const fs::path& wallet_path, biling
return false;
}
std::string magic_hasher_line = strprintf("%s,%s\n", magic_key, version_value);
- hasher.write(magic_hasher_line.data(), magic_hasher_line.size());
+ hasher.write(MakeByteSpan(magic_hasher_line));
// Get the stored file format
std::string format_key;
@@ -181,7 +181,7 @@ bool CreateFromDump(const std::string& name, const fs::path& wallet_path, biling
warnings.push_back(strprintf(_("Warning: Dumpfile wallet format \"%s\" does not match command line specified format \"%s\"."), format_value, file_format));
}
std::string format_hasher_line = strprintf("%s,%s\n", format_key, format_value);
- hasher.write(format_hasher_line.data(), format_hasher_line.size());
+ hasher.write(MakeByteSpan(format_hasher_line));
DatabaseOptions options;
DatabaseStatus status;
@@ -225,7 +225,7 @@ bool CreateFromDump(const std::string& name, const fs::path& wallet_path, biling
}
std::string line = strprintf("%s,%s\n", key, value);
- hasher.write(line.data(), line.size());
+ hasher.write(MakeByteSpan(line));
if (key.empty() || value.empty()) {
continue;
diff --git a/src/wallet/interfaces.cpp b/src/wallet/interfaces.cpp
index b1466869b9..9083c304b2 100644
--- a/src/wallet/interfaces.cpp
+++ b/src/wallet/interfaces.cpp
@@ -90,7 +90,6 @@ WalletTxStatus MakeWalletTxStatus(const CWallet& wallet, const CWalletTx& wtx)
result.depth_in_main_chain = wallet.GetTxDepthInMainChain(wtx);
result.time_received = wtx.nTimeReceived;
result.lock_time = wtx.tx->nLockTime;
- result.is_final = wallet.chain().checkFinalTx(*wtx.tx);
result.is_trusted = CachedTxIsTrusted(wallet, wtx);
result.is_abandoned = wtx.isAbandoned();
result.is_coinbase = wtx.IsCoinBase();
diff --git a/src/wallet/load.cpp b/src/wallet/load.cpp
index 2d47673705..e6f96074d5 100644
--- a/src/wallet/load.cpp
+++ b/src/wallet/load.cpp
@@ -29,7 +29,7 @@ bool VerifyWallets(WalletContext& context)
fs::path wallet_dir = fs::PathFromString(args.GetArg("-walletdir", ""));
boost::system::error_code error;
// The canonical path cleans the path, preventing >1 Berkeley environment instances for the same directory
- fs::path canonical_wallet_dir = fs::canonical(wallet_dir, error);
+ fs::path canonical_wallet_dir = fs::canonical(wallet_dir, error).remove_trailing_separator();
if (error || !fs::exists(wallet_dir)) {
chain.initError(strprintf(_("Specified -walletdir \"%s\" does not exist"), fs::PathToString(wallet_dir)));
return false;
diff --git a/src/wallet/receive.cpp b/src/wallet/receive.cpp
index e598d6f979..1a6f06213c 100644
--- a/src/wallet/receive.cpp
+++ b/src/wallet/receive.cpp
@@ -279,8 +279,6 @@ bool CachedTxIsFromMe(const CWallet& wallet, const CWalletTx& wtx, const isminef
bool CachedTxIsTrusted(const CWallet& wallet, const CWalletTx& wtx, std::set<uint256>& trusted_parents)
{
AssertLockHeld(wallet.cs_wallet);
- // Quick answer in most cases
- if (!wallet.chain().checkFinalTx(*wtx.tx)) return false;
int nDepth = wallet.GetTxDepthInMainChain(wtx);
if (nDepth >= 1) return true;
if (nDepth < 0) return false;
diff --git a/src/wallet/rpc/coins.cpp b/src/wallet/rpc/coins.cpp
index f10de11662..035541babd 100644
--- a/src/wallet/rpc/coins.cpp
+++ b/src/wallet/rpc/coins.cpp
@@ -60,8 +60,8 @@ static CAmount GetReceived(const CWallet& wallet, const UniValue& params, bool b
if (depth < min_depth
// Coinbase with less than 1 confirmation is no longer in the main chain
|| (wtx.IsCoinBase() && (depth < 1 || !include_coinbase))
- || (wallet.IsTxImmatureCoinBase(wtx) && !include_immature_coinbase)
- || !wallet.chain().checkFinalTx(*wtx.tx)) {
+ || (wallet.IsTxImmatureCoinBase(wtx) && !include_immature_coinbase))
+ {
continue;
}
diff --git a/src/wallet/rpc/spend.cpp b/src/wallet/rpc/spend.cpp
index cae3542a5e..433b5a1815 100644
--- a/src/wallet/rpc/spend.cpp
+++ b/src/wallet/rpc/spend.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <consensus/validation.h>
#include <core_io.h>
#include <key_io.h>
#include <policy/policy.h>
@@ -429,6 +430,7 @@ void FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& fee_out,
{"replaceable", UniValueType(UniValue::VBOOL)},
{"conf_target", UniValueType(UniValue::VNUM)},
{"estimate_mode", UniValueType(UniValue::VSTR)},
+ {"input_weights", UniValueType(UniValue::VARR)},
},
true, true);
@@ -548,6 +550,37 @@ void FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& fee_out,
}
}
+ if (options.exists("input_weights")) {
+ for (const UniValue& input : options["input_weights"].get_array().getValues()) {
+ uint256 txid = ParseHashO(input, "txid");
+
+ const UniValue& vout_v = find_value(input, "vout");
+ if (!vout_v.isNum()) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, missing vout key");
+ }
+ int vout = vout_v.get_int();
+ if (vout < 0) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, vout cannot be negative");
+ }
+
+ const UniValue& weight_v = find_value(input, "weight");
+ if (!weight_v.isNum()) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, missing weight key");
+ }
+ int64_t weight = weight_v.get_int64();
+ const int64_t min_input_weight = GetTransactionInputWeight(CTxIn());
+ CHECK_NONFATAL(min_input_weight == 165);
+ if (weight < min_input_weight) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, weight cannot be less than 165 (41 bytes (size of outpoint + sequence + empty scriptSig) * 4 (witness scaling factor)) + 1 (empty witness)");
+ }
+ if (weight > MAX_STANDARD_TX_WEIGHT) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Invalid parameter, weight cannot be greater than the maximum standard tx weight of %d", MAX_STANDARD_TX_WEIGHT));
+ }
+
+ coinControl.SetInputWeight(COutPoint(txid, vout), weight);
+ }
+ }
+
if (tx.vout.size() == 0)
throw JSONRPCError(RPC_INVALID_PARAMETER, "TX must have at least one output");
@@ -585,6 +618,23 @@ void FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& fee_out,
}
}
+static void SetOptionsInputWeights(const UniValue& inputs, UniValue& options)
+{
+ if (options.exists("input_weights")) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Input weights should be specified in inputs rather than in options.");
+ }
+ if (inputs.size() == 0) {
+ return;
+ }
+ UniValue weights(UniValue::VARR);
+ for (const UniValue& input : inputs.getValues()) {
+ if (input.exists("weight")) {
+ weights.push_back(input);
+ }
+ }
+ options.pushKV("input_weights", weights);
+}
+
RPCHelpMan fundrawtransaction()
{
return RPCHelpMan{"fundrawtransaction",
@@ -626,6 +676,17 @@ RPCHelpMan fundrawtransaction()
{"vout_index", RPCArg::Type::NUM, RPCArg::Optional::OMITTED, "The zero-based output index, before a change output is added."},
},
},
+ {"input_weights", RPCArg::Type::ARR, RPCArg::Optional::OMITTED_NAMED_ARG, "Inputs and their corresponding weights",
+ {
+ {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id"},
+ {"vout", RPCArg::Type::NUM, RPCArg::Optional::NO, "The output index"},
+ {"weight", RPCArg::Type::NUM, RPCArg::Optional::NO, "The maximum weight for this input, "
+ "including the weight of the outpoint and sequence number. "
+ "Note that serialized signature sizes are not guaranteed to be consistent, "
+ "so the maximum DER signatures size of 73 bytes should be used when considering ECDSA signatures."
+ "Remember to convert serialized sizes to weight units when necessary."},
+ },
+ },
},
FundTxDoc()),
"options"},
@@ -1007,6 +1068,11 @@ RPCHelpMan send()
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id"},
{"vout", RPCArg::Type::NUM, RPCArg::Optional::NO, "The output number"},
{"sequence", RPCArg::Type::NUM, RPCArg::Optional::NO, "The sequence number"},
+ {"weight", RPCArg::Type::NUM, RPCArg::DefaultHint{"Calculated from wallet and solving data"}, "The maximum weight for this input, "
+ "including the weight of the outpoint and sequence number. "
+ "Note that signature sizes are not guaranteed to be consistent, "
+ "so the maximum DER signatures size of 73 bytes should be used when considering ECDSA signatures."
+ "Remember to convert serialized sizes to weight units when necessary."},
},
},
{"locktime", RPCArg::Type::NUM, RPCArg::Default{0}, "Raw locktime. Non-0 value also locktime-activates inputs"},
@@ -1110,6 +1176,7 @@ RPCHelpMan send()
// Automatically select coins, unless at least one is manually selected. Can
// be overridden by options.add_inputs.
coin_control.m_add_inputs = rawTx.vin.size() == 0;
+ SetOptionsInputWeights(options["inputs"], options);
FundTransaction(*pwallet, rawTx, fee, change_position, options, coin_control, /* override_min_fee */ false);
bool add_to_wallet = true;
@@ -1250,6 +1317,11 @@ RPCHelpMan walletcreatefundedpsbt()
{"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id"},
{"vout", RPCArg::Type::NUM, RPCArg::Optional::NO, "The output number"},
{"sequence", RPCArg::Type::NUM, RPCArg::DefaultHint{"depends on the value of the 'locktime' and 'options.replaceable' arguments"}, "The sequence number"},
+ {"weight", RPCArg::Type::NUM, RPCArg::DefaultHint{"Calculated from wallet and solving data"}, "The maximum weight for this input, "
+ "including the weight of the outpoint and sequence number. "
+ "Note that signature sizes are not guaranteed to be consistent, "
+ "so the maximum DER signatures size of 73 bytes should be used when considering ECDSA signatures."
+ "Remember to convert serialized sizes to weight units when necessary."},
},
},
},
@@ -1330,10 +1402,12 @@ RPCHelpMan walletcreatefundedpsbt()
}, true
);
+ UniValue options = request.params[3];
+
CAmount fee;
int change_position;
bool rbf{wallet.m_signal_rbf};
- const UniValue &replaceable_arg = request.params[3]["replaceable"];
+ const UniValue &replaceable_arg = options["replaceable"];
if (!replaceable_arg.isNull()) {
RPCTypeCheckArgument(replaceable_arg, UniValue::VBOOL);
rbf = replaceable_arg.isTrue();
@@ -1343,7 +1417,8 @@ RPCHelpMan walletcreatefundedpsbt()
// Automatically select coins, unless at least one is manually selected. Can
// be overridden by options.add_inputs.
coin_control.m_add_inputs = rawTx.vin.size() == 0;
- FundTransaction(wallet, rawTx, fee, change_position, request.params[3], coin_control, /* override_min_fee */ true);
+ SetOptionsInputWeights(request.params[0], options);
+ FundTransaction(wallet, rawTx, fee, change_position, options, coin_control, /* override_min_fee */ true);
// Make a blank psbt
PartiallySignedTransaction psbtx(rawTx);
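As a usage sketch (not part of the diff), the new input_weights option added above can be passed to fundrawtransaction roughly as follows; wallet, raw_tx and ext_utxo are placeholder names mirroring the functional test in test/functional/rpc_fundrawtransaction.py further down, and the weight must fall between 165 and MAX_STANDARD_TX_WEIGHT per the checks in FundTransaction:

    # Illustrative only; the names are assumed, not part of this change.
    funded = wallet.fundrawtransaction(raw_tx, {
        "input_weights": [
            {"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 200},
        ],
    })

For send and walletcreatefundedpsbt the same budget is instead given as a "weight" key on each entry of the inputs array, which SetOptionsInputWeights above folds into options["input_weights"] before calling FundTransaction.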
diff --git a/src/wallet/rpc/transactions.cpp b/src/wallet/rpc/transactions.cpp
index d9034808f4..eef2c13ee1 100644
--- a/src/wallet/rpc/transactions.cpp
+++ b/src/wallet/rpc/transactions.cpp
@@ -114,8 +114,8 @@ static UniValue ListReceived(const CWallet& wallet, const UniValue& params, cons
// Coinbase with less than 1 confirmation is no longer in the main chain
if ((wtx.IsCoinBase() && (nDepth < 1 || !include_coinbase))
- || (wallet.IsTxImmatureCoinBase(wtx) && !include_immature_coinbase)
- || !wallet.chain().checkFinalTx(*wtx.tx)) {
+ || (wallet.IsTxImmatureCoinBase(wtx) && !include_immature_coinbase))
+ {
continue;
}
diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp
index d87bdc8679..3d8ae2da69 100644
--- a/src/wallet/spend.cpp
+++ b/src/wallet/spend.cpp
@@ -105,10 +105,6 @@ void AvailableCoins(const CWallet& wallet, std::vector<COutput>& vCoins, const C
const uint256& wtxid = entry.first;
const CWalletTx& wtx = entry.second;
- if (!wallet.chain().checkFinalTx(*wtx.tx)) {
- continue;
- }
-
if (wallet.IsTxImmatureCoinBase(wtx))
continue;
@@ -455,15 +451,17 @@ std::optional<SelectionResult> SelectCoins(const CWallet& wallet, const std::vec
}
input_bytes = GetTxSpendSize(wallet, wtx, outpoint.n, false);
txout = wtx.tx->vout.at(outpoint.n);
- }
- if (input_bytes == -1) {
- // The input is external. We either did not find the tx in mapWallet, or we did but couldn't compute the input size with wallet data
+ } else {
+ // The input is external. We did not find the tx in mapWallet.
if (!coin_control.GetExternalOutput(outpoint, txout)) {
- // Not ours, and we don't have solving data.
return std::nullopt;
}
input_bytes = CalculateMaximumSignedInputSize(txout, &coin_control.m_external_provider, /* use_max_sig */ true);
}
+ // If available, override calculated size with coin control specified size
+ if (coin_control.HasInputWeight(outpoint)) {
+ input_bytes = GetVirtualTransactionSize(coin_control.GetInputWeight(outpoint), 0, 0);
+ }
CInputCoin coin(outpoint, txout, input_bytes);
if (coin.m_input_bytes == -1) {
@@ -798,7 +796,7 @@ static bool CreateTransactionInternal(
// to avoid conflicting with other possible uses of nSequence,
// and in the spirit of "smallest possible change from prior
// behavior."
- const uint32_t nSequence = coin_control.m_signal_bip125_rbf.value_or(wallet.m_signal_rbf) ? MAX_BIP125_RBF_SEQUENCE : (CTxIn::SEQUENCE_FINAL - 1);
+ const uint32_t nSequence{coin_control.m_signal_bip125_rbf.value_or(wallet.m_signal_rbf) ? MAX_BIP125_RBF_SEQUENCE : CTxIn::MAX_SEQUENCE_NONFINAL};
for (const auto& coin : selected_coins) {
txNew.vin.push_back(CTxIn(coin.outpoint, CScript(), nSequence));
}
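For reference (not part of the diff), the override above turns a caller-supplied weight into virtual bytes with the usual round-up-by-4 rule, which is also the arithmetic the fundrawtransaction feerate test below uses:

    from math import ceil

    def vsize_from_weight(weight):
        # GetVirtualTransactionSize(weight, 0, 0) rounds the weight up to whole vbytes
        return ceil(weight / 4)

    assert vsize_from_weight(165) == 42         # minimum allowed input weight
    assert vsize_from_weight(400000) == 100000  # MAX_STANDARD_TX_WEIGHT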
diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp
index 07e387f177..2b2181e70b 100644
--- a/src/wallet/sqlite.cpp
+++ b/src/wallet/sqlite.cpp
@@ -395,9 +395,9 @@ bool SQLiteBatch::ReadKey(CDataStream&& key, CDataStream& value)
return false;
}
// Leftmost column in result is index 0
- const char* data = reinterpret_cast<const char*>(sqlite3_column_blob(m_read_stmt, 0));
- int data_size = sqlite3_column_bytes(m_read_stmt, 0);
- value.write(data, data_size);
+ const std::byte* data{BytePtr(sqlite3_column_blob(m_read_stmt, 0))};
+ size_t data_size(sqlite3_column_bytes(m_read_stmt, 0));
+ value.write({data, data_size});
sqlite3_clear_bindings(m_read_stmt);
sqlite3_reset(m_read_stmt);
@@ -512,12 +512,12 @@ bool SQLiteBatch::ReadAtCursor(CDataStream& key, CDataStream& value, bool& compl
}
// Leftmost column in result is index 0
- const char* key_data = reinterpret_cast<const char*>(sqlite3_column_blob(m_cursor_stmt, 0));
- int key_data_size = sqlite3_column_bytes(m_cursor_stmt, 0);
- key.write(key_data, key_data_size);
- const char* value_data = reinterpret_cast<const char*>(sqlite3_column_blob(m_cursor_stmt, 1));
- int value_data_size = sqlite3_column_bytes(m_cursor_stmt, 1);
- value.write(value_data, value_data_size);
+ const std::byte* key_data{BytePtr(sqlite3_column_blob(m_cursor_stmt, 0))};
+ size_t key_data_size(sqlite3_column_bytes(m_cursor_stmt, 0));
+ key.write({key_data, key_data_size});
+ const std::byte* value_data{BytePtr(sqlite3_column_blob(m_cursor_stmt, 1))};
+ size_t value_data_size(sqlite3_column_bytes(m_cursor_stmt, 1));
+ value.write({value_data, value_data_size});
return true;
}
diff --git a/src/wallet/test/spend_tests.cpp b/src/wallet/test/spend_tests.cpp
index b2a0697c21..334bd5b8bc 100644
--- a/src/wallet/test/spend_tests.cpp
+++ b/src/wallet/test/spend_tests.cpp
@@ -63,5 +63,56 @@ BOOST_FIXTURE_TEST_CASE(SubtractFee, TestChain100Setup)
BOOST_CHECK_EQUAL(fee, check_tx(fee + 123));
}
+static void TestFillInputToWeight(int64_t additional_weight, std::vector<int64_t> expected_stack_sizes)
+{
+ static const int64_t EMPTY_INPUT_WEIGHT = GetTransactionInputWeight(CTxIn());
+
+ CTxIn input;
+ int64_t target_weight = EMPTY_INPUT_WEIGHT + additional_weight;
+ BOOST_CHECK(FillInputToWeight(input, target_weight));
+ BOOST_CHECK_EQUAL(GetTransactionInputWeight(input), target_weight);
+ BOOST_CHECK_EQUAL(input.scriptWitness.stack.size(), expected_stack_sizes.size());
+ for (unsigned int i = 0; i < expected_stack_sizes.size(); ++i) {
+ BOOST_CHECK_EQUAL(input.scriptWitness.stack[i].size(), expected_stack_sizes[i]);
+ }
+}
+
+BOOST_FIXTURE_TEST_CASE(FillInputToWeightTest, BasicTestingSetup)
+{
+ {
+ // Less than or equal minimum of 165 should not add any witness data
+ CTxIn input;
+ BOOST_CHECK(!FillInputToWeight(input, -1));
+ BOOST_CHECK_EQUAL(GetTransactionInputWeight(input), 165);
+ BOOST_CHECK_EQUAL(input.scriptWitness.stack.size(), 0);
+ BOOST_CHECK(!FillInputToWeight(input, 0));
+ BOOST_CHECK_EQUAL(GetTransactionInputWeight(input), 165);
+ BOOST_CHECK_EQUAL(input.scriptWitness.stack.size(), 0);
+ BOOST_CHECK(!FillInputToWeight(input, 164));
+ BOOST_CHECK_EQUAL(GetTransactionInputWeight(input), 165);
+ BOOST_CHECK_EQUAL(input.scriptWitness.stack.size(), 0);
+ BOOST_CHECK(FillInputToWeight(input, 165));
+ BOOST_CHECK_EQUAL(GetTransactionInputWeight(input), 165);
+ BOOST_CHECK_EQUAL(input.scriptWitness.stack.size(), 0);
+ }
+
+ // Make sure we can add at least one weight
+ TestFillInputToWeight(1, {0});
+
+ // 1 byte compact size uint boundary
+ TestFillInputToWeight(252, {251});
+ TestFillInputToWeight(253, {83, 168});
+ TestFillInputToWeight(262, {86, 174});
+ TestFillInputToWeight(263, {260});
+
+ // 3 byte compact size uint boundary
+ TestFillInputToWeight(65535, {65532});
+ TestFillInputToWeight(65536, {21842, 43688});
+ TestFillInputToWeight(65545, {21845, 43694});
+ TestFillInputToWeight(65546, {65541});
+
+ // Note: We don't test the next boundary because of memory allocation constraints.
+}
+
BOOST_AUTO_TEST_SUITE_END()
} // namespace wallet
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index 55428f43a7..8ef0d46c4f 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -140,11 +140,13 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
}
// Prune the older block file.
+ int file_number;
{
LOCK(cs_main);
- Assert(m_node.chainman)->m_blockman.PruneOneBlockFile(oldTip->GetBlockPos().nFile);
+ file_number = oldTip->GetBlockPos().nFile;
+ Assert(m_node.chainman)->m_blockman.PruneOneBlockFile(file_number);
}
- UnlinkPrunedFiles({oldTip->GetBlockPos().nFile});
+ UnlinkPrunedFiles({file_number});
// Verify ScanForWalletTransactions only picks transactions in the new block
// file.
@@ -169,9 +171,10 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
// Prune the remaining block file.
{
LOCK(cs_main);
- Assert(m_node.chainman)->m_blockman.PruneOneBlockFile(newTip->GetBlockPos().nFile);
+ file_number = newTip->GetBlockPos().nFile;
+ Assert(m_node.chainman)->m_blockman.PruneOneBlockFile(file_number);
}
- UnlinkPrunedFiles({newTip->GetBlockPos().nFile});
+ UnlinkPrunedFiles({file_number});
// Verify ScanForWalletTransactions scans no blocks.
{
@@ -202,11 +205,13 @@ BOOST_FIXTURE_TEST_CASE(importmulti_rescan, TestChain100Setup)
CBlockIndex* newTip = m_node.chainman->ActiveChain().Tip();
// Prune the older block file.
+ int file_number;
{
LOCK(cs_main);
- Assert(m_node.chainman)->m_blockman.PruneOneBlockFile(oldTip->GetBlockPos().nFile);
+ file_number = oldTip->GetBlockPos().nFile;
+ Assert(m_node.chainman)->m_blockman.PruneOneBlockFile(file_number);
}
- UnlinkPrunedFiles({oldTip->GetBlockPos().nFile});
+ UnlinkPrunedFiles({file_number});
// Verify importmulti RPC returns failure for a key whose creation time is
// before the missing block, and success for a key whose creation time is
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index b5f64a748e..84962af906 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -1505,6 +1505,49 @@ bool DummySignInput(const SigningProvider& provider, CTxIn &tx_in, const CTxOut
return true;
}
+bool FillInputToWeight(CTxIn& txin, int64_t target_weight)
+{
+ assert(txin.scriptSig.empty());
+ assert(txin.scriptWitness.IsNull());
+
+ int64_t txin_weight = GetTransactionInputWeight(txin);
+
+ // Do nothing if the weight that should be added is less than the weight that already exists
+ if (target_weight < txin_weight) {
+ return false;
+ }
+ if (target_weight == txin_weight) {
+ return true;
+ }
+
+ // Subtract current txin weight, which should include empty witness stack
+ int64_t add_weight = target_weight - txin_weight;
+ assert(add_weight > 0);
+
+ // We will want to subtract the size of the Compact Size UInt that will also be serialized.
+ // However doing so when the size is near a boundary can result in a problem where it is not
+ // possible to have a stack element size and combination to exactly equal a target.
+ // To avoid this possibility, if the weight to add is less than 10 bytes greater than
+ // a boundary, the size will be split so that 2/3rds will be in one stack element, and
+ // the remaining 1/3rd in another. Using 3rds allows us to avoid additional boundaries.
+ // 10 bytes is used because that accounts for the maximum size. This does not need to be super precise.
+ if ((add_weight >= 253 && add_weight < 263)
+ || (add_weight > std::numeric_limits<uint16_t>::max() && add_weight <= std::numeric_limits<uint16_t>::max() + 10)
+ || (add_weight > std::numeric_limits<uint32_t>::max() && add_weight <= std::numeric_limits<uint32_t>::max() + 10)) {
+ int64_t first_weight = add_weight / 3;
+ add_weight -= first_weight;
+
+ first_weight -= GetSizeOfCompactSize(first_weight);
+ txin.scriptWitness.stack.emplace(txin.scriptWitness.stack.end(), first_weight, 0);
+ }
+
+ add_weight -= GetSizeOfCompactSize(add_weight);
+ txin.scriptWitness.stack.emplace(txin.scriptWitness.stack.end(), add_weight, 0);
+ assert(GetTransactionInputWeight(txin) == target_weight);
+
+ return true;
+}
+
// Helper for producing a bunch of max-sized low-S low-R signatures (eg 71 bytes)
bool CWallet::DummySignTx(CMutableTransaction &txNew, const std::vector<CTxOut> &txouts, const CCoinControl* coin_control) const
{
@@ -1513,6 +1556,14 @@ bool CWallet::DummySignTx(CMutableTransaction &txNew, const std::vector<CTxOut>
for (const auto& txout : txouts)
{
CTxIn& txin = txNew.vin[nIn];
+ // If weight was provided, fill the input to that weight
+ if (coin_control && coin_control->HasInputWeight(txin.prevout)) {
+ if (!FillInputToWeight(txin, coin_control->GetInputWeight(txin.prevout))) {
+ return false;
+ }
+ nIn++;
+ continue;
+ }
// Use max sig if watch only inputs were used or if this particular input is an external input
// to ensure a sufficient fee is attained for the requested feerate.
const bool use_max_sig = coin_control && (coin_control->fAllowWatchOnly || coin_control->IsExternalSelected(txin.prevout));
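To make the compact-size boundary handling in FillInputToWeight concrete, here is a small Python model (not part of the diff) of how the extra weight is converted into witness stack element sizes; it reproduces the expectations in the FillInputToWeightTest cases earlier in this diff:

    def compact_size_len(n):
        # serialized length of a CompactSize integer
        if n < 253: return 1
        if n <= 0xFFFF: return 3
        if n <= 0xFFFFFFFF: return 5
        return 9

    def witness_stack_sizes(add_weight):
        # add_weight is the weight still to be added once the empty input's
        # 165 weight units are accounted for (must be >= 1); illustrative only.
        sizes = []
        near_boundary = (253 <= add_weight < 263
                         or 0xFFFF < add_weight <= 0xFFFF + 10
                         or 0xFFFFFFFF < add_weight <= 0xFFFFFFFF + 10)
        if near_boundary:
            first = add_weight // 3
            add_weight -= first
            first -= compact_size_len(first)
            sizes.append(first)
        add_weight -= compact_size_len(add_weight)
        sizes.append(add_weight)
        return sizes

    assert witness_stack_sizes(252) == [251]
    assert witness_stack_sizes(253) == [83, 168]
    assert witness_stack_sizes(65536) == [21842, 43688]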
diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h
index 00a1865a0e..e2c5c69c91 100644
--- a/src/wallet/wallet.h
+++ b/src/wallet/wallet.h
@@ -939,6 +939,8 @@ bool AddWalletSetting(interfaces::Chain& chain, const std::string& wallet_name);
bool RemoveWalletSetting(interfaces::Chain& chain, const std::string& wallet_name);
bool DummySignInput(const SigningProvider& provider, CTxIn &tx_in, const CTxOut &txout, bool use_max_sig);
+
+bool FillInputToWeight(CTxIn& txin, int64_t target_weight);
} // namespace wallet
#endif // BITCOIN_WALLET_WALLET_H
diff --git a/src/zmq/zmqpublishnotifier.cpp b/src/zmq/zmqpublishnotifier.cpp
index 543db10612..2c6f24a239 100644
--- a/src/zmq/zmqpublishnotifier.cpp
+++ b/src/zmq/zmqpublishnotifier.cpp
@@ -209,9 +209,10 @@ bool CZMQPublishHashBlockNotifier::NotifyBlock(const CBlockIndex *pindex)
{
uint256 hash = pindex->GetBlockHash();
LogPrint(BCLog::ZMQ, "zmq: Publish hashblock %s to %s\n", hash.GetHex(), this->address);
- char data[32];
- for (unsigned int i = 0; i < 32; i++)
+ uint8_t data[32];
+ for (unsigned int i = 0; i < 32; i++) {
data[31 - i] = hash.begin()[i];
+ }
return SendZmqMessage(MSG_HASHBLOCK, data, 32);
}
@@ -219,9 +220,10 @@ bool CZMQPublishHashTransactionNotifier::NotifyTransaction(const CTransaction &t
{
uint256 hash = transaction.GetHash();
LogPrint(BCLog::ZMQ, "zmq: Publish hashtx %s to %s\n", hash.GetHex(), this->address);
- char data[32];
- for (unsigned int i = 0; i < 32; i++)
+ uint8_t data[32];
+ for (unsigned int i = 0; i < 32; i++) {
data[31 - i] = hash.begin()[i];
+ }
return SendZmqMessage(MSG_HASHTX, data, 32);
}
diff --git a/test/functional/data/rpc_decodescript.json b/test/functional/data/rpc_decodescript.json
index d1aa9ab00d..8903f5efac 100644
--- a/test/functional/data/rpc_decodescript.json
+++ b/test/functional/data/rpc_decodescript.json
@@ -4,6 +4,7 @@
{
"asm": "1 eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
"address": "bcrt1pamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhqz6nvlh",
+ "desc": "addr(bcrt1pamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhqz6nvlh)#v52jnujz",
"type": "witness_v1_taproot"
}
],
@@ -12,6 +13,7 @@
{
"asm": "1 -28398",
"address": "bcrt1pamhqk96edn",
+ "desc": "addr(bcrt1pamhqk96edn)#vkh8uj5a",
"type": "witness_unknown"
}
],
@@ -20,6 +22,7 @@
{
"asm": "0 eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee",
"address": "bcrt1qamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhqgdn98t",
+ "desc": "addr(bcrt1qamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhwamhqgdn98t)#afaecevx",
"type": "witness_v0_scripthash",
"p2sh": "2MwGk8mw1GBP6U9D5X8gTvgvXpuknmAK3fo"
}
@@ -29,6 +32,7 @@
{
"asm": "OP_HASH160 eeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeeee OP_EQUAL",
"address": "2NF2b3KS8xXb9XHvbRMXdZh8s5g92rUZHtp",
+ "desc": "addr(2NF2b3KS8xXb9XHvbRMXdZh8s5g92rUZHtp)#ywfcpmh9",
"type": "scripthash"
}
],
@@ -36,6 +40,7 @@
"6a00",
{
"asm": "OP_RETURN 0",
+ "desc": "raw(6a00)#ncfmkl43",
"type": "nulldata"
}
],
@@ -43,6 +48,7 @@
"6aee",
{
"asm": "OP_RETURN OP_UNKNOWN",
+ "desc": "raw(6aee)#vsyzgqdt",
"type": "nonstandard"
}
],
@@ -50,6 +56,7 @@
"6a02ee",
{
"asm": "OP_RETURN [error]",
+ "desc": "raw(6a02ee)#gvdwnlzl",
"type": "nonstandard"
}
],
@@ -57,10 +64,12 @@
"02eeee",
{
"asm": "-28398",
+ "desc": "raw(02eeee)#5xzck7pr",
"type": "nonstandard",
"p2sh": "2N34iiGoUUkVSPiaaTFpJjB1FR9TXQu3PGM",
"segwit": {
"asm": "0 96c2368fc30514a438a8bd909f93c49a1549d77198ccbdb792043b666cb24f42",
+ "desc": "addr(bcrt1qjmprdr7rq522gw9ghkgfly7yng25n4m3nrxtmdujqsakvm9jfapqk795l5)#5akkdska",
"hex": "002096c2368fc30514a438a8bd909f93c49a1549d77198ccbdb792043b666cb24f42",
"address": "bcrt1qjmprdr7rq522gw9ghkgfly7yng25n4m3nrxtmdujqsakvm9jfapqk795l5",
"type": "witness_v0_scripthash",
@@ -72,6 +81,7 @@
"ba",
{
"asm": "OP_CHECKSIGADD",
+ "desc": "raw(ba)#yy0eg44l",
"type": "nonstandard"
}
],
@@ -79,6 +89,7 @@
"50",
{
"asm": "OP_RESERVED",
+ "desc": "raw(50)#a7tu03xf",
"type": "nonstandard"
}
]
diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py
index 7fd0d0140b..9d32749a08 100755
--- a/test/functional/feature_cltv.py
+++ b/test/functional/feature_cltv.py
@@ -92,7 +92,7 @@ class BIP65Test(BitcoinTestFramework):
self.rpc_timeout = 480
def test_cltv_info(self, *, is_active):
- assert_equal(self.nodes[0].getblockchaininfo()['softforks']['bip65'], {
+ assert_equal(self.nodes[0].getdeploymentinfo()['deployments']['bip65'], {
"active": is_active,
"height": CLTV_HEIGHT,
"type": "buried",
diff --git a/test/functional/feature_dersig.py b/test/functional/feature_dersig.py
index f35ce7e0c9..9a46839969 100755
--- a/test/functional/feature_dersig.py
+++ b/test/functional/feature_dersig.py
@@ -60,7 +60,7 @@ class BIP66Test(BitcoinTestFramework):
return self.miniwallet.create_self_transfer(utxo_to_spend=utxo_to_spend)['tx']
def test_dersig_info(self, *, is_active):
- assert_equal(self.nodes[0].getblockchaininfo()['softforks']['bip66'],
+ assert_equal(self.nodes[0].getdeploymentinfo()['deployments']['bip66'],
{
"active": is_active,
"height": DERSIG_HEIGHT,
diff --git a/test/functional/feature_init.py b/test/functional/feature_init.py
index 4b56b0c26b..dbd71a8b2d 100755
--- a/test/functional/feature_init.py
+++ b/test/functional/feature_init.py
@@ -3,8 +3,6 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Stress tests related to node initialization."""
-import random
-import time
import os
from pathlib import Path
@@ -26,7 +24,6 @@ class InitStressTest(BitcoinTestFramework):
def run_test(self):
"""
- test terminating initialization after seeing a certain log line.
- - test terminating init after seeing a random number of log lines.
- test removing certain essential files to test startup error paths.
"""
# TODO: skip Windows for now since it isn't clear how to SIGTERM.
@@ -76,46 +73,14 @@ class InitStressTest(BitcoinTestFramework):
for terminate_line in lines_to_terminate_after:
self.log.info(f"Starting node and will exit after line '{terminate_line}'")
- node.start(extra_args=['-txindex=1'])
-
- num_total_logs = node.wait_for_debug_log([terminate_line], ignore_case=True)
- self.log.debug(f"Terminating node after {num_total_logs} log lines seen")
+ with node.wait_for_debug_log([terminate_line], ignore_case=True):
+ node.start(extra_args=['-txindex=1'])
+ self.log.debug("Terminating node after terminate line was found")
sigterm_node()
check_clean_start()
self.stop_node(0)
- self.log.info(
- f"Terminate at some random point in the init process (max logs: {num_total_logs})")
-
- for _ in range(40):
- num_logs = len(Path(node.debug_log_path).read_text().splitlines())
- additional_lines = random.randint(1, num_total_logs)
- self.log.debug(f"Starting node and will exit after {additional_lines} lines")
- node.start(extra_args=['-txindex=1'])
- logfile = open(node.debug_log_path, 'rb')
-
- MAX_SECS_TO_WAIT = 10
- start = time.time()
- num_lines = 0
-
- while True:
- line = logfile.readline()
- if line:
- num_lines += 1
-
- if num_lines >= (num_logs + additional_lines) or \
- (time.time() - start) > MAX_SECS_TO_WAIT:
- self.log.debug(f"Terminating node after {num_lines} log lines seen")
- sigterm_node()
- break
-
- if node.process.poll() is not None:
- raise AssertionError("node failed to start")
-
- check_clean_start()
- self.stop_node(0)
-
self.log.info("Test startup errors after removing certain essential files")
files_to_disturb = {
diff --git a/test/functional/feature_maxtipage.py b/test/functional/feature_maxtipage.py
new file mode 100755
index 0000000000..87f9d6962d
--- /dev/null
+++ b/test/functional/feature_maxtipage.py
@@ -0,0 +1,56 @@
+#!/usr/bin/env python3
+# Copyright (c) 2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test logic for setting nMaxTipAge on command line.
+
+Nodes don't consider themselves out of "initial block download" as long as
+their best known block header time is more than nMaxTipAge in the past.
+"""
+
+import time
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_equal
+
+
+DEFAULT_MAX_TIP_AGE = 24 * 60 * 60
+
+
+class MaxTipAgeTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 2
+
+ def test_maxtipage(self, maxtipage, set_parameter=True):
+ node_miner = self.nodes[0]
+ node_ibd = self.nodes[1]
+
+ self.restart_node(1, [f'-maxtipage={maxtipage}'] if set_parameter else None)
+ self.connect_nodes(0, 1)
+
+ # tips older than maximum age -> stay in IBD
+ cur_time = int(time.time())
+ node_ibd.setmocktime(cur_time)
+ for delta in [5, 4, 3, 2, 1]:
+ node_miner.setmocktime(cur_time - maxtipage - delta)
+ self.generate(node_miner, 1)
+ assert_equal(node_ibd.getblockchaininfo()['initialblockdownload'], True)
+
+ # tip within maximum age -> leave IBD
+ node_miner.setmocktime(cur_time - maxtipage)
+ self.generate(node_miner, 1)
+ assert_equal(node_ibd.getblockchaininfo()['initialblockdownload'], False)
+
+ def run_test(self):
+ self.log.info("Test IBD with maximum tip age of 24 hours (default).")
+ self.test_maxtipage(DEFAULT_MAX_TIP_AGE, set_parameter=False)
+
+ for hours in [20, 10, 5, 2, 1]:
+ maxtipage = hours * 60 * 60
+ self.log.info(f"Test IBD with maximum tip age of {hours} hours (-maxtipage={maxtipage}).")
+ self.test_maxtipage(maxtipage)
+
+
+if __name__ == '__main__':
+ MaxTipAgeTest().main()
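The condition this new test exercises boils down to comparing the tip's block time against the current time minus -maxtipage; a one-line sketch (not part of the diff, simplified to ignore the other IBD criteria such as minimum chain work):

    # Illustrative only: the tip-age portion of the IBD check.
    def tip_keeps_node_in_ibd(tip_time, now, max_tip_age=24 * 60 * 60):
        return tip_time < now - max_tip_age

With the mocked times above, a tip mined at cur_time - maxtipage - 1 keeps node_ibd in IBD, while one mined at exactly cur_time - maxtipage lets it leave.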
diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py
index 2842b2534d..06aa5608bb 100755
--- a/test/functional/interface_rest.py
+++ b/test/functional/interface_rest.py
@@ -22,6 +22,10 @@ from test_framework.util import (
from test_framework.messages import BLOCK_HEADER_SIZE
+INVALID_PARAM = "abc"
+UNKNOWN_PARAM = "0000000000000000000000000000000000000000000000000000000000000000"
+
+
class ReqType(Enum):
JSON = 1
BIN = 2
@@ -103,6 +107,12 @@ class RESTTest (BitcoinTestFramework):
n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1'))
spending = (txid, n)
+ # Test /tx with an invalid and an unknown txid
+ resp = self.test_rest_request(uri=f"/tx/{INVALID_PARAM}", ret_type=RetType.OBJ, status=400)
+ assert_equal(resp.read().decode('utf-8').rstrip(), f"Invalid hash: {INVALID_PARAM}")
+ resp = self.test_rest_request(uri=f"/tx/{UNKNOWN_PARAM}", ret_type=RetType.OBJ, status=404)
+ assert_equal(resp.read().decode('utf-8').rstrip(), f"{UNKNOWN_PARAM} not found")
+
self.log.info("Query an unspent TXO using the /getutxos URI")
self.generatetoaddress(self.nodes[1], 1, not_related_address)
@@ -205,8 +215,8 @@ class RESTTest (BitcoinTestFramework):
bb_hash = self.nodes[0].getbestblockhash()
# Check result if block does not exists
- assert_equal(self.test_rest_request('/headers/1/0000000000000000000000000000000000000000000000000000000000000000'), [])
- self.test_rest_request('/block/0000000000000000000000000000000000000000000000000000000000000000', status=404, ret_type=RetType.OBJ)
+ assert_equal(self.test_rest_request(f"/headers/1/{UNKNOWN_PARAM}"), [])
+ self.test_rest_request(f"/block/{UNKNOWN_PARAM}", status=404, ret_type=RetType.OBJ)
# Check result if block is not in the active chain
self.nodes[0].invalidateblock(bb_hash)
@@ -250,8 +260,8 @@ class RESTTest (BitcoinTestFramework):
assert_equal(blockhash, bb_hash)
# Check invalid blockhashbyheight requests
- resp = self.test_rest_request("/blockhashbyheight/abc", ret_type=RetType.OBJ, status=400)
- assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: abc")
+ resp = self.test_rest_request(f"/blockhashbyheight/{INVALID_PARAM}", ret_type=RetType.OBJ, status=400)
+ assert_equal(resp.read().decode('utf-8').rstrip(), f"Invalid height: {INVALID_PARAM}")
resp = self.test_rest_request("/blockhashbyheight/1000000", ret_type=RetType.OBJ, status=404)
assert_equal(resp.read().decode('utf-8').rstrip(), "Block height out of range")
resp = self.test_rest_request("/blockhashbyheight/-1", ret_type=RetType.OBJ, status=400)
diff --git a/test/functional/mempool_updatefromblock.py b/test/functional/mempool_updatefromblock.py
index 16c15e3f74..51de582ce0 100755
--- a/test/functional/mempool_updatefromblock.py
+++ b/test/functional/mempool_updatefromblock.py
@@ -17,7 +17,7 @@ from test_framework.util import assert_equal
class MempoolUpdateFromBlockTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
- self.extra_args = [['-limitdescendantsize=1000', '-limitancestorsize=1000']]
+ self.extra_args = [['-limitdescendantsize=1000', '-limitancestorsize=1000', '-limitancestorcount=100']]
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py
index 4dd4899f74..2d96ba74b5 100755
--- a/test/functional/rpc_blockchain.py
+++ b/test/functional/rpc_blockchain.py
@@ -6,6 +6,7 @@
Test the following RPCs:
- getblockchaininfo
+ - getdeploymentinfo
- getchaintxstats
- gettxoutsetinfo
- getblockheader
@@ -79,6 +80,7 @@ class BlockchainTest(BitcoinTestFramework):
self._test_stopatheight()
self._test_waitforblockheight()
self._test_getblock()
+ self._test_getdeploymentinfo()
assert self.nodes[0].verifychain(4, 0)
def mine_chain(self):
@@ -115,7 +117,6 @@ class BlockchainTest(BitcoinTestFramework):
'mediantime',
'pruned',
'size_on_disk',
- 'softforks',
'time',
'verificationprogress',
'warnings',
@@ -159,11 +160,6 @@ class BlockchainTest(BitcoinTestFramework):
self.start_node(0, extra_args=[
'-stopatheight=207',
'-prune=550',
- '-testactivationheight=bip34@2',
- '-testactivationheight=dersig@3',
- '-testactivationheight=cltv@4',
- '-testactivationheight=csv@5',
- '-testactivationheight=segwit@6',
])
res = self.nodes[0].getblockchaininfo()
@@ -177,7 +173,13 @@ class BlockchainTest(BitcoinTestFramework):
assert_equal(res['prune_target_size'], 576716800)
assert_greater_than(res['size_on_disk'], 0)
- assert_equal(res['softforks'], {
+ def check_signalling_deploymentinfo_result(self, gdi_result, height, blockhash, status_next):
+ assert height >= 144 and height <= 287
+
+ assert_equal(gdi_result, {
+ "hash": blockhash,
+ "height": height,
+ "deployments": {
'bip34': {'type': 'buried', 'active': True, 'height': 2},
'bip66': {'type': 'buried', 'active': True, 'height': 3},
'bip65': {'type': 'buried', 'active': True, 'height': 4},
@@ -186,36 +188,65 @@ class BlockchainTest(BitcoinTestFramework):
'testdummy': {
'type': 'bip9',
'bip9': {
- 'status': 'started',
'bit': 28,
'start_time': 0,
'timeout': 0x7fffffffffffffff, # testdummy does not have a timeout so is set to the max int64 value
+ 'min_activation_height': 0,
+ 'status': 'started',
+ 'status-next': status_next,
'since': 144,
'statistics': {
'period': 144,
'threshold': 108,
- 'elapsed': HEIGHT - 143,
- 'count': HEIGHT - 143,
+ 'elapsed': height - 143,
+ 'count': height - 143,
'possible': True,
},
- 'min_activation_height': 0,
+ 'signalling': '#'*(height-143),
},
'active': False
},
'taproot': {
'type': 'bip9',
'bip9': {
- 'status': 'active',
'start_time': -1,
'timeout': 9223372036854775807,
- 'since': 0,
'min_activation_height': 0,
+ 'status': 'active',
+ 'status-next': 'active',
+ 'since': 0,
},
'height': 0,
'active': True
}
+ }
})
+ def _test_getdeploymentinfo(self):
+ # Note: continues past -stopatheight height, so must be invoked
+ # after _test_stopatheight
+
+ self.log.info("Test getdeploymentinfo")
+ self.stop_node(0)
+ self.start_node(0, extra_args=[
+ '-testactivationheight=bip34@2',
+ '-testactivationheight=dersig@3',
+ '-testactivationheight=cltv@4',
+ '-testactivationheight=csv@5',
+ '-testactivationheight=segwit@6',
+ ])
+
+ gbci207 = self.nodes[0].getblockchaininfo()
+ self.check_signalling_deploymentinfo_result(self.nodes[0].getdeploymentinfo(), gbci207["blocks"], gbci207["bestblockhash"], "started")
+
+ # block just prior to lock in
+ self.generate(self.wallet, 287 - gbci207["blocks"])
+ gbci287 = self.nodes[0].getblockchaininfo()
+ self.check_signalling_deploymentinfo_result(self.nodes[0].getdeploymentinfo(), gbci287["blocks"], gbci287["bestblockhash"], "locked_in")
+
+ # calling with an explicit hash works
+ self.check_signalling_deploymentinfo_result(self.nodes[0].getdeploymentinfo(gbci207["bestblockhash"]), gbci207["blocks"], gbci207["bestblockhash"], "started")
+
def _test_getchaintxstats(self):
self.log.info("Test getchaintxstats")
diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py
index a8e6acea45..759e43194b 100755
--- a/test/functional/rpc_fundrawtransaction.py
+++ b/test/functional/rpc_fundrawtransaction.py
@@ -4,8 +4,10 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the fundrawtransaction RPC."""
+
from decimal import Decimal
from itertools import product
+from math import ceil
from test_framework.descriptors import descsum_create
from test_framework.key import ECKey
@@ -1003,7 +1005,7 @@ class RawTransactionsTest(BitcoinTestFramework):
ext_utxo = self.nodes[0].listunspent(addresses=[addr])[0]
# An external input without solving data should result in an error
- raw_tx = wallet.createrawtransaction([ext_utxo], {self.nodes[0].getnewaddress(): 15})
+ raw_tx = wallet.createrawtransaction([ext_utxo], {self.nodes[0].getnewaddress(): ext_utxo["amount"] / 2})
assert_raises_rpc_error(-4, "Insufficient funds", wallet.fundrawtransaction, raw_tx)
# Error conditions
@@ -1011,6 +1013,12 @@ class RawTransactionsTest(BitcoinTestFramework):
assert_raises_rpc_error(-5, "'01234567890a0b0c0d0e0f' is not a valid public key", wallet.fundrawtransaction, raw_tx, {"solving_data": {"pubkeys":["01234567890a0b0c0d0e0f"]}})
assert_raises_rpc_error(-5, "'not a script' is not hex", wallet.fundrawtransaction, raw_tx, {"solving_data": {"scripts":["not a script"]}})
assert_raises_rpc_error(-8, "Unable to parse descriptor 'not a descriptor'", wallet.fundrawtransaction, raw_tx, {"solving_data": {"descriptors":["not a descriptor"]}})
+ assert_raises_rpc_error(-8, "Invalid parameter, missing vout key", wallet.fundrawtransaction, raw_tx, {"input_weights": [{"txid": ext_utxo["txid"]}]})
+ assert_raises_rpc_error(-8, "Invalid parameter, vout cannot be negative", wallet.fundrawtransaction, raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": -1}]})
+ assert_raises_rpc_error(-8, "Invalid parameter, missing weight key", wallet.fundrawtransaction, raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"]}]})
+ assert_raises_rpc_error(-8, "Invalid parameter, weight cannot be less than 165", wallet.fundrawtransaction, raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 164}]})
+ assert_raises_rpc_error(-8, "Invalid parameter, weight cannot be less than 165", wallet.fundrawtransaction, raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": -1}]})
+ assert_raises_rpc_error(-8, "Invalid parameter, weight cannot be greater than", wallet.fundrawtransaction, raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 400001}]})
# But funding should work when the solving data is provided
funded_tx = wallet.fundrawtransaction(raw_tx, {"solving_data": {"pubkeys": [addr_info['pubkey']], "scripts": [addr_info["embedded"]["scriptPubKey"]]}})
@@ -1020,10 +1028,45 @@ class RawTransactionsTest(BitcoinTestFramework):
assert signed_tx['complete']
funded_tx = wallet.fundrawtransaction(raw_tx, {"solving_data": {"descriptors": [desc]}})
- signed_tx = wallet.signrawtransactionwithwallet(funded_tx['hex'])
- assert not signed_tx['complete']
- signed_tx = self.nodes[0].signrawtransactionwithwallet(signed_tx['hex'])
- assert signed_tx['complete']
+ signed_tx1 = wallet.signrawtransactionwithwallet(funded_tx['hex'])
+ assert not signed_tx1['complete']
+ signed_tx2 = self.nodes[0].signrawtransactionwithwallet(signed_tx1['hex'])
+ assert signed_tx2['complete']
+
+ unsigned_weight = self.nodes[0].decoderawtransaction(signed_tx1["hex"])["weight"]
+ signed_weight = self.nodes[0].decoderawtransaction(signed_tx2["hex"])["weight"]
+ # Input's weight is difference between weight of signed and unsigned,
+ # and the weight of stuff that didn't change (prevout, sequence, 1 byte of scriptSig)
+ input_weight = signed_weight - unsigned_weight + (41 * 4)
+ low_input_weight = input_weight // 2
+ high_input_weight = input_weight * 2
+
+ # Funding should also work if the input weight is provided
+ funded_tx = wallet.fundrawtransaction(raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}]})
+ signed_tx = wallet.signrawtransactionwithwallet(funded_tx["hex"])
+ signed_tx = self.nodes[0].signrawtransactionwithwallet(signed_tx["hex"])
+ assert_equal(self.nodes[0].testmempoolaccept([signed_tx["hex"]])[0]["allowed"], True)
+ assert_equal(signed_tx["complete"], True)
+ # Reducing the weight should have a lower fee
+ funded_tx2 = wallet.fundrawtransaction(raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": low_input_weight}]})
+ assert_greater_than(funded_tx["fee"], funded_tx2["fee"])
+ # Increasing the weight should have a higher fee
+ funded_tx2 = wallet.fundrawtransaction(raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}]})
+ assert_greater_than(funded_tx2["fee"], funded_tx["fee"])
+ # The provided weight should override the calculated weight when solving data is provided
+ funded_tx3 = wallet.fundrawtransaction(raw_tx, {"solving_data": {"descriptors": [desc]}, "input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}]})
+ assert_equal(funded_tx2["fee"], funded_tx3["fee"])
+ # The feerate should be met
+ funded_tx4 = wallet.fundrawtransaction(raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}], "fee_rate": 10})
+ input_add_weight = high_input_weight - (41 * 4)
+ tx4_weight = wallet.decoderawtransaction(funded_tx4["hex"])["weight"] + input_add_weight
+ tx4_vsize = int(ceil(tx4_weight / 4))
+ assert_fee_amount(funded_tx4["fee"], tx4_vsize, Decimal(0.0001))
+
+ # Funding with weight at csuint boundaries should not cause problems
+ funded_tx = wallet.fundrawtransaction(raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 255}]})
+ funded_tx = wallet.fundrawtransaction(raw_tx, {"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 65539}]})
+
self.nodes[2].unloadwallet("extfund")
def test_include_unsafe(self):
diff --git a/test/functional/rpc_getblockfrompeer.py b/test/functional/rpc_getblockfrompeer.py
index effcebe854..b65322d920 100755
--- a/test/functional/rpc_getblockfrompeer.py
+++ b/test/functional/rpc_getblockfrompeer.py
@@ -40,12 +40,8 @@ class GetBlockFromPeerTest(BitcoinTestFramework):
self.sync_blocks()
self.log.info("Node 0 should only have the header for node 1's block 3")
- for x in self.nodes[0].getchaintips():
- if x['hash'] == short_tip:
- assert_equal(x['status'], "headers-only")
- break
- else:
- raise AssertionError("short tip not synced")
+ x = next(filter(lambda x: x['hash'] == short_tip, self.nodes[0].getchaintips()))
+ assert_equal(x['status'], "headers-only")
assert_raises_rpc_error(-1, "Block not found on disk", self.nodes[0].getblock, short_tip)
self.log.info("Fetch block from node 1")
@@ -60,17 +56,15 @@ class GetBlockFromPeerTest(BitcoinTestFramework):
assert_raises_rpc_error(-1, "Block header missing", self.nodes[0].getblockfrompeer, "00" * 32, 0)
self.log.info("Non-existent peer generates error")
- assert_raises_rpc_error(-1, f"Peer nodeid {peer_0_peer_1_id + 1} does not exist", self.nodes[0].getblockfrompeer, short_tip, peer_0_peer_1_id + 1)
+ assert_raises_rpc_error(-1, "Peer does not exist", self.nodes[0].getblockfrompeer, short_tip, peer_0_peer_1_id + 1)
self.log.info("Successful fetch")
result = self.nodes[0].getblockfrompeer(short_tip, peer_0_peer_1_id)
self.wait_until(lambda: self.check_for_block(short_tip), timeout=1)
- assert(not "warnings" in result)
+ assert_equal(result, {})
self.log.info("Don't fetch blocks we already have")
- result = self.nodes[0].getblockfrompeer(short_tip, peer_0_peer_1_id)
- assert("warnings" in result)
- assert_equal(result["warnings"], "Block already downloaded")
+ assert_raises_rpc_error(-1, "Block already downloaded", self.nodes[0].getblockfrompeer, short_tip, peer_0_peer_1_id)
if __name__ == '__main__':
GetBlockFromPeerTest().main()
diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py
index 153a201e95..b037807b53 100755
--- a/test/functional/rpc_psbt.py
+++ b/test/functional/rpc_psbt.py
@@ -606,11 +606,15 @@ class PSBTTest(BitcoinTestFramework):
assert_raises_rpc_error(-25, 'Inputs missing or spent', self.nodes[0].walletprocesspsbt, 'cHNidP8BAJoCAAAAAkvEW8NnDtdNtDpsmze+Ht2LH35IJcKv00jKAlUs21RrAwAAAAD/////S8Rbw2cO1020OmybN74e3Ysffkglwq/TSMoCVSzbVGsBAAAAAP7///8CwLYClQAAAAAWABSNJKzjaUb3uOxixsvh1GGE3fW7zQD5ApUAAAAAFgAUKNw0x8HRctAgmvoevm4u1SbN7XIAAAAAAAEAnQIAAAACczMa321tVHuN4GKWKRncycI22aX3uXgwSFUKM2orjRsBAAAAAP7///9zMxrfbW1Ue43gYpYpGdzJwjbZpfe5eDBIVQozaiuNGwAAAAAA/v///wIA+QKVAAAAABl2qRT9zXUVA8Ls5iVqynLHe5/vSe1XyYisQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAAAAAQEfQM0ClQAAAAAWABRmWQUcjSjghQ8/uH4Bn/zkakwLtAAAAA==')
- # Test that we can fund psbts with external inputs specified
+ self.log.info("Test that we can fund psbts with external inputs specified")
+
eckey = ECKey()
eckey.generate()
privkey = bytes_to_wif(eckey.get_bytes())
+ self.nodes[1].createwallet("extfund")
+ wallet = self.nodes[1].get_wallet_rpc("extfund")
+
# Make a weird but signable script. sh(pkh()) descriptor accomplishes this
desc = descsum_create("sh(pkh({}))".format(privkey))
if self.options.descriptors:
@@ -622,26 +626,97 @@ class PSBTTest(BitcoinTestFramework):
addr_info = self.nodes[0].getaddressinfo(addr)
self.nodes[0].sendtoaddress(addr, 10)
+ self.nodes[0].sendtoaddress(wallet.getnewaddress(), 10)
self.generate(self.nodes[0], 6)
ext_utxo = self.nodes[0].listunspent(addresses=[addr])[0]
# An external input without solving data should result in an error
- assert_raises_rpc_error(-4, "Insufficient funds", self.nodes[1].walletcreatefundedpsbt, [ext_utxo], {self.nodes[0].getnewaddress(): 10 + ext_utxo['amount']}, 0, {'add_inputs': True})
+ assert_raises_rpc_error(-4, "Insufficient funds", wallet.walletcreatefundedpsbt, [ext_utxo], {self.nodes[0].getnewaddress(): 15})
# But funding should work when the solving data is provided
- psbt = self.nodes[1].walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {'add_inputs': True, "solving_data": {"pubkeys": [addr_info['pubkey']], "scripts": [addr_info["embedded"]["scriptPubKey"]]}})
- signed = self.nodes[1].walletprocesspsbt(psbt['psbt'])
+ psbt = wallet.walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {"add_inputs": True, "solving_data": {"pubkeys": [addr_info['pubkey']], "scripts": [addr_info["embedded"]["scriptPubKey"]]}})
+ signed = wallet.walletprocesspsbt(psbt['psbt'])
assert not signed['complete']
signed = self.nodes[0].walletprocesspsbt(signed['psbt'])
assert signed['complete']
self.nodes[0].finalizepsbt(signed['psbt'])
- psbt = self.nodes[1].walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {'add_inputs': True, "solving_data":{"descriptors": [desc]}})
- signed = self.nodes[1].walletprocesspsbt(psbt['psbt'])
+ psbt = wallet.walletcreatefundedpsbt([ext_utxo], {self.nodes[0].getnewaddress(): 15}, 0, {"add_inputs": True, "solving_data": {"descriptors": [desc]}})
+ signed = wallet.walletprocesspsbt(psbt['psbt'])
assert not signed['complete']
signed = self.nodes[0].walletprocesspsbt(signed['psbt'])
assert signed['complete']
- self.nodes[0].finalizepsbt(signed['psbt'])
+ final = self.nodes[0].finalizepsbt(signed['psbt'], False)
+
+ dec = self.nodes[0].decodepsbt(signed["psbt"])
+ for i, txin in enumerate(dec["tx"]["vin"]):
+ if txin["txid"] == ext_utxo["txid"] and txin["vout"] == ext_utxo["vout"]:
+ input_idx = i
+ break
+ psbt_in = dec["inputs"][input_idx]
+ # Calculate the input weight
+ # (prevout + sequence + length of scriptSig + 2 bytes buffer) * 4 + len of scriptwitness
+ len_scriptsig = len(psbt_in["final_scriptSig"]["hex"]) // 2 if "final_scriptSig" in psbt_in else 0
+ len_scriptwitness = len(psbt_in["final_scriptwitness"]["hex"]) // 2 if "final_scriptwitness" in psbt_in else 0
+ input_weight = ((41 + len_scriptsig + 2) * 4) + len_scriptwitness
+ low_input_weight = input_weight // 2
+ high_input_weight = input_weight * 2
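+ # Rough worked example of the formula above for the sh(pkh()) input used
+ # here, assuming a ~72-byte DER signature, a 33-byte compressed pubkey and
+ # the 25-byte redeem script (each with a one-byte push):
+ #   len_scriptsig ~= 73 + 34 + 26 = 133, len_scriptwitness = 0
+ #   input_weight  ~= (41 + 133 + 2) * 4 + 0 = 704 WU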
+
+ # Input weight error conditions
+ assert_raises_rpc_error(
+ -8,
+ "Input weights should be specified in inputs rather than in options.",
+ wallet.walletcreatefundedpsbt,
+ inputs=[ext_utxo],
+ outputs={self.nodes[0].getnewaddress(): 15},
+ options={"input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 1000}]}
+ )
+
+ # Funding should also work if the input weight is provided
+ psbt = wallet.walletcreatefundedpsbt(
+ inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}],
+ outputs={self.nodes[0].getnewaddress(): 15},
+ options={"add_inputs": True}
+ )
+ signed = wallet.walletprocesspsbt(psbt["psbt"])
+ signed = self.nodes[0].walletprocesspsbt(signed["psbt"])
+ final = self.nodes[0].finalizepsbt(signed["psbt"])
+ assert self.nodes[0].testmempoolaccept([final["hex"]])[0]["allowed"]
+ # Reducing the weight should result in a lower fee
+ psbt2 = wallet.walletcreatefundedpsbt(
+ inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": low_input_weight}],
+ outputs={self.nodes[0].getnewaddress(): 15},
+ options={"add_inputs": True}
+ )
+ assert_greater_than(psbt["fee"], psbt2["fee"])
+ # Increasing the weight should result in a higher fee
+ psbt2 = wallet.walletcreatefundedpsbt(
+ inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}],
+ outputs={self.nodes[0].getnewaddress(): 15},
+ options={"add_inputs": True}
+ )
+ assert_greater_than(psbt2["fee"], psbt["fee"])
+ # The provided weight should override the calculated weight when solving data is provided
+ psbt3 = wallet.walletcreatefundedpsbt(
+ inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}],
+ outputs={self.nodes[0].getnewaddress(): 15},
+ options={"add_inputs": True, "solving_data": {"descriptors": [desc]}}
+ )
+ assert_equal(psbt2["fee"], psbt3["fee"])
+
+ # Import the external utxo descriptor so that we can sign for it from the test wallet
+ if self.options.descriptors:
+ res = wallet.importdescriptors([{"desc": desc, "timestamp": "now"}])
+ else:
+ res = wallet.importmulti([{"desc": desc, "timestamp": "now"}])
+ assert res[0]["success"]
+ # The provided weight should override the calculated weight for a wallet input
+ psbt3 = wallet.walletcreatefundedpsbt(
+ inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": high_input_weight}],
+ outputs={self.nodes[0].getnewaddress(): 15},
+ options={"add_inputs": True}
+ )
+ assert_equal(psbt2["fee"], psbt3["fee"])
if __name__ == '__main__':
PSBTTest().main()
diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py
index 96691b2686..a839af0288 100755
--- a/test/functional/rpc_rawtransaction.py
+++ b/test/functional/rpc_rawtransaction.py
@@ -99,25 +99,36 @@ class RawTransactionsTest(BitcoinTestFramework):
rawTx = self.nodes[1].createrawtransaction([{'txid': txid, 'vout': vout}], {self.nodes[1].getnewaddress(): 9.999})
rawTxSigned = self.nodes[1].signrawtransactionwithwallet(rawTx)
txId = self.nodes[1].sendrawtransaction(rawTxSigned['hex'])
- self.generate(self.nodes[0], 1)
+ self.generateblock(self.nodes[0], output=self.nodes[0].getnewaddress(), transactions=[rawTxSigned['hex']])
+ err_msg = (
+ "No such mempool transaction. Use -txindex or provide a block hash to enable"
+ " blockchain transaction queries. Use gettransaction for wallet transactions."
+ )
for n in [0, 3]:
self.log.info(f"Test getrawtransaction {'with' if n == 0 else 'without'} -txindex")
- # 1. valid parameters - only supply txid
- assert_equal(self.nodes[n].getrawtransaction(txId), rawTxSigned['hex'])
- # 2. valid parameters - supply txid and 0 for non-verbose
- assert_equal(self.nodes[n].getrawtransaction(txId, 0), rawTxSigned['hex'])
+ if n == 0:
+ # With -txindex.
+ # 1. valid parameters - only supply txid
+ assert_equal(self.nodes[n].getrawtransaction(txId), rawTxSigned['hex'])
- # 3. valid parameters - supply txid and False for non-verbose
- assert_equal(self.nodes[n].getrawtransaction(txId, False), rawTxSigned['hex'])
+ # 2. valid parameters - supply txid and 0 for non-verbose
+ assert_equal(self.nodes[n].getrawtransaction(txId, 0), rawTxSigned['hex'])
- # 4. valid parameters - supply txid and 1 for verbose.
- # We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
- assert_equal(self.nodes[n].getrawtransaction(txId, 1)["hex"], rawTxSigned['hex'])
+ # 3. valid parameters - supply txid and False for non-verbose
+ assert_equal(self.nodes[n].getrawtransaction(txId, False), rawTxSigned['hex'])
- # 5. valid parameters - supply txid and True for non-verbose
- assert_equal(self.nodes[n].getrawtransaction(txId, True)["hex"], rawTxSigned['hex'])
+ # 4. valid parameters - supply txid and 1 for verbose.
+ # We only check the "hex" field of the output so we don't need to update this test every time the output format changes.
+ assert_equal(self.nodes[n].getrawtransaction(txId, 1)["hex"], rawTxSigned['hex'])
+
+ # 5. valid parameters - supply txid and True for non-verbose
+ assert_equal(self.nodes[n].getrawtransaction(txId, True)["hex"], rawTxSigned['hex'])
+ else:
+ # Without -txindex, expect to raise.
+ for verbose in [None, 0, False, 1, True]:
+ assert_raises_rpc_error(-5, err_msg, self.nodes[n].getrawtransaction, txId, verbose)
# 6. invalid parameters - supply txid and invalid boolean values (strings) for verbose
for value in ["True", "False"]:
@@ -145,10 +156,6 @@ class RawTransactionsTest(BitcoinTestFramework):
assert 'in_active_chain' not in gottx
else:
self.log.info("Test getrawtransaction without -txindex, without blockhash: expect the call to raise")
- err_msg = (
- "No such mempool transaction. Use -txindex or provide a block hash to enable"
- " blockchain transaction queries. Use gettransaction for wallet transactions."
- )
assert_raises_rpc_error(-5, err_msg, self.nodes[n].getrawtransaction, txid=tx, verbose=True)
# We should not get the tx if we provide an unrelated block
assert_raises_rpc_error(-5, "No such transaction found", self.nodes[n].getrawtransaction, txid=tx, blockhash=block2)
diff --git a/test/functional/rpc_signrawtransaction.py b/test/functional/rpc_signrawtransaction.py
index e648040278..a2091b4ece 100755
--- a/test/functional/rpc_signrawtransaction.py
+++ b/test/functional/rpc_signrawtransaction.py
@@ -270,7 +270,7 @@ class SignRawTransactionsTest(BitcoinTestFramework):
getcontext().prec = 8
# Make sure CSV is active
- assert self.nodes[0].getblockchaininfo()['softforks']['csv']['active']
+ assert self.nodes[0].getdeploymentinfo()['deployments']['csv']['active']
# Create a P2WSH script with CSV
script = CScript([1, OP_CHECKSEQUENCEVERIFY, OP_DROP])
@@ -305,7 +305,7 @@ class SignRawTransactionsTest(BitcoinTestFramework):
getcontext().prec = 8
# Make sure CLTV is active
- assert self.nodes[0].getblockchaininfo()['softforks']['bip65']['active']
+ assert self.nodes[0].getdeploymentinfo()['deployments']['bip65']['active']
# Create a P2WSH script with CLTV
script = CScript([100, OP_CHECKLOCKTIMEVERIFY, OP_DROP])
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 0b9154a030..289e83579b 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -422,7 +422,8 @@ class TestNode():
time.sleep(0.05)
self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
- def wait_for_debug_log(self, expected_msgs, timeout=10, ignore_case=False) -> int:
+ @contextlib.contextmanager
+ def wait_for_debug_log(self, expected_msgs, timeout=60, ignore_case=False):
"""
Block until we see a particular debug log message fragment or until we exceed the timeout.
Return:
@@ -432,6 +433,8 @@ class TestNode():
prev_size = self.debug_log_bytes()
re_flags = re.MULTILINE | (re.IGNORECASE if ignore_case else 0)
+ yield
+
while True:
found = True
with open(self.debug_log_path, encoding='utf-8') as dl:
@@ -443,8 +446,7 @@ class TestNode():
found = False
if found:
- num_logs = len(log.splitlines())
- return num_logs
+ return
if time.time() >= time_end:
print_log = " - " + "\n - ".join(log.splitlines())
@@ -456,7 +458,6 @@ class TestNode():
self._raise_assertion_error(
'Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(
str(expected_msgs), print_log))
- return -1 # useless return to satisfy linter
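+ # Sketch of the intended usage after this change (the node, the RPC call
+ # and the log fragment below are illustrative placeholders): the action
+ # that should produce the log line runs inside the 'with' block, and the
+ # log is scanned from the size recorded on entry.
+ #   with node.wait_for_debug_log(["Saw new header"]):
+ #       node.submitheader(hexdata=header_hex)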
@contextlib.contextmanager
def profile_with_perf(self, profile_name: str):
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 195af14914..dabde13bf1 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -438,7 +438,7 @@ def delete_cookie_file(datadir, chain):
def softfork_active(node, key):
"""Return whether a softfork is active."""
- return node.getblockchaininfo()['softforks'][key]['active']
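+ # The per-deployment status now sits one level deeper than it did in
+ # getblockchaininfo; the result looks roughly like
+ #   {"deployments": {"csv": {"active": True, ...}, "bip65": {...}, ...}}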
+ return node.getdeploymentinfo()['deployments'][key]['active']
def set_node_times(nodes, t):
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index eb2d030f4a..e833128063 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -198,6 +198,7 @@ BASE_SCRIPTS = [
'wallet_keypool.py --legacy-wallet',
'wallet_keypool.py --descriptors',
'wallet_descriptor.py --descriptors',
+ 'feature_maxtipage.py',
'p2p_nobloomfilter_messages.py',
'p2p_filter.py',
'rpc_setban.py',
@@ -308,6 +309,7 @@ BASE_SCRIPTS = [
'feature_coinstatsindex.py --legacy-wallet',
'feature_coinstatsindex.py --descriptors',
'wallet_orphanedreward.py',
+ 'wallet_timelock.py',
'p2p_node_network_limited.py',
'p2p_permissions.py',
'feature_blocksdir.py',
diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py
index 0b868dde6c..317121eb68 100755
--- a/test/functional/wallet_multiwallet.py
+++ b/test/functional/wallet_multiwallet.py
@@ -141,7 +141,7 @@ class MultiWalletTest(BitcoinTestFramework):
# should raise rpc error if wallet path can't be created
err_code = -4 if self.options.descriptors else -1
- assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad")
+ assert_raises_rpc_error(err_code, "boost::filesystem::create_director", self.nodes[0].createwallet, "w8/bad")
# check that all requested wallets were created
self.stop_node(0)
diff --git a/test/functional/wallet_send.py b/test/functional/wallet_send.py
index d77d554baa..843a9f52b7 100755
--- a/test/functional/wallet_send.py
+++ b/test/functional/wallet_send.py
@@ -518,5 +518,45 @@ class WalletSendTest(BitcoinTestFramework):
assert signed["complete"]
self.nodes[0].finalizepsbt(signed["psbt"])
+ dec = self.nodes[0].decodepsbt(signed["psbt"])
+ for i, txin in enumerate(dec["tx"]["vin"]):
+ if txin["txid"] == ext_utxo["txid"] and txin["vout"] == ext_utxo["vout"]:
+ input_idx = i
+ break
+ psbt_in = dec["inputs"][input_idx]
+ # Calculate the input weight
+ # (prevout + sequence + length of scriptSig + 2 bytes buffer) * 4 + len of scriptwitness
+ len_scriptsig = len(psbt_in["final_scriptSig"]["hex"]) // 2 if "final_scriptSig" in psbt_in else 0
+ len_scriptwitness = len(psbt_in["final_scriptwitness"]["hex"]) // 2 if "final_scriptwitness" in psbt_in else 0
+ input_weight = ((41 + len_scriptsig + 2) * 4) + len_scriptwitness
+
+ # Input weight error conditions
+ assert_raises_rpc_error(
+ -8,
+ "Input weights should be specified in inputs rather than in options.",
+ ext_wallet.send,
+ outputs={self.nodes[0].getnewaddress(): 15},
+ options={"inputs": [ext_utxo], "input_weights": [{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": 1000}]}
+ )
+
+ # Funding should also work when input weights are provided
+ res = self.test_send(
+ from_wallet=ext_wallet,
+ to_wallet=self.nodes[0],
+ amount=15,
+ inputs=[{"txid": ext_utxo["txid"], "vout": ext_utxo["vout"], "weight": input_weight}],
+ add_inputs=True,
+ psbt=True,
+ include_watching=True,
+ fee_rate=10
+ )
+ signed = ext_wallet.walletprocesspsbt(res["psbt"])
+ signed = ext_fund.walletprocesspsbt(signed["psbt"])
+ assert signed["complete"]
+ tx = self.nodes[0].finalizepsbt(signed["psbt"])
+ testres = self.nodes[0].testmempoolaccept([tx["hex"]])[0]
+ assert_equal(testres["allowed"], True)
+ assert_fee_amount(testres["fees"]["base"], testres["vsize"], Decimal("0.0001"))
+
if __name__ == '__main__':
WalletSendTest().main()
diff --git a/test/functional/wallet_timelock.py b/test/functional/wallet_timelock.py
new file mode 100755
index 0000000000..cf233a00ef
--- /dev/null
+++ b/test/functional/wallet_timelock.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python3
+# Copyright (c) 2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import assert_equal
+
+
+class WalletLocktimeTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_no_wallet()
+
+ def run_test(self):
+ node = self.nodes[0]
+
+ mtp_tip = node.getblockheader(node.getbestblockhash())["mediantime"]
+
+ self.log.info("Get new address with label")
+ label = "timelock⌛🔓"
+ address = node.getnewaddress(label=label)
+
+ self.log.info("Send to new address with locktime")
+ node.send(
+ outputs={address: 5},
+ options={"locktime": mtp_tip - 1},
+ )
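+ # A locktime this large is treated as a Unix timestamp and, per BIP 113,
+ # a transaction is final once its locktime is strictly below the median
+ # time past of the tip, so mtp_tip - 1 can confirm in the very next block;
+ # moving the mock clock back to that value later must not alter how the
+ # wallet reports the already-confirmed output.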
+ self.generate(node, 1)
+
+ self.log.info("Check that clock can not change finality of confirmed txs")
+ amount_before_ad = node.getreceivedbyaddress(address)
+ amount_before_lb = node.getreceivedbylabel(label)
+ list_before_ad = node.listreceivedbyaddress(address_filter=address)
+ list_before_lb = node.listreceivedbylabel(include_empty=False)
+ balance_before = node.getbalances()["mine"]["trusted"]
+ coin_before = node.listunspent(maxconf=1)
+ node.setmocktime(mtp_tip - 1)
+ assert_equal(node.getreceivedbyaddress(address), amount_before_ad)
+ assert_equal(node.getreceivedbylabel(label), amount_before_lb)
+ assert_equal(node.listreceivedbyaddress(address_filter=address), list_before_ad)
+ assert_equal(node.listreceivedbylabel(include_empty=False), list_before_lb)
+ assert_equal(node.getbalances()["mine"]["trusted"], balance_before)
+ assert_equal(node.listunspent(maxconf=1), coin_before)
+
+
+if __name__ == "__main__":
+ WalletLocktimeTest().main()
diff --git a/test/lint/commit-script-check.sh b/test/lint/commit-script-check.sh
index 6a8a15d05c..9449b393f1 100755
--- a/test/lint/commit-script-check.sh
+++ b/test/lint/commit-script-check.sh
@@ -17,6 +17,11 @@ if test -z "$1"; then
exit 1
fi
+if ! sed --help 2>&1 | grep -q 'GNU'; then
+ echo "Error: the installed sed package is not compatible. Please make sure you have GNU sed installed in your system.";
+ exit 1;
+fi
+
RET=0
PREV_BRANCH=$(git name-rev --name-only HEAD)
PREV_HEAD=$(git rev-parse HEAD)
diff --git a/test/lint/lint-spelling.ignore-words.txt b/test/lint/lint-spelling.ignore-words.txt
index 9906b15e9a..afdb0692d8 100644
--- a/test/lint/lint-spelling.ignore-words.txt
+++ b/test/lint/lint-spelling.ignore-words.txt
@@ -1,6 +1,8 @@
asend
+ba
blockin
cachable
+creat
fo
fpr
hights
diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan
index 4292544dbd..a049693ee1 100644
--- a/test/sanitizer_suppressions/ubsan
+++ b/test/sanitizer_suppressions/ubsan
@@ -45,10 +45,7 @@ shift-base:test/fuzz/crypto_diff_fuzz_chacha20.cpp
# job.
unsigned-integer-overflow:addrman.cpp
unsigned-integer-overflow:arith_uint256.h
-unsigned-integer-overflow:bitcoin-tx.cpp
unsigned-integer-overflow:common/bloom.cpp
-unsigned-integer-overflow:chain.cpp
-unsigned-integer-overflow:chain.h
unsigned-integer-overflow:coins.cpp
unsigned-integer-overflow:compressor.cpp
unsigned-integer-overflow:core_write.cpp
@@ -64,41 +61,20 @@ unsigned-integer-overflow:validation.cpp
implicit-integer-sign-change:addrman.h
implicit-integer-sign-change:bech32.cpp
implicit-integer-sign-change:common/bloom.cpp
-implicit-integer-sign-change:chain.cpp
-implicit-integer-sign-change:chain.h
implicit-integer-sign-change:coins.h
implicit-integer-sign-change:compat/stdin.cpp
implicit-integer-sign-change:compressor.h
implicit-integer-sign-change:crypto/
implicit-integer-sign-change:key.cpp
-implicit-integer-sign-change:noui.cpp
implicit-integer-sign-change:policy/fees.cpp
implicit-integer-sign-change:prevector.h
implicit-integer-sign-change:script/bitcoinconsensus.cpp
implicit-integer-sign-change:script/interpreter.cpp
implicit-integer-sign-change:serialize.h
-implicit-integer-sign-change:test/arith_uint256_tests.cpp
-implicit-integer-sign-change:test/coins_tests.cpp
-implicit-integer-sign-change:test/pow_tests.cpp
-implicit-integer-sign-change:test/prevector_tests.cpp
-implicit-integer-sign-change:test/sighash_tests.cpp
-implicit-integer-sign-change:test/skiplist_tests.cpp
-implicit-integer-sign-change:test/streams_tests.cpp
-implicit-integer-sign-change:test/transaction_tests.cpp
implicit-integer-sign-change:txmempool.cpp
-implicit-integer-sign-change:zmq/zmqpublishnotifier.cpp
-implicit-signed-integer-truncation,implicit-integer-sign-change:chain.h
-implicit-signed-integer-truncation,implicit-integer-sign-change:test/skiplist_tests.cpp
implicit-signed-integer-truncation:addrman.cpp
implicit-signed-integer-truncation:addrman.h
-implicit-signed-integer-truncation:chain.h
implicit-signed-integer-truncation:crypto/
-implicit-signed-integer-truncation:node/miner.cpp
-implicit-signed-integer-truncation:net.cpp
-implicit-signed-integer-truncation:streams.h
-implicit-signed-integer-truncation:test/arith_uint256_tests.cpp
-implicit-signed-integer-truncation:test/skiplist_tests.cpp
-implicit-signed-integer-truncation:torcontrol.cpp
implicit-unsigned-integer-truncation:crypto/
shift-base:arith_uint256.cpp
shift-base:crypto/
diff --git a/test/util/data/tt-delin1-out.json b/test/util/data/tt-delin1-out.json
index c5b9f6df01..6e053fe2b9 100644
--- a/test/util/data/tt-delin1-out.json
+++ b/test/util/data/tt-delin1-out.json
@@ -194,6 +194,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 8fd139bb39ced713f231c58a4d07bf6954d1c201 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(1E7SGgAZFCHDnVZLuRViX3gUmxpMfdvd2o)#xvg87vgr",
"hex": "76a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac",
"address": "1E7SGgAZFCHDnVZLuRViX3gUmxpMfdvd2o",
"type": "pubkeyhash"
@@ -204,6 +205,7 @@
"n": 1,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 6c772e9cf96371bba3da8cb733da70a2fcf20078 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(1AtWkdmfmYkErU16d3KYykJUbEp9MAj9Sb)#tsyprkms",
"hex": "76a9146c772e9cf96371bba3da8cb733da70a2fcf2007888ac",
"address": "1AtWkdmfmYkErU16d3KYykJUbEp9MAj9Sb",
"type": "pubkeyhash"
diff --git a/test/util/data/tt-delout1-out.json b/test/util/data/tt-delout1-out.json
index 3863416430..e61b9c79db 100644
--- a/test/util/data/tt-delout1-out.json
+++ b/test/util/data/tt-delout1-out.json
@@ -203,6 +203,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 8fd139bb39ced713f231c58a4d07bf6954d1c201 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(1E7SGgAZFCHDnVZLuRViX3gUmxpMfdvd2o)#xvg87vgr",
"hex": "76a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac",
"address": "1E7SGgAZFCHDnVZLuRViX3gUmxpMfdvd2o",
"type": "pubkeyhash"
diff --git a/test/util/data/tt-locktime317000-out.json b/test/util/data/tt-locktime317000-out.json
index 62e785f7d0..873628e124 100644
--- a/test/util/data/tt-locktime317000-out.json
+++ b/test/util/data/tt-locktime317000-out.json
@@ -203,6 +203,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 8fd139bb39ced713f231c58a4d07bf6954d1c201 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(1E7SGgAZFCHDnVZLuRViX3gUmxpMfdvd2o)#xvg87vgr",
"hex": "76a9148fd139bb39ced713f231c58a4d07bf6954d1c20188ac",
"address": "1E7SGgAZFCHDnVZLuRViX3gUmxpMfdvd2o",
"type": "pubkeyhash"
@@ -213,6 +214,7 @@
"n": 1,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 6c772e9cf96371bba3da8cb733da70a2fcf20078 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(1AtWkdmfmYkErU16d3KYykJUbEp9MAj9Sb)#tsyprkms",
"hex": "76a9146c772e9cf96371bba3da8cb733da70a2fcf2007888ac",
"address": "1AtWkdmfmYkErU16d3KYykJUbEp9MAj9Sb",
"type": "pubkeyhash"
diff --git a/test/util/data/txcreate1.json b/test/util/data/txcreate1.json
index 96d77ef273..c4a76f22a6 100644
--- a/test/util/data/txcreate1.json
+++ b/test/util/data/txcreate1.json
@@ -41,6 +41,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 1fc11f39be1729bf973a7ab6a615ca4729d64574 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(13tuJJDR2RgArmgfv6JScSdreahzgc4T6o)#ztmwxg4c",
"hex": "76a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac",
"address": "13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
"type": "pubkeyhash"
@@ -51,6 +52,7 @@
"n": 1,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 f2d4db28cad6502226ee484ae24505c2885cb12d OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(1P8yWvZW8jVihP1bzHeqfE4aoXNX8AVa46)#vdmdu766",
"hex": "76a914f2d4db28cad6502226ee484ae24505c2885cb12d88ac",
"address": "1P8yWvZW8jVihP1bzHeqfE4aoXNX8AVa46",
"type": "pubkeyhash"
diff --git a/test/util/data/txcreate2.json b/test/util/data/txcreate2.json
index ee9b9c3c17..95953cc90e 100644
--- a/test/util/data/txcreate2.json
+++ b/test/util/data/txcreate2.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "",
+ "desc": "raw()#58lrscpx",
"hex": "",
"type": "nonstandard"
}
diff --git a/test/util/data/txcreatedata1.json b/test/util/data/txcreatedata1.json
index 87fc7e9cf7..1454ffdab7 100644
--- a/test/util/data/txcreatedata1.json
+++ b/test/util/data/txcreatedata1.json
@@ -23,6 +23,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 1fc11f39be1729bf973a7ab6a615ca4729d64574 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(13tuJJDR2RgArmgfv6JScSdreahzgc4T6o)#ztmwxg4c",
"hex": "76a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac",
"address": "13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
"type": "pubkeyhash"
@@ -33,6 +34,7 @@
"n": 1,
"scriptPubKey": {
"asm": "OP_RETURN 54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e",
+ "desc": "raw(6a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e)#zf2avljj",
"hex": "6a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e",
"type": "nulldata"
}
diff --git a/test/util/data/txcreatedata2.json b/test/util/data/txcreatedata2.json
index d03b1c8244..ca20d2aa45 100644
--- a/test/util/data/txcreatedata2.json
+++ b/test/util/data/txcreatedata2.json
@@ -23,6 +23,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 1fc11f39be1729bf973a7ab6a615ca4729d64574 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(13tuJJDR2RgArmgfv6JScSdreahzgc4T6o)#ztmwxg4c",
"hex": "76a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac",
"address": "13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
"type": "pubkeyhash"
@@ -33,6 +34,7 @@
"n": 1,
"scriptPubKey": {
"asm": "OP_RETURN 54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e",
+ "desc": "raw(6a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e)#zf2avljj",
"hex": "6a4c4f54686973204f505f52455455524e207472616e73616374696f6e206f7574707574207761732063726561746564206279206d6f646966696564206372656174657261777472616e73616374696f6e2e",
"type": "nulldata"
}
diff --git a/test/util/data/txcreatedata_seq0.json b/test/util/data/txcreatedata_seq0.json
index 8a123f1ba8..9838383c06 100644
--- a/test/util/data/txcreatedata_seq0.json
+++ b/test/util/data/txcreatedata_seq0.json
@@ -23,6 +23,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 1fc11f39be1729bf973a7ab6a615ca4729d64574 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(13tuJJDR2RgArmgfv6JScSdreahzgc4T6o)#ztmwxg4c",
"hex": "76a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac",
"address": "13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
"type": "pubkeyhash"
diff --git a/test/util/data/txcreatedata_seq1.json b/test/util/data/txcreatedata_seq1.json
index 006fd7259f..c729f8dcfb 100644
--- a/test/util/data/txcreatedata_seq1.json
+++ b/test/util/data/txcreatedata_seq1.json
@@ -32,6 +32,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 1fc11f39be1729bf973a7ab6a615ca4729d64574 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(13tuJJDR2RgArmgfv6JScSdreahzgc4T6o)#ztmwxg4c",
"hex": "76a9141fc11f39be1729bf973a7ab6a615ca4729d6457488ac",
"address": "13tuJJDR2RgArmgfv6JScSdreahzgc4T6o",
"type": "pubkeyhash"
diff --git a/test/util/data/txcreatemultisig1.json b/test/util/data/txcreatemultisig1.json
index baa290c2b1..9632b20ece 100644
--- a/test/util/data/txcreatemultisig1.json
+++ b/test/util/data/txcreatemultisig1.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "2 02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397 021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d 02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485 3 OP_CHECKMULTISIG",
+ "desc": "multi(2,02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397,021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d,02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485)#8s88p9pl",
"hex": "522102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff39721021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d2102df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb48553ae",
"type": "multisig"
}
diff --git a/test/util/data/txcreatemultisig2.json b/test/util/data/txcreatemultisig2.json
index 6685512587..021cf539a8 100644
--- a/test/util/data/txcreatemultisig2.json
+++ b/test/util/data/txcreatemultisig2.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_HASH160 1c6fbaf46d64221e80cbae182c33ddf81b9294ac OP_EQUAL",
+ "desc": "addr(34HNh57oBCRKkxNyjTuWAJkTbuGh6jg2Ms)#ngnz8933",
"hex": "a9141c6fbaf46d64221e80cbae182c33ddf81b9294ac87",
"address": "34HNh57oBCRKkxNyjTuWAJkTbuGh6jg2Ms",
"type": "scripthash"
diff --git a/test/util/data/txcreatemultisig3.json b/test/util/data/txcreatemultisig3.json
index be96f4c704..3c20a88a91 100644
--- a/test/util/data/txcreatemultisig3.json
+++ b/test/util/data/txcreatemultisig3.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "0 e15a86a23178f433d514dbbce042e87d72662b8b5edcacfd2e37ab7a2d135f05",
+ "desc": "addr(bc1qu9dgdg330r6r84g5mw7wqshg04exv2uttmw2elfwx74h5tgntuzs44gyfg)#yvt39j9m",
"hex": "0020e15a86a23178f433d514dbbce042e87d72662b8b5edcacfd2e37ab7a2d135f05",
"address": "bc1qu9dgdg330r6r84g5mw7wqshg04exv2uttmw2elfwx74h5tgntuzs44gyfg",
"type": "witness_v0_scripthash"
diff --git a/test/util/data/txcreatemultisig4.json b/test/util/data/txcreatemultisig4.json
index 08831ecdca..7ae18fd90a 100644
--- a/test/util/data/txcreatemultisig4.json
+++ b/test/util/data/txcreatemultisig4.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_HASH160 6edf12858999f0dae74f9c692e6694ee3621b2ac OP_EQUAL",
+ "desc": "addr(3BoFUz1StqcNcgUTZE5cC1eFhuYFzj3fGH)#466tx6fn",
"hex": "a9146edf12858999f0dae74f9c692e6694ee3621b2ac87",
"address": "3BoFUz1StqcNcgUTZE5cC1eFhuYFzj3fGH",
"type": "scripthash"
diff --git a/test/util/data/txcreatemultisig5.json b/test/util/data/txcreatemultisig5.json
index 93048cf261..98a5c2d8d1 100644
--- a/test/util/data/txcreatemultisig5.json
+++ b/test/util/data/txcreatemultisig5.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_HASH160 a4051c02398868af83f28f083208fae99a769263 OP_EQUAL",
+ "desc": "addr(3GeGs1eHUxPz5YyuFe9WPpXid2UsUb5Jos)#juhnnegr",
"hex": "a914a4051c02398868af83f28f083208fae99a76926387",
"address": "3GeGs1eHUxPz5YyuFe9WPpXid2UsUb5Jos",
"type": "scripthash"
diff --git a/test/util/data/txcreateoutpubkey1.json b/test/util/data/txcreateoutpubkey1.json
index 42b519bb21..3baf479991 100644
--- a/test/util/data/txcreateoutpubkey1.json
+++ b/test/util/data/txcreateoutpubkey1.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397 OP_CHECKSIG",
+ "desc": "pk(02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397)#rk5v7uqw",
"hex": "2102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397ac",
"type": "pubkey"
}
diff --git a/test/util/data/txcreateoutpubkey2.json b/test/util/data/txcreateoutpubkey2.json
index 52168a889b..78acf1658b 100644
--- a/test/util/data/txcreateoutpubkey2.json
+++ b/test/util/data/txcreateoutpubkey2.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "0 a2516e770582864a6a56ed21a102044e388c62e3",
+ "desc": "addr(bc1q5fgkuac9s2ry56jka5s6zqsyfcugcchry5cwu0)#gm7zhxq2",
"hex": "0014a2516e770582864a6a56ed21a102044e388c62e3",
"address": "bc1q5fgkuac9s2ry56jka5s6zqsyfcugcchry5cwu0",
"type": "witness_v0_keyhash"
diff --git a/test/util/data/txcreateoutpubkey3.json b/test/util/data/txcreateoutpubkey3.json
index fce210f8a3..632ed52ccf 100644
--- a/test/util/data/txcreateoutpubkey3.json
+++ b/test/util/data/txcreateoutpubkey3.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_HASH160 a5ab14c9804d0d8bf02f1aea4e82780733ad0a83 OP_EQUAL",
+ "desc": "addr(3GnzN8FqgvYGYdhj8NW6UNxxVv3Uj1ApQn)#zsln680u",
"hex": "a914a5ab14c9804d0d8bf02f1aea4e82780733ad0a8387",
"address": "3GnzN8FqgvYGYdhj8NW6UNxxVv3Uj1ApQn",
"type": "scripthash"
diff --git a/test/util/data/txcreatescript1.json b/test/util/data/txcreatescript1.json
index af1c4c35e2..cdee9dbbfa 100644
--- a/test/util/data/txcreatescript1.json
+++ b/test/util/data/txcreatescript1.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DROP",
+ "desc": "raw(75)#ppey0zqj",
"hex": "75",
"type": "nonstandard"
}
diff --git a/test/util/data/txcreatescript2.json b/test/util/data/txcreatescript2.json
index 2cde70fdf7..1fbae62f4b 100644
--- a/test/util/data/txcreatescript2.json
+++ b/test/util/data/txcreatescript2.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_HASH160 71ed53322d470bb96657deb786b94f97dd46fb15 OP_EQUAL",
+ "desc": "addr(3C5QarEGh9feKbDJ3QbMf2YNjnMoiPDhNp)#5mx9waq3",
"hex": "a91471ed53322d470bb96657deb786b94f97dd46fb1587",
"address": "3C5QarEGh9feKbDJ3QbMf2YNjnMoiPDhNp",
"type": "scripthash"
diff --git a/test/util/data/txcreatescript3.json b/test/util/data/txcreatescript3.json
index 7a282faf4f..502fe91692 100644
--- a/test/util/data/txcreatescript3.json
+++ b/test/util/data/txcreatescript3.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "0 0bfe935e70c321c7ca3afc75ce0d0ca2f98b5422e008bb31c00c6d7f1f1c0ad6",
+ "desc": "addr(bc1qp0lfxhnscvsu0j36l36uurgv5tuck4pzuqytkvwqp3kh78cupttqyf705v)#s4fdh9tu",
"hex": "00200bfe935e70c321c7ca3afc75ce0d0ca2f98b5422e008bb31c00c6d7f1f1c0ad6",
"address": "bc1qp0lfxhnscvsu0j36l36uurgv5tuck4pzuqytkvwqp3kh78cupttqyf705v",
"type": "witness_v0_scripthash"
diff --git a/test/util/data/txcreatescript4.json b/test/util/data/txcreatescript4.json
index 298b37bb4a..1ed89dfff2 100644
--- a/test/util/data/txcreatescript4.json
+++ b/test/util/data/txcreatescript4.json
@@ -14,6 +14,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_HASH160 6a2c482f4985f57e702f325816c90e3723ca81ae OP_EQUAL",
+ "desc": "addr(3BNQbeFeJJGMAyDxPwWPuqxPMrjsFLjk3f)#fdleltnv",
"hex": "a9146a2c482f4985f57e702f325816c90e3723ca81ae87",
"address": "3BNQbeFeJJGMAyDxPwWPuqxPMrjsFLjk3f",
"type": "scripthash"
diff --git a/test/util/data/txcreatesignv1.json b/test/util/data/txcreatesignv1.json
index ca5e003110..56ef9b195e 100644
--- a/test/util/data/txcreatesignv1.json
+++ b/test/util/data/txcreatesignv1.json
@@ -23,6 +23,7 @@
"n": 0,
"scriptPubKey": {
"asm": "OP_DUP OP_HASH160 5834479edbbe0539b31ffd3a8f8ebadc2165ed01 OP_EQUALVERIFY OP_CHECKSIG",
+ "desc": "addr(193P6LtvS4nCnkDvM9uXn1gsSRqh4aDAz7)#nw04wh58",
"hex": "76a9145834479edbbe0539b31ffd3a8f8ebadc2165ed0188ac",
"address": "193P6LtvS4nCnkDvM9uXn1gsSRqh4aDAz7",
"type": "pubkeyhash"