aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.cirrus.yml36
-rw-r--r--REVIEWERS17
-rw-r--r--ci/README.md3
-rwxr-xr-xci/test/00_setup_env.sh7
-rwxr-xr-xci/test/00_setup_env_android.sh2
-rwxr-xr-xci/test/00_setup_env_i686_centos.sh1
-rwxr-xr-xci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh1
-rwxr-xr-xci/test/00_setup_env_native_qt5.sh1
-rwxr-xr-xci/test/01_base_install.sh33
-rwxr-xr-xci/test/04_install.sh42
-rwxr-xr-xci/test/06_script_b.sh8
-rw-r--r--ci/test_imagefile10
-rw-r--r--configure.ac15
-rwxr-xr-xcontrib/devtools/symbol-check.py22
-rwxr-xr-xcontrib/devtools/test-symbol-check.py25
-rwxr-xr-xcontrib/guix/libexec/build.sh7
-rw-r--r--contrib/guix/manifest.scm51
-rw-r--r--contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch62
-rw-r--r--contrib/guix/patches/glibc-2.24-guix-prefix.patch25
-rw-r--r--contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch100
-rw-r--r--contrib/guix/patches/glibc-2.27-dont-redefine-nss-database.patch87
-rw-r--r--contrib/guix/patches/glibc-2.27-fcommon.patch (renamed from contrib/guix/patches/glibc-2.24-fcommon.patch)10
-rw-r--r--contrib/guix/patches/glibc-2.27-guix-prefix.patch3
-rw-r--r--contrib/guix/patches/glibc-ldd-x86_64.patch4
-rw-r--r--contrib/seeds/README.md2
-rwxr-xr-xcontrib/seeds/makeseeds.py4
-rw-r--r--contrib/verify-commits/allow-revsig-commits175
-rw-r--r--contrib/verify-commits/trusted-git-root2
-rw-r--r--contrib/verify-commits/trusted-keys1
-rw-r--r--contrib/verifybinaries/README.md11
-rw-r--r--depends/funcs.mk4
-rw-r--r--depends/packages/bdb.mk2
-rw-r--r--depends/packages/zeromq.mk4
-rw-r--r--doc/build-freebsd.md35
-rw-r--r--doc/dependencies.md4
-rw-r--r--doc/developer-notes.md4
-rw-r--r--doc/fuzzing.md4
-rw-r--r--doc/init.md2
-rw-r--r--doc/release-notes-27037.md5
-rwxr-xr-xshare/rpcauth/rpcauth.py14
-rw-r--r--src/Makefile.test.include4
-rw-r--r--src/Makefile.test_util.include3
-rw-r--r--src/bench/chacha20.cpp4
-rw-r--r--src/bench/nanobench.h463
-rw-r--r--src/bitcoin-cli.cpp18
-rw-r--r--src/blockencodings.h2
-rw-r--r--src/coins.cpp24
-rw-r--r--src/coins.h8
-rw-r--r--src/consensus/params.h1
-rw-r--r--src/core_io.h3
-rw-r--r--src/core_write.cpp4
-rw-r--r--src/crypto/chacha20.cpp260
-rw-r--r--src/crypto/chacha20.h66
-rw-r--r--src/crypto/chacha_poly_aead.cpp13
-rw-r--r--src/crypto/muhash.cpp2
-rw-r--r--src/fs.cpp32
-rw-r--r--src/i2p.cpp2
-rw-r--r--src/init.cpp9
-rw-r--r--src/interfaces/node.h7
-rw-r--r--src/logging/timer.h26
-rw-r--r--src/mapport.cpp10
-rw-r--r--src/net.cpp10
-rw-r--r--src/netaddress.cpp23
-rw-r--r--src/netaddress.h2
-rw-r--r--src/node/interfaces.cpp3
-rw-r--r--src/psbt.cpp12
-rw-r--r--src/psbt.h4
-rw-r--r--src/qt/bitcoingui.cpp7
-rw-r--r--src/qt/clientmodel.cpp11
-rw-r--r--src/qt/clientmodel.h7
-rw-r--r--src/qt/optionsmodel.cpp138
-rw-r--r--src/qt/optionsmodel.h15
-rw-r--r--src/qt/overviewpage.cpp16
-rw-r--r--src/qt/overviewpage.h1
-rw-r--r--src/qt/rpcconsole.cpp4
-rw-r--r--src/qt/sendcoinsdialog.cpp2
-rw-r--r--src/qt/transactionfilterproxy.cpp17
-rw-r--r--src/qt/transactionfilterproxy.h6
-rw-r--r--src/qt/transactiontablemodel.cpp5
-rw-r--r--src/qt/walletmodel.cpp7
-rw-r--r--src/qt/walletmodel.h18
-rw-r--r--src/random.cpp20
-rw-r--r--src/random.h21
-rw-r--r--src/rpc/net.cpp26
-rw-r--r--src/rpc/rawtransaction.cpp4
-rw-r--r--src/rpc/rawtransaction_util.cpp43
-rw-r--r--src/rpc/rawtransaction_util.h7
-rw-r--r--src/rpc/request.cpp2
-rw-r--r--src/rpc/server_util.cpp14
-rw-r--r--src/rpc/server_util.h3
-rw-r--r--src/script/descriptor.cpp2
-rw-r--r--src/script/descriptor.h6
-rw-r--r--src/script/miniscript.cpp76
-rw-r--r--src/script/miniscript.h334
-rw-r--r--src/script/sign.cpp118
-rw-r--r--src/script/sign.h25
-rw-r--r--src/streams.h8
-rw-r--r--src/test/coins_tests.cpp54
-rw-r--r--src/test/crypto_tests.cpp163
-rw-r--r--src/test/dbwrapper_tests.cpp43
-rw-r--r--src/test/descriptor_tests.cpp69
-rw-r--r--src/test/fuzz/coins_view.cpp2
-rw-r--r--src/test/fuzz/coinscache_sim.cpp478
-rw-r--r--src/test/fuzz/crypto_chacha20.cpp118
-rw-r--r--src/test/fuzz/crypto_diff_fuzz_chacha20.cpp33
-rw-r--r--src/test/fuzz/miniscript.cpp803
-rw-r--r--src/test/fuzz/script_sign.cpp6
-rw-r--r--src/test/logging_tests.cpp15
-rw-r--r--src/test/miniscript_tests.cpp232
-rw-r--r--src/test/multisig_tests.cpp3
-rw-r--r--src/test/orphanage_tests.cpp6
-rw-r--r--src/test/script_p2sh_tests.cpp15
-rw-r--r--src/test/script_tests.cpp15
-rw-r--r--src/test/serialize_tests.cpp3
-rw-r--r--src/test/transaction_tests.cpp51
-rw-r--r--src/test/util/xoroshiro128plusplus.h71
-rw-r--r--src/test/xoroshiro128plusplus_tests.cpp29
-rw-r--r--src/uint256.cpp15
-rw-r--r--src/uint256.h80
-rw-r--r--src/util/hasher.cpp5
-rw-r--r--src/util/hasher.h2
-rw-r--r--src/util/system.cpp5
-rw-r--r--src/validation.cpp40
-rw-r--r--src/wallet/feebumper.cpp21
-rw-r--r--src/wallet/feebumper.h3
-rw-r--r--src/wallet/init.cpp3
-rw-r--r--src/wallet/interfaces.cpp3
-rw-r--r--src/wallet/rpc/spend.cpp69
-rw-r--r--src/wallet/spend.cpp4
-rw-r--r--src/wallet/test/coinselector_tests.cpp11
-rw-r--r--src/wallet/wallet.cpp16
-rw-r--r--test/functional/data/rpc_decodescript.json2
-rwxr-xr-xtest/functional/feature_block.py50
-rwxr-xr-xtest/functional/feature_maxuploadtarget.py3
-rwxr-xr-xtest/functional/feature_posix_fs_permissions.py43
-rwxr-xr-xtest/functional/mempool_updatefromblock.py57
-rwxr-xr-xtest/functional/p2p_invalid_messages.py34
-rwxr-xr-xtest/functional/rpc_decodescript.py12
-rwxr-xr-xtest/functional/test_framework/test_framework.py5
-rwxr-xr-xtest/functional/test_runner.py1
-rwxr-xr-xtest/functional/wallet_backwards_compatibility.py4
-rwxr-xr-xtest/functional/wallet_bumpfee.py22
-rwxr-xr-xtest/functional/wallet_groups.py5
-rwxr-xr-xtest/functional/wallet_migration.py4
-rwxr-xr-xtest/functional/wallet_miniscript.py211
-rwxr-xr-xtest/functional/wallet_pruning.py11
-rwxr-xr-xtest/get_previous_releases.py9
-rwxr-xr-xtest/lint/lint-locale-dependence.py3
-rw-r--r--test/sanitizer_suppressions/ubsan2
-rwxr-xr-xtest/util/rpcauth-test.py11
150 files changed, 4271 insertions, 1527 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 64fcd82c2b..673bc427e1 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -39,7 +39,7 @@ main_template: &MAIN_TEMPLATE
ci_script:
- ./ci/test_run_all.sh
-global_task_template: &GLOBAL_TASK_TEMPLATE
+container_depends_template: &CONTAINER_DEPENDS_TEMPLATE
<< : *BASE_TEMPLATE
container:
# https://cirrus-ci.org/faq/#are-there-any-limits
@@ -49,15 +49,10 @@ global_task_template: &GLOBAL_TASK_TEMPLATE
memory: 8G # Set to 8GB to avoid OOM. https://cirrus-ci.org/guide/linux/#linux-containers
depends_built_cache:
folder: "depends/built"
- fingerprint_script: echo $CIRRUS_TASK_NAME $(git rev-list -1 HEAD ./depends)
- << : *MAIN_TEMPLATE
+ fingerprint_script: echo $CIRRUS_TASK_NAME $(git rev-parse HEAD:depends)
-macos_native_task_template: &MACOS_NATIVE_TASK_TEMPLATE
- << : *BASE_TEMPLATE
- check_clang_script:
- - clang --version
- brew_install_script:
- - brew install boost libevent qt@5 miniupnpc libnatpmp ccache zeromq qrencode libtool automake gnu-getopt
+global_task_template: &GLOBAL_TASK_TEMPLATE
+ << : *CONTAINER_DEPENDS_TEMPLATE
<< : *MAIN_TEMPLATE
compute_credits_template: &CREDITS_TEMPLATE
@@ -308,13 +303,13 @@ task:
task:
name: 'macOS 10.15 [gui, no tests] [focal]'
- << : *BASE_TEMPLATE
+ << : *CONTAINER_DEPENDS_TEMPLATE
+ container:
+ image: ubuntu:focal
macos_sdk_cache:
folder: "depends/SDKs/$MACOS_SDK"
fingerprint_key: "$MACOS_SDK"
<< : *MAIN_TEMPLATE
- container:
- image: ubuntu:focal
env:
MACOS_SDK: "Xcode-12.2-12B45b-extracted-SDK-with-libcxx-headers"
<< : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
@@ -325,7 +320,12 @@ task:
macos_instance:
# Use latest image, but hardcode version to avoid silent upgrades (and breaks)
image: ghcr.io/cirruslabs/macos-ventura-xcode:14.1 # https://cirrus-ci.org/guide/macOS
- << : *MACOS_NATIVE_TASK_TEMPLATE
+ << : *BASE_TEMPLATE
+ check_clang_script:
+ - clang --version
+ brew_install_script:
+ - brew install boost libevent qt@5 miniupnpc libnatpmp ccache zeromq qrencode libtool automake gnu-getopt
+ << : *MAIN_TEMPLATE
env:
<< : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
CI_USE_APT_INSTALL: "no"
@@ -333,17 +333,17 @@ task:
FILE_ENV: "./ci/test/00_setup_env_mac_native_arm64.sh"
task:
- name: 'ARM64 Android APK [focal]'
- << : *BASE_TEMPLATE
+ name: 'ARM64 Android APK [jammy]'
+ << : *CONTAINER_DEPENDS_TEMPLATE
+ container:
+ image: ubuntu:jammy
android_sdk_cache:
folder: "depends/SDKs/android"
fingerprint_key: "ANDROID_API_LEVEL=28 ANDROID_BUILD_TOOLS_VERSION=28.0.3 ANDROID_NDK_VERSION=23.2.8568313"
depends_sources_cache:
folder: "depends/sources"
- fingerprint_script: git rev-list -1 HEAD ./depends
+ fingerprint_script: git rev-parse HEAD:depends/packages
<< : *MAIN_TEMPLATE
- container:
- image: ubuntu:focal
env:
<< : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
FILE_ENV: "./ci/test/00_setup_env_android.sh"
diff --git a/REVIEWERS b/REVIEWERS
deleted file mode 100644
index cb1bafa496..0000000000
--- a/REVIEWERS
+++ /dev/null
@@ -1,17 +0,0 @@
-# ==============================================================================
-# Bitcoin Core REVIEWERS
-# ==============================================================================
-
-# Configuration of automated review requests for the bitcoin/bitcoin repo
-# via DrahtBot.
-
-# Order is not important; if a modified file or directory matches a fnmatch,
-# the reviewer will be mentioned in a PR comment requesting a review.
-
-# Regular contributors are free to add their names to specific directories or
-# files provided that they are willing to provide a review.
-
-# Absence from this list should not be interpreted as a discouragement to
-# review a pull request. Peer review is always welcome and is a critical
-# component of the progress of the codebase. Information on peer review
-# guidelines can be found in the CONTRIBUTING.md doc.
diff --git a/ci/README.md b/ci/README.md
index 3c5f04c39e..de798607df 100644
--- a/ci/README.md
+++ b/ci/README.md
@@ -8,8 +8,7 @@ Be aware that the tests will be built and run in-place, so please run at your ow
If the repository is not a fresh git clone, you might have to clean files from previous builds or test runs first.
The ci needs to perform various sysadmin tasks such as installing packages or writing to the user's home directory.
-While most of the actions are done inside a docker container, this is not possible for all. Thus, cache directories,
-such as the depends cache, previous release binaries, or ccache, are mounted as read-write into the docker container. While it should be fine to run
+While it should be fine to run
the ci system locally on you development box, the ci scripts can generally be assumed to have received less review and
testing compared to other parts of the codebase. If you want to keep the work tree clean, you might want to run the ci
system in a virtual machine with a Linux operating system of your choice.
diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh
index 07c20f632d..ab830b8ec0 100755
--- a/ci/test/00_setup_env.sh
+++ b/ci/test/00_setup_env.sh
@@ -8,11 +8,10 @@ export LC_ALL=C.UTF-8
# The root dir.
# The ci system copies this folder.
-# This is where the depends build is done.
BASE_ROOT_DIR=$( cd "$( dirname "${BASH_SOURCE[0]}" )"/../../ >/dev/null 2>&1 && pwd )
export BASE_ROOT_DIR
# The depends dir.
-# This folder exists on the ci host and ci guest. Changes are propagated back and forth.
+# This folder exists only on the ci guest, and on the ci host as a volume.
export DEPENDS_DIR=${DEPENDS_DIR:-$BASE_ROOT_DIR/depends}
# A folder for the ci system to put temporary files (ccache, datadirs for tests, ...)
# This folder only exists on the ci host.
@@ -58,12 +57,14 @@ export CCACHE_SIZE=${CCACHE_SIZE:-100M}
export CCACHE_TEMPDIR=${CCACHE_TEMPDIR:-/tmp/.ccache-temp}
export CCACHE_COMPRESS=${CCACHE_COMPRESS:-1}
# The cache dir.
-# This folder exists on the ci host and ci guest. Changes are propagated back and forth.
+# This folder exists only on the ci guest, and on the ci host as a volume.
export CCACHE_DIR=${CCACHE_DIR:-$BASE_SCRATCH_DIR/.ccache}
# Folder where the build result is put (bin and lib).
export BASE_OUTDIR=${BASE_OUTDIR:-$BASE_SCRATCH_DIR/out/$HOST}
# Folder where the build is done (dist and out-of-tree build).
export BASE_BUILD_DIR=${BASE_BUILD_DIR:-$BASE_SCRATCH_DIR/build}
+# The folder for previous release binaries.
+# This folder exists only on the ci guest, and on the ci host as a volume.
export PREVIOUS_RELEASES_DIR=${PREVIOUS_RELEASES_DIR:-$BASE_ROOT_DIR/releases/$HOST}
export SDK_URL=${SDK_URL:-https://bitcoincore.org/depends-sources/sdks}
export CI_BASE_PACKAGES=${CI_BASE_PACKAGES:-build-essential libtool autotools-dev automake pkg-config bsdmainutils curl ca-certificates ccache python3 rsync git procps bison}
diff --git a/ci/test/00_setup_env_android.sh b/ci/test/00_setup_env_android.sh
index e1830b4f49..1834bd0bc4 100755
--- a/ci/test/00_setup_env_android.sh
+++ b/ci/test/00_setup_env_android.sh
@@ -9,7 +9,7 @@ export LC_ALL=C.UTF-8
export HOST=aarch64-linux-android
export PACKAGES="unzip openjdk-8-jdk gradle"
export CONTAINER_NAME=ci_android
-export CI_IMAGE_NAME_TAG="ubuntu:focal"
+export CI_IMAGE_NAME_TAG="ubuntu:jammy"
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh
index e8f963c8a9..8a931d44e5 100755
--- a/ci/test/00_setup_env_i686_centos.sh
+++ b/ci/test/00_setup_env_i686_centos.sh
@@ -12,6 +12,7 @@ export CI_IMAGE_NAME_TAG=quay.io/centos/centos:stream8
export CI_BASE_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python38 python38-pip which patch lbzip2 xz procps-ng dash rsync coreutils bison"
export PIP_PACKAGES="pyzmq"
export GOAL="install"
+export NO_WERROR=1 # GCC 8
export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports"
export CONFIG_SHELL="/bin/dash"
export TEST_RUNNER_ENV="LC_ALL=en_US.UTF-8"
diff --git a/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh b/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh
index 08bb5d1eab..06bc2401c5 100755
--- a/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh
+++ b/ci/test/00_setup_env_native_nowallet_libbitcoinkernel.sh
@@ -13,4 +13,5 @@ export PACKAGES="-t buster-backports python3-zmq clang-8 llvm-8 libc++abi-8-dev
export APPEND_APT_SOURCES_LIST="deb http://deb.debian.org/debian buster-backports main"
export DEP_OPTS="NO_WALLET=1 CC=clang-8 CXX='clang++-8 -stdlib=libc++'"
export GOAL="install"
+export NO_WERROR=1
export BITCOIN_CONFIG="--enable-reduce-exports CC=clang-8 CXX='clang++-8 -stdlib=libc++' --enable-experimental-util-chainstate --with-experimental-kernel-lib --enable-shared"
diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh
index 3f39185ae8..5cc0addd33 100755
--- a/ci/test/00_setup_env_native_qt5.sh
+++ b/ci/test/00_setup_env_native_qt5.sh
@@ -15,6 +15,7 @@ export TEST_RUNNER_EXTRA="--previous-releases --coverage --extended --exclude fe
export RUN_UNIT_TESTS_SEQUENTIAL="true"
export RUN_UNIT_TESTS="false"
export GOAL="install"
+export NO_WERROR=1
export DOWNLOAD_PREVIOUS_RELEASES="true"
export BITCOIN_CONFIG="--enable-zmq --with-libs=no --with-gui=qt5 --enable-reduce-exports \
--enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\" CC=gcc-8 CXX=g++-8"
diff --git a/ci/test/01_base_install.sh b/ci/test/01_base_install.sh
new file mode 100755
index 0000000000..c2469d7ca9
--- /dev/null
+++ b/ci/test/01_base_install.sh
@@ -0,0 +1,33 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2018-2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+export LC_ALL=C.UTF-8
+
+CI_EXEC_ROOT () { bash -c "$*"; }
+export -f CI_EXEC_ROOT
+
+if [ -n "$DPKG_ADD_ARCH" ]; then
+ CI_EXEC_ROOT dpkg --add-architecture "$DPKG_ADD_ARCH"
+fi
+
+if [[ $CI_IMAGE_NAME_TAG == *centos* ]]; then
+ ${CI_RETRY_EXE} CI_EXEC_ROOT dnf -y install epel-release
+ ${CI_RETRY_EXE} CI_EXEC_ROOT dnf -y --allowerasing install "$CI_BASE_PACKAGES" "$PACKAGES"
+elif [ "$CI_USE_APT_INSTALL" != "no" ]; then
+ if [[ "${ADD_UNTRUSTED_BPFCC_PPA}" == "true" ]]; then
+ # Ubuntu 22.04 LTS and Debian 11 both have an outdated bpfcc-tools packages.
+ # The iovisor PPA is outdated as well. The next Ubuntu and Debian releases will contain updated
+ # packages. Meanwhile, use an untrusted PPA to install an up-to-date version of the bpfcc-tools
+ # package.
+ # TODO: drop this once we can use newer images in GCE
+ CI_EXEC_ROOT add-apt-repository ppa:hadret/bpfcc
+ fi
+ if [[ -n "${APPEND_APT_SOURCES_LIST}" ]]; then
+ CI_EXEC_ROOT echo "${APPEND_APT_SOURCES_LIST}" \>\> /etc/apt/sources.list
+ fi
+ ${CI_RETRY_EXE} CI_EXEC_ROOT apt-get update
+ ${CI_RETRY_EXE} CI_EXEC_ROOT apt-get install --no-install-recommends --no-upgrade -y "$PACKAGES" "$CI_BASE_PACKAGES"
+fi
diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh
index 53fe6d961f..62bc3a963d 100755
--- a/ci/test/04_install.sh
+++ b/ci/test/04_install.sh
@@ -33,7 +33,15 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
# the name isn't important, so long as we use the same UID
LOCAL_USER=nonroot
- ${CI_RETRY_EXE} docker pull "$CI_IMAGE_NAME_TAG"
+ DOCKER_BUILDKIT=1 ${CI_RETRY_EXE} docker build \
+ --file "${BASE_ROOT_DIR}/ci/test_imagefile" \
+ --build-arg "CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG}" \
+ --build-arg "FILE_ENV=${FILE_ENV}" \
+ --tag="${CONTAINER_NAME}" \
+ "${BASE_ROOT_DIR}"
+ docker volume create "${CONTAINER_NAME}_ccache" || true
+ docker volume create "${CONTAINER_NAME}_depends" || true
+ docker volume create "${CONTAINER_NAME}_previous_releases" || true
if [ -n "${RESTART_CI_DOCKER_BEFORE_RUN}" ] ; then
echo "Restart docker before run to stop and clear all containers started with --rm"
@@ -43,13 +51,13 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
# shellcheck disable=SC2086
CI_CONTAINER_ID=$(docker run $CI_CONTAINER_CAP --rm --interactive --detach --tty \
--mount type=bind,src=$BASE_ROOT_DIR,dst=/ro_base,readonly \
- --mount type=bind,src=$CCACHE_DIR,dst=$CCACHE_DIR \
- --mount type=bind,src=$DEPENDS_DIR,dst=$DEPENDS_DIR \
- --mount type=bind,src=$PREVIOUS_RELEASES_DIR,dst=$PREVIOUS_RELEASES_DIR \
+ --mount "type=volume,src=${CONTAINER_NAME}_ccache,dst=$CCACHE_DIR" \
+ --mount "type=volume,src=${CONTAINER_NAME}_depends,dst=$DEPENDS_DIR" \
+ --mount "type=volume,src=${CONTAINER_NAME}_previous_releases,dst=$PREVIOUS_RELEASES_DIR" \
-w $BASE_ROOT_DIR \
--env-file /tmp/env \
--name $CONTAINER_NAME \
- $CI_IMAGE_NAME_TAG)
+ $CONTAINER_NAME)
export CI_CONTAINER_ID
# Create a non-root user inside the container which matches the local user.
@@ -63,6 +71,7 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
export CI_EXEC_CMD_PREFIX="docker exec -u $LOCAL_UID $CI_CONTAINER_ID"
else
echo "Running on host system without docker wrapper"
+ "${BASE_ROOT_DIR}/ci/test/01_base_install.sh"
fi
CI_EXEC () {
@@ -76,29 +85,6 @@ export -f CI_EXEC_ROOT
CI_EXEC mkdir -p "${BINS_SCRATCH_DIR}"
-if [ -n "$DPKG_ADD_ARCH" ]; then
- CI_EXEC_ROOT dpkg --add-architecture "$DPKG_ADD_ARCH"
-fi
-
-if [[ $CI_IMAGE_NAME_TAG == *centos* ]]; then
- ${CI_RETRY_EXE} CI_EXEC_ROOT dnf -y install epel-release
- ${CI_RETRY_EXE} CI_EXEC_ROOT dnf -y --allowerasing install "$CI_BASE_PACKAGES" "$PACKAGES"
-elif [ "$CI_USE_APT_INSTALL" != "no" ]; then
- if [[ "${ADD_UNTRUSTED_BPFCC_PPA}" == "true" ]]; then
- # Ubuntu 22.04 LTS and Debian 11 both have an outdated bpfcc-tools packages.
- # The iovisor PPA is outdated as well. The next Ubuntu and Debian releases will contain updated
- # packages. Meanwhile, use an untrusted PPA to install an up-to-date version of the bpfcc-tools
- # package.
- # TODO: drop this once we can use newer images in GCE
- CI_EXEC_ROOT add-apt-repository ppa:hadret/bpfcc
- fi
- if [[ -n "${APPEND_APT_SOURCES_LIST}" ]]; then
- CI_EXEC_ROOT echo "${APPEND_APT_SOURCES_LIST}" \>\> /etc/apt/sources.list
- fi
- ${CI_RETRY_EXE} CI_EXEC_ROOT apt-get update
- ${CI_RETRY_EXE} CI_EXEC_ROOT apt-get install --no-install-recommends --no-upgrade -y "$PACKAGES" "$CI_BASE_PACKAGES"
-fi
-
if [ -n "$PIP_PACKAGES" ]; then
if [ "$CI_OS_NAME" == "macos" ]; then
sudo -H pip3 install --upgrade pip
diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh
index 6c247abebe..075524741a 100755
--- a/ci/test/06_script_b.sh
+++ b/ci/test/06_script_b.sh
@@ -60,6 +60,7 @@ if [ "${RUN_TIDY}" = "true" ]; then
" src/rpc/signmessage.cpp"\
" src/test/fuzz/txorphan.cpp"\
" src/test/fuzz/util/"\
+ " src/uint256.cpp"\
" src/util/bip32.cpp"\
" src/util/bytevectorhash.cpp"\
" src/util/check.cpp"\
@@ -75,7 +76,12 @@ if [ "${RUN_TIDY}" = "true" ]; then
" src/util/syserror.cpp"\
" src/util/threadinterrupt.cpp"\
" src/zmq"\
- " -p . ${MAKEJOBS} -- -Xiwyu --cxx17ns -Xiwyu --mapping_file=${BASE_BUILD_DIR}/bitcoin-$HOST/contrib/devtools/iwyu/bitcoin.core.imp"
+ " -p . ${MAKEJOBS}"\
+ " -- -Xiwyu --cxx17ns -Xiwyu --mapping_file=${BASE_BUILD_DIR}/bitcoin-$HOST/contrib/devtools/iwyu/bitcoin.core.imp"\
+ " |& tee /tmp/iwyu_ci.out"
+ export P_CI_DIR="${BASE_ROOT_DIR}/src"
+ CI_EXEC "python3 ${DIR_IWYU}/include-what-you-use/fix_includes.py --nosafe_headers < /tmp/iwyu_ci.out"
+ CI_EXEC "git --no-pager diff"
fi
if [ "$RUN_SECURITY_TESTS" = "true" ]; then
diff --git a/ci/test_imagefile b/ci/test_imagefile
new file mode 100644
index 0000000000..4854708d1a
--- /dev/null
+++ b/ci/test_imagefile
@@ -0,0 +1,10 @@
+ARG CI_IMAGE_NAME_TAG
+FROM ${CI_IMAGE_NAME_TAG}
+
+ARG FILE_ENV
+ENV FILE_ENV=${FILE_ENV}
+
+COPY ./ci/retry/retry /usr/bin/retry
+COPY ./ci/test/00_setup_env.sh ./${FILE_ENV} ./ci/test/01_base_install.sh /ci_base_install/ci/test/
+
+RUN ["bash", "-c", "cd /ci_base_install/ && set -o errexit && source ./ci/test/00_setup_env.sh && ./ci/test/01_base_install.sh"]
diff --git a/configure.ac b/configure.ac
index aa1a45c527..0f809169fc 100644
--- a/configure.ac
+++ b/configure.ac
@@ -935,7 +935,9 @@ if test "$TARGET_OS" != "windows"; then
AX_CHECK_COMPILE_FLAG([-fPIC], [PIC_FLAGS="-fPIC"])
fi
-dnl All versions of gcc that we commonly use for building are subject to bug
+dnl Versions of gcc prior to 12.1 (commit
+dnl https://github.com/gcc-mirror/gcc/commit/551aa75778a4c5165d9533cd447c8fc822f583e1)
+dnl are subject to a bug, see the gccbug_90348 test case and
dnl https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90348. To work around that, set
dnl -fstack-reuse=none for all gcc builds. (Only gcc understands this flag)
AX_CHECK_COMPILE_FLAG([-fstack-reuse=none], [HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fstack-reuse=none"])
@@ -1416,14 +1418,15 @@ if test "$use_upnp" != "no"; then
[AC_CHECK_LIB([miniupnpc], [upnpDiscover], [MINIUPNPC_LIBS="$MINIUPNPC_LIBS -lminiupnpc"], [have_miniupnpc=no], [$MINIUPNPC_LIBS])],
[have_miniupnpc=no]
)
- dnl The minimum supported miniUPnPc API version is set to 10. This keeps compatibility
- dnl with Ubuntu 16.04 LTS and Debian 8 libminiupnpc-dev packages.
+
+ dnl The minimum supported miniUPnPc API version is set to 17. This excludes
+ dnl versions with known vulnerabilities.
if test "$have_miniupnpc" != "no"; then
AC_MSG_CHECKING([whether miniUPnPc API version is supported])
AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[
@%:@include <miniupnpc/miniupnpc.h>
]], [[
- #if MINIUPNPC_API_VERSION >= 10
+ #if MINIUPNPC_API_VERSION >= 17
// Everything is okay
#else
# error miniUPnPc API version is too old
@@ -1432,7 +1435,7 @@ if test "$use_upnp" != "no"; then
AC_MSG_RESULT([yes])
],[
AC_MSG_RESULT([no])
- AC_MSG_WARN([miniUPnPc API version < 10 is unsupported, disabling UPnP support.])
+ AC_MSG_WARN([miniUPnPc API version < 17 is unsupported, disabling UPnP support.])
have_miniupnpc=no
])
fi
@@ -1449,7 +1452,7 @@ if test "$use_natpmp" != "no"; then
CPPFLAGS="$TEMP_CPPFLAGS"
fi
-if test "$build_bitcoin_wallet$build_bitcoin_cli$build_bitcoin_tx$build_bitcoin_util$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench" = "nononononononono"; then
+if test "$build_bitcoin_wallet$build_bitcoin_cli$build_bitcoin_tx$build_bitcoin_util$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench$enable_fuzz_binary" = "nonononononononono"; then
use_boost=no
else
use_boost=yes
diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py
index 4b1cceb57c..f26236dd59 100755
--- a/contrib/devtools/symbol-check.py
+++ b/contrib/devtools/symbol-check.py
@@ -15,19 +15,19 @@ from typing import List, Dict
import lief #type:ignore
-# Debian 9 (Stretch) EOL: 2022. https://wiki.debian.org/DebianReleases#Production_Releases
+# Debian 10 (Buster) EOL: 2024. https://wiki.debian.org/LTS
#
-# - g++ version 6.3.0 (https://packages.debian.org/search?suite=stretch&arch=any&searchon=names&keywords=g%2B%2B)
-# - libc version 2.24 (https://packages.debian.org/search?suite=stretch&arch=any&searchon=names&keywords=libc6)
+# - libgcc version 8.3.0 (https://packages.debian.org/search?suite=buster&arch=any&searchon=names&keywords=libgcc1)
+# - libc version 2.28 (https://packages.debian.org/search?suite=buster&arch=any&searchon=names&keywords=libc6)
#
-# Ubuntu 16.04 (Xenial) EOL: 2026. https://wiki.ubuntu.com/Releases
+# Ubuntu 18.04 (Bionic) EOL: 2028. https://wiki.ubuntu.com/ReleaseTeam
#
-# - g++ version 5.3.1
-# - libc version 2.23
+# - libgcc version 8.4.0 (https://packages.ubuntu.com/bionic/libgcc1)
+# - libc version 2.27 (https://packages.ubuntu.com/bionic/libc6)
#
# CentOS Stream 8 EOL: 2024. https://wiki.centos.org/About/Product
#
-# - g++ version 8.5.0 (http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/)
+# - libgcc version 8.5.0 (http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/)
# - libc version 2.28 (http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/)
#
# See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html for more info.
@@ -35,10 +35,10 @@ import lief #type:ignore
MAX_VERSIONS = {
'GCC': (4,8,0),
'GLIBC': {
- lief.ELF.ARCH.x86_64: (2,18),
- lief.ELF.ARCH.ARM: (2,18),
- lief.ELF.ARCH.AARCH64:(2,18),
- lief.ELF.ARCH.PPC64: (2,18),
+ lief.ELF.ARCH.x86_64: (2,27),
+ lief.ELF.ARCH.ARM: (2,27),
+ lief.ELF.ARCH.AARCH64:(2,27),
+ lief.ELF.ARCH.PPC64: (2,27),
lief.ELF.ARCH.RISCV: (2,27),
},
'LIBATOMIC': (1,0),
diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py
index de73b02090..e304880140 100755
--- a/contrib/devtools/test-symbol-check.py
+++ b/contrib/devtools/test-symbol-check.py
@@ -38,31 +38,6 @@ class TestSymbolChecks(unittest.TestCase):
executable = 'test1'
cc = determine_wellknown_cmd('CC', 'gcc')
- # there's no way to do this test for RISC-V at the moment; we build for
- # RISC-V in a glibc 2.27 environment and we allow all symbols from 2.27.
- if 'riscv' in get_machine(cc):
- self.skipTest("test not available for RISC-V")
-
- # nextup was introduced in GLIBC 2.24, so is newer than our supported
- # glibc (2.18), and available in our release build environment (2.24).
- with open(source, 'w', encoding="utf8") as f:
- f.write('''
- #define _GNU_SOURCE
- #include <math.h>
-
- double nextup(double x);
-
- int main()
- {
- nextup(3.14);
- return 0;
- }
- ''')
-
- self.assertEqual(call_symbol_check(cc, source, executable, ['-lm']),
- (1, executable + ': symbol nextup from unsupported version GLIBC_2.24(3)\n' +
- executable + ': failed IMPORTED_SYMBOLS'))
-
# -lutil is part of the libc6 package so a safe bet that it's installed
# it's also out of context enough that it's unlikely to ever become a real dependency
source = 'test2.c'
diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh
index f2be3677eb..08a6c72a95 100755
--- a/contrib/guix/libexec/build.sh
+++ b/contrib/guix/libexec/build.sh
@@ -238,13 +238,6 @@ case "$HOST" in
*mingw*) HOST_LDFLAGS="-Wl,--no-insert-timestamp" ;;
esac
-# Using --no-tls-get-addr-optimize retains compatibility with glibc 2.18, by
-# avoiding a PowerPC64 optimisation available in glibc 2.22 and later.
-# https://sourceware.org/binutils/docs-2.35/ld/PowerPC64-ELF64.html
-case "$HOST" in
- *powerpc64*) HOST_LDFLAGS="${HOST_LDFLAGS} -Wl,--no-tls-get-addr-optimize" ;;
-esac
-
# Make $HOST-specific native binaries from depends available in $PATH
export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}"
mkdir -p "$DISTSRC"
diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm
index 8e5c89cc5e..379ad898c4 100644
--- a/contrib/guix/manifest.scm
+++ b/contrib/guix/manifest.scm
@@ -147,7 +147,7 @@ chain for " target " development."))
#:key
(base-gcc-for-libc base-gcc)
(base-kernel-headers base-linux-kernel-headers)
- (base-libc (make-glibc-with-bind-now (make-glibc-without-werror glibc-2.24)))
+ (base-libc (hardened-glibc (make-glibc-without-werror glibc-2.27)))
(base-gcc (make-gcc-rpath-link (hardened-gcc base-gcc))))
"Convenience wrapper around MAKE-CROSS-TOOLCHAIN with default values
desirable for building Bitcoin Core release binaries."
@@ -537,33 +537,14 @@ inspecting signatures in Mach-O binaries.")
(define (make-glibc-without-werror glibc)
(package-with-extra-configure-variable glibc "enable_werror" "no"))
-(define (make-glibc-with-stack-protector glibc)
- (package-with-extra-configure-variable glibc "--enable-stack-protector" "all"))
-
-(define (make-glibc-with-bind-now glibc)
- (package-with-extra-configure-variable glibc "--enable-bind-now" "yes"))
-
-(define-public glibc-2.24
- (package
- (inherit glibc-2.31)
- (version "2.24")
- (source (origin
- (method git-fetch)
- (uri (git-reference
- (url "https://sourceware.org/git/glibc.git")
- (commit "0d7f1ed30969886c8dde62fbf7d2c79967d4bace")))
- (file-name (git-file-name "glibc" "0d7f1ed30969886c8dde62fbf7d2c79967d4bace"))
- (sha256
- (base32
- "0g5hryia5v1k0qx97qffgwzrz4lr4jw3s5kj04yllhswsxyjbic3"))
- (patches (search-our-patches "glibc-ldd-x86_64.patch"
- "glibc-versioned-locpath.patch"
- "glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch"
- "glibc-2.24-no-build-time-cxx-header-run.patch"
- "glibc-2.24-fcommon.patch"
- "glibc-2.24-guix-prefix.patch"))))))
+;; https://www.gnu.org/software/libc/manual/html_node/Configuring-and-compiling.html
+(define (hardened-glibc glibc)
+ (package-with-extra-configure-variable (
+ package-with-extra-configure-variable glibc
+ "--enable-stack-protector" "all")
+ "--enable-bind-now" "yes"))
-(define-public glibc-2.27/bitcoin-patched
+(define-public glibc-2.27
(package
(inherit glibc-2.31)
(version "2.27")
@@ -571,14 +552,15 @@ inspecting signatures in Mach-O binaries.")
(method git-fetch)
(uri (git-reference
(url "https://sourceware.org/git/glibc.git")
- (commit "23158b08a0908f381459f273a984c6fd328363cb")))
- (file-name (git-file-name "glibc" "23158b08a0908f381459f273a984c6fd328363cb"))
+ (commit "73886db6218e613bd6d4edf529f11e008a6c2fa6")))
+ (file-name (git-file-name "glibc" "73886db6218e613bd6d4edf529f11e008a6c2fa6"))
(sha256
(base32
- "1b2n1gxv9f4fd5yy68qjbnarhf8mf4vmlxk10i3328c1w5pmp0ca"))
+ "0azpb9cvnbv25zg8019rqz48h8i2257ngyjg566dlnp74ivrs9vq"))
(patches (search-our-patches "glibc-ldd-x86_64.patch"
+ "glibc-versioned-locpath.patch"
"glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch"
- "glibc-2.27-dont-redefine-nss-database.patch"
+ "glibc-2.27-fcommon.patch"
"glibc-2.27-guix-prefix.patch"))))))
(packages->manifest
@@ -627,12 +609,7 @@ inspecting signatures in Mach-O binaries.")
(make-nsis-for-gcc-10 nsis-x86_64)
osslsigncode))
((string-contains target "-linux-")
- (list (cond ((string-contains target "riscv64-")
- (make-bitcoin-cross-toolchain target
- #:base-libc (make-glibc-with-stack-protector
- (make-glibc-with-bind-now (make-glibc-without-werror glibc-2.27/bitcoin-patched)))))
- (else
- (make-bitcoin-cross-toolchain target)))))
+ (list (make-bitcoin-cross-toolchain target)))
((string-contains target "darwin")
(list clang-toolchain-10 binutils cmake xorriso python-signapple))
(else '())))))
diff --git a/contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch b/contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch
deleted file mode 100644
index 5c4d0c6ebe..0000000000
--- a/contrib/guix/patches/glibc-2.24-elfm-loadaddr-dynamic-rewrite.patch
+++ /dev/null
@@ -1,62 +0,0 @@
-https://sourceware.org/git/?p=glibc.git;a=commit;h=a68ba2f3cd3cbe32c1f31e13c20ed13487727b32
-
-commit 6b02af31e9a721bb15a11380cd22d53b621711f8
-Author: Szabolcs Nagy <szabolcs.nagy@arm.com>
-Date: Wed Oct 18 17:26:23 2017 +0100
-
- [AARCH64] Rewrite elf_machine_load_address using _DYNAMIC symbol
-
- This patch rewrites aarch64 elf_machine_load_address to use special _DYNAMIC
- symbol instead of _dl_start.
-
- The static address of _DYNAMIC symbol is stored in the first GOT entry.
- Here is the change which makes this solution work (part of binutils 2.24):
- https://sourceware.org/ml/binutils/2013-06/msg00248.html
-
- i386, x86_64 targets use the same method to do this as well.
-
- The original implementation relies on a trick that R_AARCH64_ABS32 relocation
- being resolved at link time and the static address fits in the 32bits.
- However, in LP64, normally, the address is defined to be 64 bit.
-
- Here is the C version one which should be portable in all cases.
-
- * sysdeps/aarch64/dl-machine.h (elf_machine_load_address): Use
- _DYNAMIC symbol to calculate load address.
-
-diff --git a/sysdeps/aarch64/dl-machine.h b/sysdeps/aarch64/dl-machine.h
-index e86d8b5b63..5a5b8a5de5 100644
---- a/sysdeps/aarch64/dl-machine.h
-+++ b/sysdeps/aarch64/dl-machine.h
-@@ -49,26 +49,11 @@ elf_machine_load_address (void)
- /* To figure out the load address we use the definition that for any symbol:
- dynamic_addr(symbol) = static_addr(symbol) + load_addr
-
-- The choice of symbol is arbitrary. The static address we obtain
-- by constructing a non GOT reference to the symbol, the dynamic
-- address of the symbol we compute using adrp/add to compute the
-- symbol's address relative to the PC.
-- This depends on 32bit relocations being resolved at link time
-- and that the static address fits in the 32bits. */
--
-- ElfW(Addr) static_addr;
-- ElfW(Addr) dynamic_addr;
--
-- asm (" \n"
--" adrp %1, _dl_start; \n"
--" add %1, %1, #:lo12:_dl_start \n"
--" ldr %w0, 1f \n"
--" b 2f \n"
--"1: \n"
--" .word _dl_start \n"
--"2: \n"
-- : "=r" (static_addr), "=r" (dynamic_addr));
-- return dynamic_addr - static_addr;
-+ _DYNAMIC sysmbol is used here as its link-time address stored in
-+ the special unrelocated first GOT entry. */
-+
-+ extern ElfW(Dyn) _DYNAMIC[] attribute_hidden;
-+ return (ElfW(Addr)) &_DYNAMIC - elf_machine_dynamic ();
- }
-
- /* Set up the loaded object described by L so its unrelocated PLT
diff --git a/contrib/guix/patches/glibc-2.24-guix-prefix.patch b/contrib/guix/patches/glibc-2.24-guix-prefix.patch
deleted file mode 100644
index 875e8cd611..0000000000
--- a/contrib/guix/patches/glibc-2.24-guix-prefix.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-Without ffile-prefix-map, the debug symbols will contain paths for the
-guix store which will include the hashes of each package. However, the
-hash for the same package will differ when on different architectures.
-In order to be reproducible regardless of the architecture used to build
-the package, map all guix store prefixes to something fixed, e.g. /usr.
-
-We might be able to drop this in favour of using --with-nonshared-cflags
-when we being using newer versions of glibc.
-
---- a/Makeconfig
-+++ b/Makeconfig
-@@ -950,6 +950,10 @@ object-suffixes-for-libc += .oS
- # shared objects. We don't want to use CFLAGS-os because users may, for
- # example, make that processor-specific.
- CFLAGS-.oS = $(CFLAGS-.o) $(PIC-ccflag)
-+
-+# Map Guix store paths to /usr
-+CFLAGS-.oS += `find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;`
-+
- CPPFLAGS-.oS = $(CPPFLAGS-.o) -DPIC -DLIBC_NONSHARED=1
- libtype.oS = lib%_nonshared.a
- endif
---
-2.35.1
-
diff --git a/contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch b/contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch
deleted file mode 100644
index 11fe7fdc99..0000000000
--- a/contrib/guix/patches/glibc-2.24-no-build-time-cxx-header-run.patch
+++ /dev/null
@@ -1,100 +0,0 @@
-https://sourceware.org/git/?p=glibc.git;a=commit;h=fc3e1337be1c6935ab58bd13520f97a535cf70cc
-
-commit dc23a45db566095e83ff0b7a57afc87fb5ca89a1
-Author: Florian Weimer <fweimer@redhat.com>
-Date: Wed Sep 21 10:45:32 2016 +0200
-
- Avoid running $(CXX) during build to obtain header file paths
-
- This reduces the build time somewhat and is particularly noticeable
- during rebuilds with few code changes.
-
-diff --git a/Makerules b/Makerules
-index 7e4077ee50..c338850de5 100644
---- a/Makerules
-+++ b/Makerules
-@@ -121,14 +121,10 @@ ifneq (,$(CXX))
- # will be used instead of /usr/include/stdlib.h and /usr/include/math.h.
- before-compile := $(common-objpfx)cstdlib $(common-objpfx)cmath \
- $(before-compile)
--cstdlib=$(shell echo "\#include <cstdlib>" | $(CXX) -M -MP -x c++ - \
-- | sed -n "/cstdlib:/{s/:$$//;p}")
--$(common-objpfx)cstdlib: $(cstdlib)
-+$(common-objpfx)cstdlib: $(c++-cstdlib-header)
- $(INSTALL_DATA) $< $@T
- $(move-if-change) $@T $@
--cmath=$(shell echo "\#include <cmath>" | $(CXX) -M -MP -x c++ - \
-- | sed -n "/cmath:/{s/:$$//;p}")
--$(common-objpfx)cmath: $(cmath)
-+$(common-objpfx)cmath: $(c++-cmath-header)
- $(INSTALL_DATA) $< $@T
- $(move-if-change) $@T $@
- endif
-diff --git a/config.make.in b/config.make.in
-index 95c6f36876..04a8b3ed7f 100644
---- a/config.make.in
-+++ b/config.make.in
-@@ -45,6 +45,8 @@ defines = @DEFINES@
- sysheaders = @sysheaders@
- sysincludes = @SYSINCLUDES@
- c++-sysincludes = @CXX_SYSINCLUDES@
-+c++-cstdlib-header = @CXX_CSTDLIB_HEADER@
-+c++-cmath-header = @CXX_CMATH_HEADER@
- all-warnings = @all_warnings@
- enable-werror = @enable_werror@
-
-diff --git a/configure b/configure
-index 17625e1041..6ff252744b 100755
---- a/configure
-+++ b/configure
-@@ -635,6 +635,8 @@ BISON
- INSTALL_INFO
- PERL
- BASH_SHELL
-+CXX_CMATH_HEADER
-+CXX_CSTDLIB_HEADER
- CXX_SYSINCLUDES
- SYSINCLUDES
- AUTOCONF
-@@ -5054,6 +5056,18 @@ fi
-
-
-
-+# Obtain some C++ header file paths. This is used to make a local
-+# copy of those headers in Makerules.
-+if test -n "$CXX"; then
-+ find_cxx_header () {
-+ echo "#include <$1>" | $CXX -M -MP -x c++ - | sed -n "/$1:/{s/:\$//;p}"
-+ }
-+ CXX_CSTDLIB_HEADER="$(find_cxx_header cstdlib)"
-+ CXX_CMATH_HEADER="$(find_cxx_header cmath)"
-+fi
-+
-+
-+
- # Test if LD_LIBRARY_PATH contains the notation for the current directory
- # since this would lead to problems installing/building glibc.
- # LD_LIBRARY_PATH contains the current directory if one of the following
-diff --git a/configure.ac b/configure.ac
-index 33bcd62180..9938ab0dc2 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -1039,6 +1039,18 @@ fi
- AC_SUBST(SYSINCLUDES)
- AC_SUBST(CXX_SYSINCLUDES)
-
-+# Obtain some C++ header file paths. This is used to make a local
-+# copy of those headers in Makerules.
-+if test -n "$CXX"; then
-+ find_cxx_header () {
-+ echo "#include <$1>" | $CXX -M -MP -x c++ - | sed -n "/$1:/{s/:\$//;p}"
-+ }
-+ CXX_CSTDLIB_HEADER="$(find_cxx_header cstdlib)"
-+ CXX_CMATH_HEADER="$(find_cxx_header cmath)"
-+fi
-+AC_SUBST(CXX_CSTDLIB_HEADER)
-+AC_SUBST(CXX_CMATH_HEADER)
-+
- # Test if LD_LIBRARY_PATH contains the notation for the current directory
- # since this would lead to problems installing/building glibc.
- # LD_LIBRARY_PATH contains the current directory if one of the following
diff --git a/contrib/guix/patches/glibc-2.27-dont-redefine-nss-database.patch b/contrib/guix/patches/glibc-2.27-dont-redefine-nss-database.patch
deleted file mode 100644
index 16a595d613..0000000000
--- a/contrib/guix/patches/glibc-2.27-dont-redefine-nss-database.patch
+++ /dev/null
@@ -1,87 +0,0 @@
-commit 78a90c2f74a2012dd3eff302189e47ff6779a757
-Author: Andreas Schwab <schwab@linux-m68k.org>
-Date: Fri Mar 2 23:07:14 2018 +0100
-
- Fix multiple definitions of __nss_*_database (bug 22918)
-
- (cherry picked from commit eaf6753f8aac33a36deb98c1031d1bad7b593d2d)
-
-diff --git a/nscd/gai.c b/nscd/gai.c
-index d081747797..576fd0045b 100644
---- a/nscd/gai.c
-+++ b/nscd/gai.c
-@@ -45,3 +45,6 @@
- #ifdef HAVE_LIBIDN
- # include <libidn/idn-stub.c>
- #endif
-+
-+/* Some variables normally defined in libc. */
-+service_user *__nss_hosts_database attribute_hidden;
-diff --git a/nss/nsswitch.c b/nss/nsswitch.c
-index d5e655974f..b0f0c11a3e 100644
---- a/nss/nsswitch.c
-+++ b/nss/nsswitch.c
-@@ -62,7 +62,7 @@ static service_library *nss_new_service (name_database *database,
-
- /* Declare external database variables. */
- #define DEFINE_DATABASE(name) \
-- extern service_user *__nss_##name##_database attribute_hidden; \
-+ service_user *__nss_##name##_database attribute_hidden; \
- weak_extern (__nss_##name##_database)
- #include "databases.def"
- #undef DEFINE_DATABASE
-diff --git a/nss/nsswitch.h b/nss/nsswitch.h
-index eccb535ef5..63573b9ebc 100644
---- a/nss/nsswitch.h
-+++ b/nss/nsswitch.h
-@@ -226,10 +226,10 @@ libc_hidden_proto (__nss_hostname_digits_dots)
- #define MAX_NR_ADDRS 48
-
- /* Prototypes for __nss_*_lookup2 functions. */
--#define DEFINE_DATABASE(arg) \
-- service_user *__nss_##arg##_database attribute_hidden; \
-- int __nss_##arg##_lookup2 (service_user **, const char *, \
-- const char *, void **); \
-+#define DEFINE_DATABASE(arg) \
-+ extern service_user *__nss_##arg##_database attribute_hidden; \
-+ int __nss_##arg##_lookup2 (service_user **, const char *, \
-+ const char *, void **); \
- libc_hidden_proto (__nss_##arg##_lookup2)
- #include "databases.def"
- #undef DEFINE_DATABASE
-diff --git a/posix/tst-rfc3484-2.c b/posix/tst-rfc3484-2.c
-index f509534ca9..8c64ac59ff 100644
---- a/posix/tst-rfc3484-2.c
-+++ b/posix/tst-rfc3484-2.c
-@@ -58,6 +58,7 @@ _res_hconf_init (void)
- #undef USE_NSCD
- #include "../sysdeps/posix/getaddrinfo.c"
-
-+service_user *__nss_hosts_database attribute_hidden;
-
- /* This is the beginning of the real test code. The above defines
- (among other things) the function rfc3484_sort. */
-diff --git a/posix/tst-rfc3484-3.c b/posix/tst-rfc3484-3.c
-index ae44087a10..1c61aaf844 100644
---- a/posix/tst-rfc3484-3.c
-+++ b/posix/tst-rfc3484-3.c
-@@ -58,6 +58,7 @@ _res_hconf_init (void)
- #undef USE_NSCD
- #include "../sysdeps/posix/getaddrinfo.c"
-
-+service_user *__nss_hosts_database attribute_hidden;
-
- /* This is the beginning of the real test code. The above defines
- (among other things) the function rfc3484_sort. */
-diff --git a/posix/tst-rfc3484.c b/posix/tst-rfc3484.c
-index 7f191abbbc..8f45848e44 100644
---- a/posix/tst-rfc3484.c
-+++ b/posix/tst-rfc3484.c
-@@ -58,6 +58,7 @@ _res_hconf_init (void)
- #undef USE_NSCD
- #include "../sysdeps/posix/getaddrinfo.c"
-
-+service_user *__nss_hosts_database attribute_hidden;
-
- /* This is the beginning of the real test code. The above defines
- (among other things) the function rfc3484_sort. */
diff --git a/contrib/guix/patches/glibc-2.24-fcommon.patch b/contrib/guix/patches/glibc-2.27-fcommon.patch
index 2bc32ede90..f3baacab98 100644
--- a/contrib/guix/patches/glibc-2.24-fcommon.patch
+++ b/contrib/guix/patches/glibc-2.27-fcommon.patch
@@ -18,15 +18,15 @@ Date: Fri May 6 11:03:04 2022 +0100
https://sourceware.org/git/?p=glibc.git;a=commit;h=7650321ce037302bfc2f026aa19e0213b8d02fe6
diff --git a/Makeconfig b/Makeconfig
-index ee379f5852..63c4a2f234 100644
+index 86a71e5802..aa2166be60 100644
--- a/Makeconfig
+++ b/Makeconfig
-@@ -824,7 +824,7 @@ ifeq "$(strip $(+cflags))" ""
- +cflags := $(default_cflags)
+@@ -896,7 +896,7 @@ ifeq "$(strip $(+cflags))" ""
endif # $(+cflags) == ""
--+cflags += $(cflags-cpu) $(+gccwarn) $(+merge-constants) $(+math-flags)
-++cflags += $(cflags-cpu) $(+gccwarn) $(+merge-constants) $(+math-flags) -fcommon
+ +cflags += $(cflags-cpu) $(+gccwarn) $(+merge-constants) $(+math-flags) \
+- $(+stack-protector)
++ $(+stack-protector) -fcommon
+gcc-nowarn := -w
# Don't duplicate options if we inherited variables from the parent.
diff --git a/contrib/guix/patches/glibc-2.27-guix-prefix.patch b/contrib/guix/patches/glibc-2.27-guix-prefix.patch
index d777af74f0..6648bc6c05 100644
--- a/contrib/guix/patches/glibc-2.27-guix-prefix.patch
+++ b/contrib/guix/patches/glibc-2.27-guix-prefix.patch
@@ -20,6 +20,3 @@ when we being using newer versions of glibc.
libtype.o := lib%.a
object-suffixes += .o
ifeq (yes,$(build-shared))
---
-2.35.1
-
diff --git a/contrib/guix/patches/glibc-ldd-x86_64.patch b/contrib/guix/patches/glibc-ldd-x86_64.patch
index b1b6d5a548..a23b095caa 100644
--- a/contrib/guix/patches/glibc-ldd-x86_64.patch
+++ b/contrib/guix/patches/glibc-ldd-x86_64.patch
@@ -1,8 +1,8 @@
By default, 'RTDLLIST' in 'ldd' refers to 'lib64/ld-linux-x86-64.so', whereas
it's in 'lib/' for us. This patch fixes that.
---- glibc-2.17/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed 2012-12-25 04:02:13.000000000 +0100
-+++ glibc-2.17/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed 2013-09-15 23:08:03.000000000 +0200
+--- a/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed
++++ b/sysdeps/unix/sysv/linux/x86_64/ldd-rewrite.sed
@@ -1,3 +1,3 @@
/LD_TRACE_LOADED_OBJECTS=1/a\
add_env="$add_env LD_LIBRARY_VERSION=\\$verify_out"
diff --git a/contrib/seeds/README.md b/contrib/seeds/README.md
index b2ea7522ac..b0bbe96493 100644
--- a/contrib/seeds/README.md
+++ b/contrib/seeds/README.md
@@ -13,6 +13,6 @@ data. Run the following commands from the `/contrib/seeds` directory:
curl https://bitcoin.sipa.be/seeds.txt.gz | gzip -dc > seeds_main.txt
curl https://bitcoin.sipa.be/asmap-filled.dat > asmap-filled.dat
- python3 makeseeds.py -a asmap-filled.dat < seeds_main.txt > nodes_main.txt
+ python3 makeseeds.py -a asmap-filled.dat -s seeds_main.txt > nodes_main.txt
cat nodes_main_manual.txt >> nodes_main.txt
python3 generate-seeds.py . > ../../src/chainparamsseeds.h
diff --git a/contrib/seeds/makeseeds.py b/contrib/seeds/makeseeds.py
index eda58c370f..23d38ee48d 100755
--- a/contrib/seeds/makeseeds.py
+++ b/contrib/seeds/makeseeds.py
@@ -173,6 +173,7 @@ def ip_stats(ips: List[Dict]) -> str:
def parse_args():
argparser = argparse.ArgumentParser(description='Generate a list of bitcoin node seed ip addresses.')
argparser.add_argument("-a","--asmap", help='the location of the asmap asn database file (required)', required=True)
+ argparser.add_argument("-s","--seeds", help='the location of the DNS seeds file (required)', required=True)
return argparser.parse_args()
def main():
@@ -184,7 +185,8 @@ def main():
print('Done.', file=sys.stderr)
print('Loading and parsing DNS seeds…', end='', file=sys.stderr, flush=True)
- lines = sys.stdin.readlines()
+ with open(args.seeds, 'r', encoding='utf8') as f:
+ lines = f.readlines()
ips = [parseline(line) for line in lines]
print('Done.', file=sys.stderr)
diff --git a/contrib/verify-commits/allow-revsig-commits b/contrib/verify-commits/allow-revsig-commits
index 0bb299b8fa..0c43d9cce5 100644
--- a/contrib/verify-commits/allow-revsig-commits
+++ b/contrib/verify-commits/allow-revsig-commits
@@ -643,3 +643,178 @@ b7365f0545b1a6862e3277b2b2139ee0d5aee1cf
4bd0e9b90a39c5c6a016b83882ae44cb4d28f1f8
7438ceac716fdfe6621728c05e718eaa89dd89aa
4e3efd47e0d50c6cd1dc81ccc9669a5b2658f495
+5ab6a942764bf6577ae311f2551153dde3d4830c
+b04f42efe31e23e15cc945efe0de906ed2eadb2b
+ceae0eb7e31f9d3495a13a23df7372e5e870b572
+5bf65ec66e5986c9188e3f6234f1c5c0f8dc7f90
+55c9e2d790fa2e137ccd0d91e6cf3e2d0bff4813
+ba29911e21c88f49780c6c87f94ff8ed6e764a9d
+fffff0abb9c71f0af83a7925db3c293b3bb12158
+aaeb315ff0f7956449a92736160795f0140369e3
+0dd34773334c7f4db7b05df30ee61b011795b46d
+2598720d6c1ef15288045599de7e65f8707d09bc
+bc83710fdcc09d8e427e77457df107acc9db1be5
+ddd7a39aa960ee3639ef1e59b2e53852e0862c52
+0808c88d7bd992d5c9ded0009c9563f6177b4035
+a085a554913ae8f4ed83afac830ce6dc39c9cc65
+b1a824dd06aa58618947783edee2dd891b5204dc
+a4e066af8573dcefb11dff120e1c09e8cf7f40c2
+58b9d6cf9e9b801be9c677a3ae121e5d2950ce66
+7377ed778c6d832ecd291e65b2789af7bac2ae2b
+c3a41ad980cc5149de3f9ec8414962c183b1fed9
+5884a47c367f6ff1aff3ae1ef6894881c5a5e0b7
+1d39c9ca0672e7ad4c1f0959f9d58d2fcc7dc46b
+e16f6441044fc2123e0cbdcbd8a5842ec3aae7a0
+6c6cc7989cac79450bf83b932ca82d390a37e17b
+bc28ca3afb7f6656a0bf50038a5e383ee7f9b219
+57a491bee17af88f75c2cea8c109d93b1cdbc9a8
+f8586b25f6a4f1e30a54e58f45dd28aaf580bbc6
+e5df0ba0d97e5f8cfd748f09d6ed77b7bfc45471
+1b0469199bdaedfd452eea718268be7fd50db3c0
+015717e2b873b7a2ce433bd3be2328a782aa5d91
+3b3c66f85959f3393a3a9e87a29004b526f91b93
+874529665c1c326fc86fc0d0d6c3512fab087ab8
+7f2c983e1cfdb58b6f84eabe5ff6a16f143f39aa
+0ea92cad5274f3939f09d6890da31a21b8481282
+489b5876698f9bb2d93b1b1d62d514148b31effd
+faf25b09d9e78f2ff129e25b90f67930d2fc1c4f
+df933596e7e9aa17f7e5cd6e1c850520f5b56f1b
+9e4fbebcc8e497016563e46de4c64fa094edab2d
+1557014378cc5a6234a9244fa60132955206fd27
+c5fbcf5f8d7b36bee54ac80d1027d0dccea2aa75
+cccbc5fe3ea5ae52426203f4485b11071fbe4b6e
+5174a139c92c1238f9700d06e362dc628d81a0a9
+9dae9f5f1e2bf29f58d3f49b0c612063d883b8b3
+e282764e049523439bc8adaadc002a1420122830
+d8ae5044488248d5eb134aa7c0a15c813a2f8715
+06ea2783a2c11e7b171e2809c3211bb3091d894d
+00ce8543f16f4357926eb6dc701ac6229142be80
+1f63b460a8506675ccacb4647941f07d391735e3
+a100c42a136da5ddfd09aa442543ec2190f24faf
+636991d0c0f969968c790d490c82c1d2fa4e8047
+dd52f79a73eca18301db1569d517197160018dbb
+e157b98640c7cfb94cae7e0faca3bcffc2dde990
+ad9e5eaf77bf7e19a926a43407c88386a8a1e58f
+c5e67be03bb06a5d7885c55db1f016fbf2333fe3
+48eec32347494da781f26478fa488b28336afbd2
+c324b07a541a04698954ece94e5879ae7131c1c7
+4901631dac6a883c6ddd0d4e5e3edd08b10d7609
+cacbdbaa95317b45cf2100702bca92410fb43b9a
+b4f686952a60bbadc7ed2250651d0d6af0959f4d
+90e49c1ececd6296c6ec6109cea525a208c0626e
+700808754884919916a5518e7ecfdabadef956d8
+0cd1a2eff9e0020ec1052a931f3863794d1a95d9
+51527ec1ec4264f7e24ef548bb049db07a89fc7f
+ed4eeafbb6e2e73ff9fb9c03bd66bbb049b8aacd
+d4475ea7ae70ad1a1f9374b88c68f01590a88d54
+5e1aacab576b8d8918da129097a9ac0816b6ead2
+fe6a299fc0020cd62156d4b7dd9c8dac358c69c5
+0047d9b89b9fa6be660c363961cf0af72fa62ecf
+037c5e511fe2185d244049cae25a98f99b878787
+8730bd3fc87c8a7d44347267e1ff3c7a8674201b
+47b8256da872722953693c4037d1b9e07caadcb1
+85aea18ae660b5edf7b6c1415f033cfcb15307f9
+132d5f8c2f2397a4600a42203f413dafdb6bcc37
+23ebd7a8027f12e722834d214113892fe8561fe1
+a19f641a80cb2841ca9f593e9be589dbc4b4ae7c
+1e7db37e76c510d373c4404eea2b97508b367aca
+16fa967d3cca66eef0f17b41fd8aaee6a1420fbc
+9eedbe98c86ff2a9214c24c37f6524ce67fd129b
+0342ae1d395ca82614f6d3b8fabb6a44403baf2a
+777b89b3008e53374eb13fdee70db315cd61a703
+8b686776ef5cbd6ef9d5281c3136eded25ea35a4
+c90b42bcdb594638c5759ef5ef0773314d0a1379
+7134327be5c1bdcef7919ed735049a6bbfc457ec
+e88a52e9a2fda971d34425bb80e42ad2d6623d68
+173c79626867e9f89d49be7dcbb0c2042c480553
+2513499348fa955d0e4b0970b08ba9e715e6316e
+43bb10661360d9f35d921d493a1f94ac95df00e2
+6f55ab57cbfa414d57a8e9fb9a47f9bcd8c836d7
+6300b9556ec927a61371053fafe1a4045f5afb00
+f8b2e9bcfc76fede05f5e12f7b15f0d9c9d0add5
+b297b945f7610772434817181ad12067b2832565
+57a73d71a36ce212977607d3e94de6ef55521bfc
+5fdf37e14bb3b66264a7e6868250c2084ac39054
+3059d4dd72af73b654077d9f72019c47edd47674
+333a41882c5ccd5f0c7f884f97d25449bdeec07b
+7da4f65a00a8d96da2119de613ed7fbee2a28a0d
+e14f0fa6a346afecbb1d5470aef5226a8cc33e57
+cf0a8b9c4870cc88254a757286140d9632e7b70c
+b69fd5eaa99f84b62a49d7c7f48d8cee1227592a
+1e3ed01faa77215a7c36308237280aaa58895532
+6c9bc14a3f2cfa50144607c820ebab5288f9571c
+8e3c266a4f02093d57d563f32ba73d3ab4b5f208
+decde9bba6f9d3671bdf0af4fe6ff4bf28992d1d
+9b7eb584ade2ce73dbfcda080935172c3857b758
+3bbc46ddafb61f68785c7e581817db952f99d93a
+bbb83f0b2b2671980c06453fd243b1f2801a1cc4
+6c9460edaeb6c89692b71f51be7b7ee386f4f5c4
+b3072799248fae8fc16f910b642edb9c5acf8bac
+696d39410fc3372d120a6e89695c1543ac2fc052
+c5c4fb31828107a5ded88627632e19e05b2c7e83
+9ce1c506a3a5d20b1bf254235bfae48af592d86c
+fe66dad8a779ed928b1c2fc0c3accf594b042877
+f421de5be611f874a027392d5fee7e113dce4f54
+d492dc1cdaabdc52b0766bf4cba4bd73178325d0
+6348bc61b533705a229f2c2ddcff2bdd98849d07
+83b26cb97cb46516aa4fdee3bcbfa751d28c1233
+afac75f140a3e7d89877f03420e1bc64a8d8c6cd
+171f6f2699dc27e77843318be2fefdfcd9e589fb
+50c806f001d66e20f314777b9fa7fefa01dc6893
+bdbabc50ba6c87ded97ea2bbacd3605c59cd12d0
+9e32adbb5c543885b2c01a984bf1e4b80e8cec16
+7c08d81e119570792648fe95bbacddbb1d5f9ae2
+65e9ca22785f4a799cbcff6d95cbe1ce4b4a6bd2
+2948d6dea098bf722828b969838668f833c2cb00
+deb847b75710d600e5b0d3d5c77fa5166d80808a
+05e5af5a6c884d2ade3d7acc766ad5380cb85b64
+cba41db327a241f992f9329b214d9070888255b8
+f6d335e82822ed8f2da56c1bcaddf7f99acd4997
+30308cc380a8176a5ec0e0bd2beed8b9c482ccf7
+8b6cd42c6226dea28c182a48a214d1c091b9b5bb
+267917f5632a99bb51fc3fe516d8308e79d31ed1
+ba11eb354b9f3420ebb8608227062fb639a07496
+848b11615b67a3c49f76ebbcaa241a322d8014d8
+25290071c434638e2719a99784572deef44542ad
+159f89c118645c0f9e23e453f01cede84abac795
+37637bea3a9a48c0d52d68d3f78f154f8249a009
+0a76bf848c72211f986a6cc5b1c13de820b861dd
+358fe779cbb2681666ae5ab23a19662db21a2c46
+c44e734dca64a15fae92255a5d848c04adaad2fa
+8add59d77dd621be57059229f378822e4b707318
+922c49a1389531d9fba30168257c466bd413f625
+df0825046acc7cb496c47666e36af18118beb030
+c23bf06492dddacfc0eece3d4dd12cce81496dd0
+3eec29ed3aa1c8eb293a7a7a6be356fc014f8813
+a7e80449c0811b361cdaea39b6bab78ca5fbf668
+5e8e0b3d7f6055e326bda61e60712b530e8920f0
+a5edd191be93aff8f9c0f60f04e711e2e78ecc77
+515200298b555845696a07ae2bc0a84a5dd02ae4
+e8a3882e20f0ffeeb9b3964c2c09d8aa5eb53fd4
+c545a7aeb1d559377933c7b2e6edc2d4a37b33fb
+df669230cf2001dd869e897bb4f2d9c46f9accd9
+56a0fbf8365343d73cdff2b0a0e16542294d7577
+196b4599201dbce3e0317e9b98753fa6a244b82d
+cf5bb048e80d4cde8828787b266b7f5f2e3b6d7b
+b94d0c7af11bd91dad4f180ce2a2ffa09e4b5668
+792d0d8d512cf8ddca200317b74ce550c1a1a428
+767ee2e3a1082468b4e2248bac3ef8bd54bb2ddb
+31db3dd874dfbba88616c96a5767e2c9861d9a7a
+018fd9620293582f0ce43d344ac3110e19c4dedc
+801aaac2b39564aa14009785146ba26d2506fb53
+121d47afe3e67ff7f94d26e08a39573dccf652aa
+af7fba3af788e91a460582351d40f8f5e2118760
+8f1c28a609b203e0d0a844d9cc5ada9eb9160a5e
+8319c4e906e6df5f2048e7c048942fde285a93a2
+66be456d93a66526322b7f36fd734a8dbd5e5524
+c006ab29ceec9274dc85a0de7f7d0502021a4b87
+1220af5e6d1072ea306f6ecaaa7effe3d386c379
+14ba286556faad794f288ef38493c540382897cb
+784a21d35466736a7a372364498ed94482a12a2a
+4ad59042b359f473d5888ecee0c9288dcf98f1c9
+fee16b15fa3425871670239c25d4e61ae961e0c5
+216f4ca9e7ccb1f0fcb9bab0f9940992a87ae55f
+2d0bdb2089644f5904629413423cdc897911b081
+50c502f54abd9eb15c8ddca013f0fdfae3d132a9
+c840ab0231bc29057172179f005001c9ab299554
+aab5e48d422d396aec09bd6389de68613b19def5
diff --git a/contrib/verify-commits/trusted-git-root b/contrib/verify-commits/trusted-git-root
index 1c42195961..efb6b9f7b4 100644
--- a/contrib/verify-commits/trusted-git-root
+++ b/contrib/verify-commits/trusted-git-root
@@ -1 +1 @@
-577bd51a4b8de066466a445192c1c653872657e2
+8ef096d4f8e08ac691502e3fd34721a8bdfa9044
diff --git a/contrib/verify-commits/trusted-keys b/contrib/verify-commits/trusted-keys
index 5ca65e7b0d..eeafcdf205 100644
--- a/contrib/verify-commits/trusted-keys
+++ b/contrib/verify-commits/trusted-keys
@@ -1,4 +1,3 @@
-71A3B16735405025D447E8F274810B012346C9A6
B8B3F1C0E58C15DB6A81D30C3648A882F4316B9B
E777299FC265DD04793070EB944D35F9AC3DB76A
D1DBF2C4B96F2DEBF4C16654410108112E7EA81F
diff --git a/contrib/verifybinaries/README.md b/contrib/verifybinaries/README.md
index c50d4bef71..ab831eea28 100644
--- a/contrib/verifybinaries/README.md
+++ b/contrib/verifybinaries/README.md
@@ -1,16 +1,5 @@
### Verify Binaries
-#### Preparation:
-
-Make sure you obtain the proper release signing key and verify the fingerprint with several independent sources.
-
-```sh
-$ gpg --fingerprint "Bitcoin Core binary release signing key"
-pub 4096R/36C2E964 2015-06-24 [expires: YYYY-MM-DD]
- Key fingerprint = 01EA 5486 DE18 A882 D4C2 6845 90C8 019E 36C2 E964
-uid Wladimir J. van der Laan (Bitcoin Core binary release signing key) <laanwj@gmail.com>
-```
-
#### Usage:
This script attempts to download the signature file `SHA256SUMS.asc` from https://bitcoin.org.
diff --git a/depends/funcs.mk b/depends/funcs.mk
index 6b333f940e..f0bbf4a168 100644
--- a/depends/funcs.mk
+++ b/depends/funcs.mk
@@ -234,7 +234,9 @@ $($(1)_postprocessed): | $($(1)_staged)
touch $$@
$($(1)_cached): | $($(1)_dependencies) $($(1)_postprocessed)
echo Caching $(1)...
- cd $$($(1)_staging_dir)/$(host_prefix); find . | sort | $(build_TAR) --no-recursion -czf $$($(1)_staging_dir)/$$(@F) -T -
+ cd $$($(1)_staging_dir)/$(host_prefix); \
+ find . ! -name '.stamp_postprocessed' -print0 | TZ=UTC xargs -0r touch -h -m -t 200001011200; \
+ find . ! -name '.stamp_postprocessed' | LC_ALL=C sort | $(build_TAR) --numeric-owner --no-recursion -czf $$($(1)_staging_dir)/$$(@F) -T -
mkdir -p $$(@D)
rm -rf $$(@D) && mkdir -p $$(@D)
mv $$($(1)_staging_dir)/$$(@F) $$(@)
diff --git a/depends/packages/bdb.mk b/depends/packages/bdb.mk
index 262587690c..d607336059 100644
--- a/depends/packages/bdb.mk
+++ b/depends/packages/bdb.mk
@@ -15,7 +15,7 @@ $(package)_config_opts_netbsd=--with-pic
$(package)_config_opts_openbsd=--with-pic
$(package)_config_opts_android=--with-pic
$(package)_cflags+=-Wno-error=implicit-function-declaration -Wno-error=format-security
-$(package)_cppflags_freebsd=-D_XOPEN_SOURCE=600
+$(package)_cppflags_freebsd=-D_XOPEN_SOURCE=600 -D__BSD_VISIBLE=1
$(package)_cppflags_netbsd=-D_XOPEN_SOURCE=600
$(package)_cppflags_openbsd=-D_XOPEN_SOURCE=600
$(package)_cppflags_mingw32=-DUNICODE -D_UNICODE
diff --git a/depends/packages/zeromq.mk b/depends/packages/zeromq.mk
index 267ed11253..d715232793 100644
--- a/depends/packages/zeromq.mk
+++ b/depends/packages/zeromq.mk
@@ -20,12 +20,12 @@ endef
define $(package)_preprocess_cmds
patch -p1 < $($(package)_patch_dir)/remove_libstd_link.patch && \
- patch -p1 < $($(package)_patch_dir)/netbsd_kevent_void.patch && \
- cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub config
+ patch -p1 < $($(package)_patch_dir)/netbsd_kevent_void.patch
endef
define $(package)_config_cmds
./autogen.sh && \
+ cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub config && \
$($(package)_autoconf)
endef
diff --git a/doc/build-freebsd.md b/doc/build-freebsd.md
index d45e9c4d0d..aa10e4a891 100644
--- a/doc/build-freebsd.md
+++ b/doc/build-freebsd.md
@@ -36,13 +36,30 @@ pkg install sqlite3
```
###### Legacy Wallet Support
-`db5` is only required to support legacy wallets.
-Skip if you don't intend to use legacy wallets.
+BerkeleyDB is only required for legacy wallet support.
+
+Berkeley DB 4.8 specifically is required. You **cannot** use the BerkeleyDB
+library from ports. However, you can build DB 4.8 yourself [using depends](/depends).
-```bash
-pkg install db5
```
----
+gmake -C depends NO_BOOST=1 NO_LIBEVENT=1 NO_QT=1 NO_SQLITE=1 NO_NATPMP=1 NO_UPNP=1 NO_ZMQ=1 NO_USDT=1
+```
+
+When the build is complete, the Berkeley DB installation location will be displayed:
+
+```
+to: /path/to/bitcoin/depends/x86_64-unknown-freebsd[release-number]
+```
+
+Finally, set `BDB_PREFIX` to this path according to your shell:
+
+```
+csh: setenv BDB_PREFIX [path displayed above]
+```
+
+```
+sh/bash: export BDB_PREFIX=[path displayed above]
+```
#### GUI Dependencies
###### Qt5
@@ -91,12 +108,12 @@ This explicitly enables the GUI and disables legacy wallet support, assuming `sq
##### Descriptor & Legacy Wallet. No GUI:
This enables support for both wallet types and disables the GUI, assuming
-`sqlite3` and `db5` are both installed.
+`sqlite3` and `db4` are both installed.
```bash
./autogen.sh
-./configure --with-gui=no --with-incompatible-bdb \
- BDB_LIBS="-ldb_cxx-5" \
- BDB_CFLAGS="-I/usr/local/include/db5" \
+./configure --with-gui=no \
+ BDB_LIBS="-L${BDB_PREFIX}/lib -ldb_cxx-4.8" \
+ BDB_CFLAGS="-I${BDB_PREFIX}/include" \
MAKE=gmake
```
diff --git a/doc/dependencies.md b/doc/dependencies.md
index 3349c81c46..a9ca5b3e7a 100644
--- a/doc/dependencies.md
+++ b/doc/dependencies.md
@@ -19,7 +19,7 @@ You can find installation instructions in the `build-*.md` file for your platfor
| --- | --- | --- | --- | --- |
| [Boost](../depends/packages/boost.mk) | [link](https://www.boost.org/users/download/) | [1.81.0](https://github.com/bitcoin/bitcoin/pull/26557) | [1.64.0](https://github.com/bitcoin/bitcoin/pull/22320) | No |
| [libevent](../depends/packages/libevent.mk) | [link](https://github.com/libevent/libevent/releases) | [2.1.12-stable](https://github.com/bitcoin/bitcoin/pull/21991) | [2.1.8](https://github.com/bitcoin/bitcoin/pull/24681) | No |
-| glibc | [link](https://www.gnu.org/software/libc/) | N/A | [2.18](https://github.com/bitcoin/bitcoin/pull/23511) | Yes |
+| glibc | [link](https://www.gnu.org/software/libc/) | N/A | [2.27](https://github.com/bitcoin/bitcoin/pull/27029) | Yes |
| Linux Kernel | [link](https://www.kernel.org/) | N/A | 3.2.0 | Yes |
## Optional
@@ -36,7 +36,7 @@ You can find installation instructions in the `build-*.md` file for your platfor
| Dependency | Releases | Version used | Minimum required | Runtime |
| --- | --- | --- | --- | --- |
| [libnatpmp](../depends/packages/libnatpmp.mk) | [link](https://github.com/miniupnp/libnatpmp/) | commit [07004b9...](https://github.com/bitcoin/bitcoin/pull/25917) | | No |
-| [MiniUPnPc](../depends/packages/miniupnpc.mk) | [link](https://miniupnp.tuxfamily.org/) | [2.2.2](https://github.com/bitcoin/bitcoin/pull/20421) | 1.9 | No |
+| [MiniUPnPc](../depends/packages/miniupnpc.mk) | [link](https://miniupnp.tuxfamily.org/) | [2.2.2](https://github.com/bitcoin/bitcoin/pull/20421) | 2.1 | No |
### Notifications
| Dependency | Releases | Version used | Minimum required | Runtime |
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index 00c68911ef..e2e54e13d3 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -109,6 +109,10 @@ code.
- `++i` is preferred over `i++`.
- `nullptr` is preferred over `NULL` or `(void*)0`.
- `static_assert` is preferred over `assert` where possible. Generally; compile-time checking is preferred over run-time checking.
+ - Use a named cast or functional cast, not a C-Style cast. When casting
+ between integer types, use functional casts such as `int(x)` or `int{x}`
 instead of `(int) x`. When casting between more complex types, use `static_cast`.
 Use `reinterpret_cast` and `const_cast` as appropriate.
For function calls a namespace should be specified explicitly, unless such functions have been declared within it.
Otherwise, [argument-dependent lookup](https://en.cppreference.com/w/cpp/language/adl), also known as ADL, could be
diff --git a/doc/fuzzing.md b/doc/fuzzing.md
index b498f2f41b..84ebb0986d 100644
--- a/doc/fuzzing.md
+++ b/doc/fuzzing.md
@@ -136,10 +136,10 @@ You may also need to take care of giving the correct path for `clang` and
`clang++`, like `CC=/path/to/clang CXX=/path/to/clang++` if the non-systems
`clang` does not come first in your path.
-Full configure that was tested on macOS Catalina with `brew` installed `llvm`:
+Full configure that was tested on macOS with `brew` installed `llvm`:
```sh
-./configure --enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=/usr/local/opt/llvm/bin/clang CXX=/usr/local/opt/llvm/bin/clang++ --disable-asm
+./configure --enable-fuzz --with-sanitizers=fuzzer,address,undefined --disable-asm CC=$(brew --prefix llvm)/bin/clang CXX=$(brew --prefix llvm)/bin/clang++
```
Read the [libFuzzer documentation](https://llvm.org/docs/LibFuzzer.html) for more information. This [libFuzzer tutorial](https://github.com/google/fuzzing/blob/master/tutorial/libFuzzerTutorial.md) might also be of interest.
diff --git a/doc/init.md b/doc/init.md
index 399b819bf4..7f79027718 100644
--- a/doc/init.md
+++ b/doc/init.md
@@ -70,7 +70,7 @@ NOTE: When using the systemd .service file, the creation of the aforementioned
directories and the setting of their permissions is automatically handled by
systemd. Directories are given a permission of 710, giving the bitcoin group
access to files under it _if_ the files themselves give permission to the
-bitcoin group to do so (e.g. when `-sysperms` is specified). This does not allow
+bitcoin group to do so. This does not allow
for the listing of files under the directory.
NOTE: It is not currently possible to override `datadir` in
diff --git a/doc/release-notes-27037.md b/doc/release-notes-27037.md
new file mode 100644
index 0000000000..ee30e64010
--- /dev/null
+++ b/doc/release-notes-27037.md
@@ -0,0 +1,5 @@
+RPC
+---
+
+- `decodescript` may now infer a Miniscript descriptor under P2WSH context if it is not lacking
+ information.
diff --git a/share/rpcauth/rpcauth.py b/share/rpcauth/rpcauth.py
index d441d5f21d..cc7bba1f8b 100755
--- a/share/rpcauth/rpcauth.py
+++ b/share/rpcauth/rpcauth.py
@@ -4,22 +4,20 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from argparse import ArgumentParser
-from base64 import urlsafe_b64encode
from getpass import getpass
-from os import urandom
-
+from secrets import token_hex, token_urlsafe
import hmac
def generate_salt(size):
"""Create size byte hex salt"""
- return urandom(size).hex()
+ return token_hex(size)
def generate_password():
"""Create 32 byte b64 password"""
- return urlsafe_b64encode(urandom(32)).decode('utf-8')
+ return token_urlsafe(32)
def password_to_hmac(salt, password):
- m = hmac.new(bytearray(salt, 'utf-8'), bytearray(password, 'utf-8'), 'SHA256')
+ m = hmac.new(salt.encode('utf-8'), password.encode('utf-8'), 'SHA256')
return m.hexdigest()
def main():
@@ -38,8 +36,8 @@ def main():
password_hmac = password_to_hmac(salt, args.password)
print('String to be appended to bitcoin.conf:')
- print('rpcauth={0}:{1}${2}'.format(args.username, salt, password_hmac))
- print('Your password:\n{0}'.format(args.password))
+ print(f'rpcauth={args.username}:{salt}${password_hmac}')
+ print(f'Your password:\n{args.password}')
if __name__ == '__main__':
main()
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 4d867fdc2f..fa77e28736 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -162,7 +162,8 @@ BITCOIN_TESTS =\
test/validation_flush_tests.cpp \
test/validation_tests.cpp \
test/validationinterface_tests.cpp \
- test/versionbits_tests.cpp
+ test/versionbits_tests.cpp \
+ test/xoroshiro128plusplus_tests.cpp
if ENABLE_WALLET
BITCOIN_TESTS += \
@@ -248,6 +249,7 @@ test_fuzz_fuzz_SOURCES = \
test/fuzz/chain.cpp \
test/fuzz/checkqueue.cpp \
test/fuzz/coins_view.cpp \
+ test/fuzz/coinscache_sim.cpp \
test/fuzz/connman.cpp \
test/fuzz/crypto.cpp \
test/fuzz/crypto_aes256.cpp \
diff --git a/src/Makefile.test_util.include b/src/Makefile.test_util.include
index 8496b3698a..ae77b79b8b 100644
--- a/src/Makefile.test_util.include
+++ b/src/Makefile.test_util.include
@@ -19,7 +19,8 @@ TEST_UTIL_H = \
test/util/str.h \
test/util/transaction_utils.h \
test/util/txmempool.h \
- test/util/validation.h
+ test/util/validation.h \
+ test/util/xoroshiro128plusplus.h
if ENABLE_WALLET
TEST_UTIL_H += wallet/test/util.h
diff --git a/src/bench/chacha20.cpp b/src/bench/chacha20.cpp
index 656fb833e7..115cd064bd 100644
--- a/src/bench/chacha20.cpp
+++ b/src/bench/chacha20.cpp
@@ -14,9 +14,9 @@ static const uint64_t BUFFER_SIZE_LARGE = 1024*1024;
static void CHACHA20(benchmark::Bench& bench, size_t buffersize)
{
std::vector<uint8_t> key(32,0);
- ChaCha20 ctx(key.data(), key.size());
+ ChaCha20 ctx(key.data());
ctx.SetIV(0);
- ctx.Seek(0);
+ ctx.Seek64(0);
std::vector<uint8_t> in(buffersize,0);
std::vector<uint8_t> out(buffersize,0);
bench.batch(in.size()).unit("byte").run([&] {
diff --git a/src/bench/nanobench.h b/src/bench/nanobench.h
index 70e02083c9..8b3dc6c71c 100644
--- a/src/bench/nanobench.h
+++ b/src/bench/nanobench.h
@@ -7,7 +7,7 @@
//
// Licensed under the MIT License <http://opensource.org/licenses/MIT>.
// SPDX-License-Identifier: MIT
-// Copyright (c) 2019-2021 Martin Ankerl <martin.ankerl@gmail.com>
+// Copyright (c) 2019-2023 Martin Leitner-Ankerl <martin.ankerl@gmail.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
@@ -31,19 +31,20 @@
#define ANKERL_NANOBENCH_H_INCLUDED
// see https://semver.org/
-#define ANKERL_NANOBENCH_VERSION_MAJOR 4 // incompatible API changes
-#define ANKERL_NANOBENCH_VERSION_MINOR 3 // backwards-compatible changes
-#define ANKERL_NANOBENCH_VERSION_PATCH 6 // backwards-compatible bug fixes
+#define ANKERL_NANOBENCH_VERSION_MAJOR 4 // incompatible API changes
+#define ANKERL_NANOBENCH_VERSION_MINOR 3 // backwards-compatible changes
+#define ANKERL_NANOBENCH_VERSION_PATCH 10 // backwards-compatible bug fixes
///////////////////////////////////////////////////////////////////////////////////////////////////
// public facing api - as minimal as possible
///////////////////////////////////////////////////////////////////////////////////////////////////
-#include <chrono> // high_resolution_clock
-#include <cstring> // memcpy
-#include <iosfwd> // for std::ostream* custom output target in Config
-#include <string> // all names
-#include <vector> // holds all results
+#include <chrono> // high_resolution_clock
+#include <cstring> // memcpy
+#include <iosfwd> // for std::ostream* custom output target in Config
+#include <string> // all names
+#include <unordered_map> // holds context information of results
+#include <vector> // holds all results
#define ANKERL_NANOBENCH(x) ANKERL_NANOBENCH_PRIVATE_##x()
@@ -91,7 +92,7 @@
#define ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS() 0
#if defined(__linux__) && !defined(ANKERL_NANOBENCH_DISABLE_PERF_COUNTERS)
# include <linux/version.h>
-# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
// PERF_COUNT_HW_REF_CPU_CYCLES only available since kernel 3.3
// PERF_FLAG_FD_CLOEXEC since kernel 3.14
# undef ANKERL_NANOBENCH_PRIVATE_PERF_COUNTERS
@@ -144,43 +145,45 @@ class BigO;
* * `{{#result}}` Marks the begin of the result layer. Whatever comes after this will be instantiated as often as
* a benchmark result is available. Within it, you can use these tags:
*
- * * `{{title}}` See Bench::title().
+ * * `{{title}}` See Bench::title.
*
- * * `{{name}}` Benchmark name, usually directly provided with Bench::run(), but can also be set with Bench::name().
+ * * `{{name}}` Benchmark name, usually directly provided with Bench::run, but can also be set with Bench::name.
*
- * * `{{unit}}` Unit, e.g. `byte`. Defaults to `op`, see Bench::title().
+ * * `{{unit}}` Unit, e.g. `byte`. Defaults to `op`, see Bench::unit.
*
- * * `{{batch}}` Batch size, see Bench::batch().
+ * * `{{batch}}` Batch size, see Bench::batch.
*
- * * `{{complexityN}}` Value used for asymptotic complexity calculation. See Bench::complexityN().
+ * * `{{complexityN}}` Value used for asymptotic complexity calculation. See Bench::complexityN.
*
- * * `{{epochs}}` Number of epochs, see Bench::epochs().
+ * * `{{epochs}}` Number of epochs, see Bench::epochs.
*
* * `{{clockResolution}}` Accuracy of the clock, i.e. what's the smallest time possible to measure with the clock.
* For modern systems, this can be around 20 ns. This value is automatically determined by nanobench at the first
* benchmark that is run, and used as a static variable throughout the application's runtime.
*
- * * `{{clockResolutionMultiple}}` Configuration multiplier for `clockResolution`. See Bench::clockResolutionMultiple().
+ * * `{{clockResolutionMultiple}}` Configuration multiplier for `clockResolution`. See Bench::clockResolutionMultiple.
* This is the target runtime for each measurement (epoch). That means the more accurate your clock is, the faster
* will be the benchmark. Basing the measurement's runtime on the clock resolution is the main reason why nanobench is so fast.
*
* * `{{maxEpochTime}}` Configuration for a maximum time each measurement (epoch) is allowed to take. Note that at least
- * a single iteration will be performed, even when that takes longer than maxEpochTime. See Bench::maxEpochTime().
+ * a single iteration will be performed, even when that takes longer than maxEpochTime. See Bench::maxEpochTime.
*
- * * `{{minEpochTime}}` Minimum epoch time, usually not set. See Bench::minEpochTime().
+ * * `{{minEpochTime}}` Minimum epoch time, defaults to 1ms. See Bench::minEpochTime.
*
- * * `{{minEpochIterations}}` See Bench::minEpochIterations().
+ * * `{{minEpochIterations}}` See Bench::minEpochIterations.
*
- * * `{{epochIterations}}` See Bench::epochIterations().
+ * * `{{epochIterations}}` See Bench::epochIterations.
*
- * * `{{warmup}}` Number of iterations used before measuring starts. See Bench::warmup().
+ * * `{{warmup}}` Number of iterations used before measuring starts. See Bench::warmup.
*
- * * `{{relative}}` True or false, depending on the setting you have used. See Bench::relative().
+ * * `{{relative}}` True or false, depending on the setting you have used. See Bench::relative.
+ *
+ * * `{{context(variableName)}}` See Bench::context.
*
* Apart from these tags, it is also possible to use some mathematical operations on the measurement data. The operations
* are of the form `{{command(name)}}`. Currently `name` can be one of `elapsed`, `iterations`. If performance counters
* are available (currently only on current Linux systems), you also have `pagefaults`, `cpucycles`,
- * `contextswitches`, `instructions`, `branchinstructions`, and `branchmisses`. All the measuers (except `iterations`) are
+ * `contextswitches`, `instructions`, `branchinstructions`, and `branchmisses`. All the measures (except `iterations`) are
* provided for a single iteration (so `elapsed` is the time a single iteration took). The following tags are available:
*
* * `{{median(<name>)}}` Calculate median of a measurement data set, e.g. `{{median(elapsed)}}`.
@@ -201,7 +204,7 @@ class BigO;
* This measurement is a bit hard to interpret, but it is very robust against outliers. E.g. a value of 5% means that half of the
* measurements deviate less than 5% from the median, and the other deviate more than 5% from the median.
*
- * * `{{sum(<name>)}}` Sums of all the measurements. E.g. `{{sum(iterations)}}` will give you the total number of iterations
+ * * `{{sum(<name>)}}` Sum of all the measurements. E.g. `{{sum(iterations)}}` will give you the total number of iterations
* measured in this benchmark.
*
* * `{{minimum(<name>)}}` Minimum of all measurements.
@@ -244,21 +247,21 @@ class BigO;
* For the layer tags *result* and *measurement* you additionally can use these special markers:
*
* * ``{{#-first}}`` - Begin marker of a template that will be instantiated *only for the first* entry in the layer. Use is only
- * allowed between the begin and end marker of the layer allowed. So between ``{{#result}}`` and ``{{/result}}``, or between
+ * allowed between the begin and end marker of the layer. So between ``{{#result}}`` and ``{{/result}}``, or between
* ``{{#measurement}}`` and ``{{/measurement}}``. Finish the template with ``{{/-first}}``.
*
* * ``{{^-first}}`` - Begin marker of a template that will be instantiated *for each except the first* entry in the layer. This,
- * this is basically the inversion of ``{{#-first}}``. Use is only allowed between the begin and end marker of the layer allowed.
+ * this is basically the inversion of ``{{#-first}}``. Use is only allowed between the begin and end marker of the layer.
* So between ``{{#result}}`` and ``{{/result}}``, or between ``{{#measurement}}`` and ``{{/measurement}}``.
*
* * ``{{/-first}}`` - End marker for either ``{{#-first}}`` or ``{{^-first}}``.
*
* * ``{{#-last}}`` - Begin marker of a template that will be instantiated *only for the last* entry in the layer. Use is only
- * allowed between the begin and end marker of the layer allowed. So between ``{{#result}}`` and ``{{/result}}``, or between
+ * allowed between the begin and end marker of the layer. So between ``{{#result}}`` and ``{{/result}}``, or between
* ``{{#measurement}}`` and ``{{/measurement}}``. Finish the template with ``{{/-last}}``.
*
* * ``{{^-last}}`` - Begin marker of a template that will be instantiated *for each except the last* entry in the layer. This,
- * this is basically the inversion of ``{{#-last}}``. Use is only allowed between the begin and end marker of the layer allowed.
+ * this is basically the inversion of ``{{#-last}}``. Use is only allowed between the begin and end marker of the layer.
* So between ``{{#result}}`` and ``{{/result}}``, or between ``{{#measurement}}`` and ``{{/measurement}}``.
*
* * ``{{/-last}}`` - End marker for either ``{{#-last}}`` or ``{{^-last}}``.
@@ -316,12 +319,12 @@ char const* csv() noexcept;
See the tutorial at :ref:`tutorial-template-html` for an example.
@endverbatim
- @see ankerl::nanobench::render()
+ @see also ankerl::nanobench::render()
*/
char const* htmlBoxplot() noexcept;
/*!
- @brief Output in pyperf compatible JSON format, which can be used for more analyzations.
+ @brief Output in pyperf compatible JSON format, which can be used for more analyzation.
@verbatim embed:rst
See the tutorial at :ref:`tutorial-template-pyperf` for an example how to further analyze the output.
@endverbatim
@@ -378,30 +381,32 @@ struct PerfCountSet {
ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
struct Config {
// actual benchmark config
- std::string mBenchmarkTitle = "benchmark";
- std::string mBenchmarkName = "noname";
- std::string mUnit = "op";
- double mBatch = 1.0;
- double mComplexityN = -1.0;
- size_t mNumEpochs = 11;
- size_t mClockResolutionMultiple = static_cast<size_t>(1000);
- std::chrono::nanoseconds mMaxEpochTime = std::chrono::milliseconds(100);
- std::chrono::nanoseconds mMinEpochTime{};
- uint64_t mMinEpochIterations{1};
- uint64_t mEpochIterations{0}; // If not 0, run *exactly* these number of iterations per epoch.
- uint64_t mWarmup = 0;
- std::ostream* mOut = nullptr;
- std::chrono::duration<double> mTimeUnit = std::chrono::nanoseconds{1};
- std::string mTimeUnitName = "ns";
- bool mShowPerformanceCounters = true;
- bool mIsRelative = false;
+ std::string mBenchmarkTitle = "benchmark"; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::string mBenchmarkName = "noname"; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::string mUnit = "op"; // NOLINT(misc-non-private-member-variables-in-classes)
+ double mBatch = 1.0; // NOLINT(misc-non-private-member-variables-in-classes)
+ double mComplexityN = -1.0; // NOLINT(misc-non-private-member-variables-in-classes)
+ size_t mNumEpochs = 11; // NOLINT(misc-non-private-member-variables-in-classes)
+ size_t mClockResolutionMultiple = static_cast<size_t>(1000); // NOLINT(misc-non-private-member-variables-in-classes)
+ std::chrono::nanoseconds mMaxEpochTime = std::chrono::milliseconds(100); // NOLINT(misc-non-private-member-variables-in-classes)
+ std::chrono::nanoseconds mMinEpochTime = std::chrono::milliseconds(1); // NOLINT(misc-non-private-member-variables-in-classes)
+ uint64_t mMinEpochIterations{1}; // NOLINT(misc-non-private-member-variables-in-classes)
+ // If not 0, run *exactly* these number of iterations per epoch.
+ uint64_t mEpochIterations{0}; // NOLINT(misc-non-private-member-variables-in-classes)
+ uint64_t mWarmup = 0; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::ostream* mOut = nullptr; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::chrono::duration<double> mTimeUnit = std::chrono::nanoseconds{1}; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::string mTimeUnitName = "ns"; // NOLINT(misc-non-private-member-variables-in-classes)
+ bool mShowPerformanceCounters = true; // NOLINT(misc-non-private-member-variables-in-classes)
+ bool mIsRelative = false; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::unordered_map<std::string, std::string> mContext{}; // NOLINT(misc-non-private-member-variables-in-classes)
Config();
~Config();
- Config& operator=(Config const&);
- Config& operator=(Config&&);
- Config(Config const&);
- Config(Config&&) noexcept;
+ Config& operator=(Config const& other);
+ Config& operator=(Config&& other) noexcept;
+ Config(Config const& other);
+ Config(Config&& other) noexcept;
};
ANKERL_NANOBENCH(IGNORE_PADDED_POP)
@@ -421,13 +426,13 @@ public:
_size
};
- explicit Result(Config const& benchmarkConfig);
+ explicit Result(Config benchmarkConfig);
~Result();
- Result& operator=(Result const&);
- Result& operator=(Result&&);
- Result(Result const&);
- Result(Result&&) noexcept;
+ Result& operator=(Result const& other);
+ Result& operator=(Result&& other) noexcept;
+ Result(Result const& other);
+ Result(Result&& other) noexcept;
// adds new measurement results
// all values are scaled by iters (except iters...)
@@ -442,6 +447,8 @@ public:
ANKERL_NANOBENCH(NODISCARD) double sumProduct(Measure m1, Measure m2) const noexcept;
ANKERL_NANOBENCH(NODISCARD) double minimum(Measure m) const noexcept;
ANKERL_NANOBENCH(NODISCARD) double maximum(Measure m) const noexcept;
+ ANKERL_NANOBENCH(NODISCARD) std::string const& context(char const* variableName) const;
+ ANKERL_NANOBENCH(NODISCARD) std::string const& context(std::string const& variableName) const;
ANKERL_NANOBENCH(NODISCARD) bool has(Measure m) const noexcept;
ANKERL_NANOBENCH(NODISCARD) double get(size_t idx, Measure m) const;
@@ -485,9 +492,9 @@ public:
static constexpr uint64_t(max)();
/**
- * As a safety precausion, we don't allow copying. Copying a PRNG would mean you would have two random generators that produce the
+ * As a safety precaution, we don't allow copying. Copying a PRNG would mean you would have two random generators that produce the
* same sequence, which is generally not what one wants. Instead create a new rng with the default constructor Rng(), which is
- * automatically seeded from `std::random_device`. If you really need a copy, use copy().
+ * automatically seeded from `std::random_device`. If you really need a copy, use `copy()`.
*/
Rng(Rng const&) = delete;
@@ -528,7 +535,7 @@ public:
*/
explicit Rng(uint64_t seed) noexcept;
Rng(uint64_t x, uint64_t y) noexcept;
- Rng(std::vector<uint64_t> const& data);
+ explicit Rng(std::vector<uint64_t> const& data);
/**
* Creates a copy of the Rng, thus the copy provides exactly the same random sequence as the original.
@@ -620,8 +627,8 @@ public:
*/
Bench();
- Bench(Bench&& other);
- Bench& operator=(Bench&& other);
+ Bench(Bench&& other) noexcept;
+ Bench& operator=(Bench&& other) noexcept;
Bench(Bench const& other);
Bench& operator=(Bench const& other);
~Bench() noexcept;
@@ -667,6 +674,10 @@ public:
*/
Bench& title(char const* benchmarkTitle);
Bench& title(std::string const& benchmarkTitle);
+
+ /**
+ * @brief Gets the title of the benchmark
+ */
ANKERL_NANOBENCH(NODISCARD) std::string const& title() const noexcept;
/// Name of the benchmark, will be shown in the table row.
@@ -675,6 +686,31 @@ public:
ANKERL_NANOBENCH(NODISCARD) std::string const& name() const noexcept;
/**
+ * @brief Set context information.
+ *
+ * The information can be accessed using custom render templates via `{{context(variableName)}}`.
+ * Trying to render a variable that hasn't been set before raises an exception.
+ * Not included in (default) markdown table.
+ *
+ * @see clearContext, render
+ *
+ * @param variableName The name of the context variable.
+ * @param variableValue The value of the context variable.
+ */
+ Bench& context(char const* variableName, char const* variableValue);
+ Bench& context(std::string const& variableName, std::string const& variableValue);
+
+ /**
+ * @brief Reset context information.
+ *
+ * This may improve efficiency when using many context entries,
+ * or improve robustness by removing spurious context entries.
+ *
+ * @see context
+ */
+ Bench& clearContext();
+
+ /**
* @brief Sets the batch size.
*
* E.g. number of processed byte, or some other metric for the size of the processed data in each iteration. If you benchmark
@@ -754,9 +790,9 @@ public:
* representation of the benchmarked code's runtime stability.
*
* Choose the value wisely. In practice, 11 has been shown to be a reasonable choice between runtime performance and accuracy.
- * This setting goes hand in hand with minEpocIterations() (or minEpochTime()). If you are more interested in *median* runtime, you
- * might want to increase epochs(). If you are more interested in *mean* runtime, you might want to increase minEpochIterations()
- * instead.
+ * This setting goes hand in hand with minEpochIterations() (or minEpochTime()). If you are more interested in *median* runtime,
+ * you might want to increase epochs(). If you are more interested in *mean* runtime, you might want to increase
+ * minEpochIterations() instead.
*
* @param numEpochs Number of epochs.
*/
@@ -766,10 +802,10 @@ public:
/**
* @brief Upper limit for the runtime of each epoch.
*
- * As a safety precausion if the clock is not very accurate, we can set an upper limit for the maximum evaluation time per
+ * As a safety precaution if the clock is not very accurate, we can set an upper limit for the maximum evaluation time per
* epoch. Default is 100ms. At least a single evaluation of the benchmark is performed.
*
- * @see minEpochTime(), minEpochIterations()
+ * @see minEpochTime, minEpochIterations
*
* @param t Maximum target runtime for a single epoch.
*/
@@ -782,7 +818,7 @@ public:
* Default is zero, so we are fully relying on clockResolutionMultiple(). In most cases this is exactly what you want. If you see
* that the evaluation is unreliable with a high `err%`, you can increase either minEpochTime() or minEpochIterations().
*
- * @see maxEpochTime(), minEpochIterations()
- * @see maxEpochTime, minEpochIterations
*
* @param t Minimum time each epoch should take.
*/
@@ -793,9 +829,9 @@ public:
* @brief Sets the minimum number of iterations each epoch should take.
*
* Default is 1, and we rely on clockResolutionMultiple(). If the `err%` is high and you want a more smooth result, you might want
- * to increase the minimum number or iterations, or increase the minEpochTime().
+ * to increase the minimum number of iterations, or increase the minEpochTime().
*
- * @see minEpochTime(), maxEpochTime(), minEpochIterations()
+ * @see minEpochTime, maxEpochTime, minEpochIterations
*
* @param numIters Minimum number of iterations per epoch.
*/
@@ -886,10 +922,10 @@ public:
@endverbatim
@tparam T Any type is cast to `double`.
- @param b Length of N for the next benchmark run, so it is possible to calculate `bigO`.
+ @param n Length of N for the next benchmark run, so it is possible to calculate `bigO`.
*/
template <typename T>
- Bench& complexityN(T b) noexcept;
+ Bench& complexityN(T n) noexcept;
ANKERL_NANOBENCH(NODISCARD) double complexityN() const noexcept;
/*!
@@ -993,7 +1029,7 @@ void doNotOptimizeAway(T const& val);
#else
// These assembly magic is directly from what Google Benchmark is doing. I have previously used what facebook's folly was doing, but
-// this seemd to have compilation problems in some cases. Google Benchmark seemed to be the most well tested anyways.
+// this seemed to have compilation problems in some cases. Google Benchmark seemed to be the most well tested anyways.
// see https://github.com/google/benchmark/blob/master/include/benchmark/benchmark.h#L307
template <typename T>
void doNotOptimizeAway(T const& val) {
@@ -1019,7 +1055,11 @@ void doNotOptimizeAway(T& val) {
ANKERL_NANOBENCH(IGNORE_EFFCPP_PUSH)
class IterationLogic {
public:
- explicit IterationLogic(Bench const& config) noexcept;
+ explicit IterationLogic(Bench const& bench);
+ IterationLogic(IterationLogic&&) = delete;
+ IterationLogic& operator=(IterationLogic&&) = delete;
+ IterationLogic(IterationLogic const&) = delete;
+ IterationLogic& operator=(IterationLogic const&) = delete;
~IterationLogic();
ANKERL_NANOBENCH(NODISCARD) uint64_t numIters() const noexcept;
@@ -1036,7 +1076,9 @@ ANKERL_NANOBENCH(IGNORE_PADDED_PUSH)
class PerformanceCounters {
public:
PerformanceCounters(PerformanceCounters const&) = delete;
+ PerformanceCounters(PerformanceCounters&&) = delete;
PerformanceCounters& operator=(PerformanceCounters const&) = delete;
+ PerformanceCounters& operator=(PerformanceCounters&&) = delete;
PerformanceCounters();
~PerformanceCounters();
@@ -1081,11 +1123,11 @@ public:
: BigO(bigOName, mapRangeMeasure(rangeMeasure, rangeToN)) {}
template <typename Op>
- BigO(std::string const& bigOName, RangeMeasure const& rangeMeasure, Op rangeToN)
- : BigO(bigOName, mapRangeMeasure(rangeMeasure, rangeToN)) {}
+ BigO(std::string bigOName, RangeMeasure const& rangeMeasure, Op rangeToN)
+ : BigO(std::move(bigOName), mapRangeMeasure(rangeMeasure, rangeToN)) {}
BigO(char const* bigOName, RangeMeasure const& scaledRangeMeasure);
- BigO(std::string const& bigOName, RangeMeasure const& scaledRangeMeasure);
+ BigO(std::string bigOName, RangeMeasure const& scaledRangeMeasure);
ANKERL_NANOBENCH(NODISCARD) std::string const& name() const noexcept;
ANKERL_NANOBENCH(NODISCARD) double constant() const noexcept;
ANKERL_NANOBENCH(NODISCARD) double normalizedRootMeanSquare() const noexcept;
@@ -1127,7 +1169,7 @@ uint64_t Rng::operator()() noexcept {
ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined")
uint32_t Rng::bounded(uint32_t range) noexcept {
- uint64_t r32 = static_cast<uint32_t>(operator()());
+ uint64_t const r32 = static_cast<uint32_t>(operator()());
auto multiresult = r32 * range;
return static_cast<uint32_t>(multiresult >> 32U);
}
@@ -1136,18 +1178,23 @@ double Rng::uniform01() noexcept {
auto i = (UINT64_C(0x3ff) << 52U) | (operator()() >> 12U);
// can't use union in c++ here for type puning, it's undefined behavior.
// std::memcpy is optimized anyways.
- double d;
+ double d{};
std::memcpy(&d, &i, sizeof(double));
return d - 1.0;
}
template <typename Container>
void Rng::shuffle(Container& container) noexcept {
- auto size = static_cast<uint32_t>(container.size());
- for (auto i = size; i > 1U; --i) {
+ auto i = container.size();
+ while (i > 1U) {
using std::swap;
- auto p = bounded(i); // number in [0, i)
- swap(container[i - 1], container[p]);
+ auto n = operator()();
+ // using decltype(i) instead of size_t to be compatible to containers with 32bit index (see #80)
+ auto b1 = static_cast<decltype(i)>((static_cast<uint32_t>(n) * static_cast<uint64_t>(i)) >> 32U);
+ swap(container[--i], container[b1]);
+
+ auto b2 = static_cast<decltype(i)>(((n >> 32U) * static_cast<uint64_t>(i)) >> 32U);
+ swap(container[--i], container[b2]);
}
}
@@ -1165,11 +1212,11 @@ Bench& Bench::run(Op&& op) {
while (auto n = iterationLogic.numIters()) {
pc.beginMeasure();
- Clock::time_point before = Clock::now();
+ Clock::time_point const before = Clock::now();
while (n-- > 0) {
op();
}
- Clock::time_point after = Clock::now();
+ Clock::time_point const after = Clock::now();
pc.endMeasure();
pc.updateResults(iterationLogic.numIters());
iterationLogic.add(after - before, pc);
@@ -1270,7 +1317,6 @@ void doNotOptimizeAway(T const& val) {
# include <linux/perf_event.h>
# include <sys/ioctl.h>
# include <sys/syscall.h>
-# include <unistd.h>
# endif
// declarations ///////////////////////////////////////////////////////////////////////////////////
@@ -1436,31 +1482,37 @@ struct Node {
template <size_t N>
// NOLINTNEXTLINE(hicpp-avoid-c-arrays,modernize-avoid-c-arrays,cppcoreguidelines-avoid-c-arrays)
bool operator==(char const (&str)[N]) const noexcept {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-array-to-pointer-decay)
return static_cast<size_t>(std::distance(begin, end) + 1) == N && 0 == strncmp(str, begin, N - 1);
}
};
ANKERL_NANOBENCH(IGNORE_PADDED_POP)
+// NOLINTNEXTLINE(misc-no-recursion)
static std::vector<Node> parseMustacheTemplate(char const** tpl) {
std::vector<Node> nodes;
while (true) {
- auto begin = std::strstr(*tpl, "{{");
- auto end = begin;
+ auto const* begin = std::strstr(*tpl, "{{");
+ auto const* end = begin;
if (begin != nullptr) {
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
begin += 2;
end = std::strstr(begin, "}}");
}
if (begin == nullptr || end == nullptr) {
// nothing found, finish node
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
nodes.emplace_back(Node{*tpl, *tpl + std::strlen(*tpl), std::vector<Node>{}, Node::Type::content});
return nodes;
}
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
nodes.emplace_back(Node{*tpl, begin - 2, std::vector<Node>{}, Node::Type::content});
// we found a tag
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
*tpl = end + 2;
switch (*begin) {
case '/':
@@ -1468,10 +1520,12 @@ static std::vector<Node> parseMustacheTemplate(char const** tpl) {
return nodes;
case '#':
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::section});
break;
case '^':
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-bounds-pointer-arithmetic)
nodes.emplace_back(Node{begin + 1, end, parseMustacheTemplate(tpl), Node::Type::inverted_section});
break;
@@ -1484,8 +1538,8 @@ static std::vector<Node> parseMustacheTemplate(char const** tpl) {
static bool generateFirstLast(Node const& n, size_t idx, size_t size, std::ostream& out) {
ANKERL_NANOBENCH_LOG("n.type=" << static_cast<int>(n.type));
- bool matchFirst = n == "-first";
- bool matchLast = n == "-last";
+ bool const matchFirst = n == "-first";
+ bool const matchLast = n == "-last";
if (!matchFirst && !matchLast) {
return false;
}
@@ -1518,7 +1572,7 @@ static bool matchCmdArgs(std::string const& str, std::vector<std::string>& match
matchResult.emplace_back(str.substr(0, idxOpen));
// split by comma
- matchResult.emplace_back(std::string{});
+ matchResult.emplace_back();
for (size_t i = idxOpen + 1; i != idxClose; ++i) {
if (str[i] == ' ' || str[i] == '\t') {
// skip whitespace
@@ -1526,7 +1580,7 @@ static bool matchCmdArgs(std::string const& str, std::vector<std::string>& match
}
if (str[i] == ',') {
// got a comma => new string
- matchResult.emplace_back(std::string{});
+ matchResult.emplace_back();
continue;
}
// no whitespace no comma, append
@@ -1541,49 +1595,63 @@ static bool generateConfigTag(Node const& n, Config const& config, std::ostream&
if (n == "title") {
out << config.mBenchmarkTitle;
return true;
- } else if (n == "name") {
+ }
+ if (n == "name") {
out << config.mBenchmarkName;
return true;
- } else if (n == "unit") {
+ }
+ if (n == "unit") {
out << config.mUnit;
return true;
- } else if (n == "batch") {
+ }
+ if (n == "batch") {
out << config.mBatch;
return true;
- } else if (n == "complexityN") {
+ }
+ if (n == "complexityN") {
out << config.mComplexityN;
return true;
- } else if (n == "epochs") {
+ }
+ if (n == "epochs") {
out << config.mNumEpochs;
return true;
- } else if (n == "clockResolution") {
+ }
+ if (n == "clockResolution") {
out << d(detail::clockResolution());
return true;
- } else if (n == "clockResolutionMultiple") {
+ }
+ if (n == "clockResolutionMultiple") {
out << config.mClockResolutionMultiple;
return true;
- } else if (n == "maxEpochTime") {
+ }
+ if (n == "maxEpochTime") {
out << d(config.mMaxEpochTime);
return true;
- } else if (n == "minEpochTime") {
+ }
+ if (n == "minEpochTime") {
out << d(config.mMinEpochTime);
return true;
- } else if (n == "minEpochIterations") {
+ }
+ if (n == "minEpochIterations") {
out << config.mMinEpochIterations;
return true;
- } else if (n == "epochIterations") {
+ }
+ if (n == "epochIterations") {
out << config.mEpochIterations;
return true;
- } else if (n == "warmup") {
+ }
+ if (n == "warmup") {
out << config.mWarmup;
return true;
- } else if (n == "relative") {
+ }
+ if (n == "relative") {
out << config.mIsRelative;
return true;
}
return false;
}
+// NOLINTNEXTLINE(readability-function-cognitive-complexity)
static std::ostream& generateResultTag(Node const& n, Result const& r, std::ostream& out) {
if (generateConfigTag(n, r.config(), out)) {
return out;
@@ -1596,6 +1664,10 @@ static std::ostream& generateResultTag(Node const& n, Result const& r, std::ostr
std::vector<std::string> matchResult;
if (matchCmdArgs(std::string(n.begin, n.end), matchResult)) {
if (matchResult.size() == 2) {
+ if (matchResult[0] == "context") {
+ return out << r.context(matchResult[1]);
+ }
+
auto m = Result::fromString(matchResult[1]);
if (m == Result::Measure::_size) {
return out << 0.0;
@@ -1712,7 +1784,7 @@ template <typename T>
T parseFile(std::string const& filename);
void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<std::string>& recommendations);
-void printStabilityInformationOnce(std::ostream* os);
+void printStabilityInformationOnce(std::ostream* outStream);
// remembers the last table settings used. When it changes, a new table header is automatically written for the new entry.
uint64_t& singletonHeaderHash() noexcept;
@@ -1779,13 +1851,13 @@ private:
};
// helper replacement for std::to_string of signed/unsigned numbers so we are locale independent
-std::string to_s(uint64_t s);
+std::string to_s(uint64_t n);
std::ostream& operator<<(std::ostream& os, Number const& n);
class MarkDownColumn {
public:
- MarkDownColumn(int w, int prec, std::string const& tit, std::string const& suff, double val);
+ MarkDownColumn(int w, int prec, std::string tit, std::string suff, double val);
std::string title() const;
std::string separator() const;
std::string invalid() const;
@@ -1823,8 +1895,9 @@ std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode);
namespace ankerl {
namespace nanobench {
+// NOLINTNEXTLINE(readability-function-cognitive-complexity)
void render(char const* mustacheTemplate, std::vector<Result> const& results, std::ostream& out) {
- detail::fmt::StreamStateRestorer restorer(out);
+ detail::fmt::StreamStateRestorer const restorer(out);
out.precision(std::numeric_limits<double>::digits10);
auto nodes = templates::parseMustacheTemplate(&mustacheTemplate);
@@ -1905,7 +1978,7 @@ PerformanceCounters& performanceCounters() {
// Windows version of doNotOptimizeAway
// see https://github.com/google/benchmark/blob/master/include/benchmark/benchmark.h#L307
// see https://github.com/facebook/folly/blob/master/folly/Benchmark.h#L280
-// see https://learn.microsoft.com/en-us/cpp/preprocessor/optimize
+// see https://docs.microsoft.com/en-us/cpp/preprocessor/optimize
# if defined(_MSC_VER)
# pragma optimize("", off)
void doNotOptimizeAwaySink(void const*) {}
@@ -1914,7 +1987,7 @@ void doNotOptimizeAwaySink(void const*) {}
template <typename T>
T parseFile(std::string const& filename) {
- std::ifstream fin(filename);
+ std::ifstream fin(filename); // NOLINT(misc-const-correctness)
T num{};
fin >> num;
return num;
@@ -1925,20 +1998,20 @@ char const* getEnv(char const* name) {
# pragma warning(push)
# pragma warning(disable : 4996) // getenv': This function or variable may be unsafe.
# endif
- return std::getenv(name);
+ return std::getenv(name); // NOLINT(concurrency-mt-unsafe)
# if defined(_MSC_VER)
# pragma warning(pop)
# endif
}
bool isEndlessRunning(std::string const& name) {
- auto endless = getEnv("NANOBENCH_ENDLESS");
+ auto const* const endless = getEnv("NANOBENCH_ENDLESS");
return nullptr != endless && endless == name;
}
// True when environment variable NANOBENCH_SUPPRESS_WARNINGS is either not set at all, or set to "0"
bool isWarningsEnabled() {
- auto suppression = getEnv("NANOBENCH_SUPPRESS_WARNINGS");
+ auto const* const suppression = getEnv("NANOBENCH_SUPPRESS_WARNINGS");
return nullptr == suppression || suppression == std::string("0");
}
@@ -1946,11 +2019,11 @@ void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<
warnings.clear();
recommendations.clear();
- bool recommendCheckFlags = false;
-
# if defined(DEBUG)
warnings.emplace_back("DEBUG defined");
- recommendCheckFlags = true;
+ bool const recommendCheckFlags = true;
+# else
+ bool const recommendCheckFlags = false;
# endif
bool recommendPyPerf = false;
@@ -2000,7 +2073,7 @@ void gatherStabilityInformation(std::vector<std::string>& warnings, std::vector<
void printStabilityInformationOnce(std::ostream* outStream) {
static bool shouldPrint = true;
- if (shouldPrint && outStream && isWarningsEnabled()) {
+ if (shouldPrint && (nullptr != outStream) && isWarningsEnabled()) {
auto& os = *outStream;
shouldPrint = false;
std::vector<std::string> warnings;
@@ -2050,7 +2123,7 @@ Clock::duration calcClockResolution(size_t numEvaluations) noexcept {
// Calculates clock resolution once, and remembers the result
Clock::duration clockResolution() noexcept {
- static Clock::duration sResolution = calcClockResolution(20);
+ static Clock::duration const sResolution = calcClockResolution(20);
return sResolution;
}
@@ -2183,6 +2256,7 @@ struct IterationLogic::Impl {
<< ", mState=" << static_cast<int>(mState));
}
+ // NOLINTNEXTLINE(readability-function-cognitive-complexity)
void showResult(std::string const& errorMessage) const {
ANKERL_NANOBENCH_LOG(errorMessage);
@@ -2208,7 +2282,7 @@ struct IterationLogic::Impl {
rMedian / (mBench.timeUnit().count() * mBench.batch()));
columns.emplace_back(22, 2, mBench.unit() + "/s", "", rMedian <= 0.0 ? 0.0 : mBench.batch() / rMedian);
- double rErrorMedian = mResult.medianAbsolutePercentError(Result::Measure::elapsed);
+ double const rErrorMedian = mResult.medianAbsolutePercentError(Result::Measure::elapsed);
columns.emplace_back(10, 1, "err%", "%", rErrorMedian * 100.0);
double rInsMedian = -1.0;
@@ -2226,7 +2300,7 @@ struct IterationLogic::Impl {
columns.emplace_back(9, 3, "IPC", "", rCycMedian <= 0.0 ? 0.0 : rInsMedian / rCycMedian);
}
if (mBench.performanceCounters() && mResult.has(Result::Measure::branchinstructions)) {
- double rBraMedian = mResult.median(Result::Measure::branchinstructions);
+ double const rBraMedian = mResult.median(Result::Measure::branchinstructions);
columns.emplace_back(17, 2, "bra/" + mBench.unit(), "", rBraMedian / mBench.batch());
if (mResult.has(Result::Measure::branchmisses)) {
double p = 0.0;
@@ -2299,25 +2373,22 @@ struct IterationLogic::Impl {
return elapsed * 3 >= mTargetRuntimePerEpoch * 2;
}
- uint64_t mNumIters = 1;
- Bench const& mBench;
- std::chrono::nanoseconds mTargetRuntimePerEpoch{};
- Result mResult;
- Rng mRng{123};
- std::chrono::nanoseconds mTotalElapsed{};
- uint64_t mTotalNumIters = 0;
-
- State mState = State::upscaling_runtime;
+ uint64_t mNumIters = 1; // NOLINT(misc-non-private-member-variables-in-classes)
+ Bench const& mBench; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::chrono::nanoseconds mTargetRuntimePerEpoch{}; // NOLINT(misc-non-private-member-variables-in-classes)
+ Result mResult; // NOLINT(misc-non-private-member-variables-in-classes)
+ Rng mRng{123}; // NOLINT(misc-non-private-member-variables-in-classes)
+ std::chrono::nanoseconds mTotalElapsed{}; // NOLINT(misc-non-private-member-variables-in-classes)
+ uint64_t mTotalNumIters = 0; // NOLINT(misc-non-private-member-variables-in-classes)
+ State mState = State::upscaling_runtime; // NOLINT(misc-non-private-member-variables-in-classes)
};
ANKERL_NANOBENCH(IGNORE_PADDED_POP)
-IterationLogic::IterationLogic(Bench const& bench) noexcept
+IterationLogic::IterationLogic(Bench const& bench)
: mPimpl(new Impl(bench)) {}
IterationLogic::~IterationLogic() {
- if (mPimpl) {
- delete mPimpl;
- }
+ delete mPimpl;
}
uint64_t IterationLogic::numIters() const noexcept {
@@ -2344,11 +2415,16 @@ public:
, correctMeasuringOverhead(correctMeasuringOverhead_)
, correctLoopOverhead(correctLoopOverhead_) {}
- uint64_t* targetValue{};
- bool correctMeasuringOverhead{};
- bool correctLoopOverhead{};
+ uint64_t* targetValue{}; // NOLINT(misc-non-private-member-variables-in-classes)
+ bool correctMeasuringOverhead{}; // NOLINT(misc-non-private-member-variables-in-classes)
+ bool correctLoopOverhead{}; // NOLINT(misc-non-private-member-variables-in-classes)
};
+ LinuxPerformanceCounters() = default;
+ LinuxPerformanceCounters(LinuxPerformanceCounters const&) = delete;
+ LinuxPerformanceCounters(LinuxPerformanceCounters&&) = delete;
+ LinuxPerformanceCounters& operator=(LinuxPerformanceCounters const&) = delete;
+ LinuxPerformanceCounters& operator=(LinuxPerformanceCounters&&) = delete;
~LinuxPerformanceCounters();
// quick operation
@@ -2370,13 +2446,13 @@ public:
return;
}
- // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg)
mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_RESET, PERF_IOC_FLAG_GROUP);
if (mHasError) {
return;
}
- // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg)
mHasError = -1 == ioctl(mFd, PERF_EVENT_IOC_ENABLE, PERF_IOC_FLAG_GROUP);
}
@@ -2385,7 +2461,7 @@ public:
return;
}
- // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg)
mHasError = (-1 == ioctl(mFd, PERF_EVENT_IOC_DISABLE, PERF_IOC_FLAG_GROUP));
if (mHasError) {
return;
@@ -2406,9 +2482,9 @@ public:
ANKERL_NANOBENCH_NO_SANITIZE("integer", "undefined")
static inline uint32_t mix(uint32_t x) noexcept {
- x ^= x << 13;
- x ^= x >> 17;
- x ^= x << 5;
+ x ^= x << 13U;
+ x ^= x >> 17U;
+ x ^= x << 5U;
return x;
}
@@ -2448,7 +2524,7 @@ public:
// marsaglia's xorshift: mov, sal/shr, xor. Times 3.
// This has the nice property that the compiler doesn't seem to be able to optimize multiple calls any further.
// see https://godbolt.org/z/49RVQ5
- uint64_t const numIters = 100000U + (std::random_device{}() & 3);
+ uint64_t const numIters = 100000U + (std::random_device{}() & 3U);
uint64_t n = numIters;
uint32_t x = 1234567;
@@ -2582,6 +2658,7 @@ bool LinuxPerformanceCounters::monitor(uint32_t type, uint64_t eventid, Target t
const unsigned long flags = 0;
# endif
+ // NOLINTNEXTLINE(cppcoreguidelines-pro-type-vararg)
auto fd = static_cast<int>(syscall(__NR_perf_event_open, &pea, pid, cpu, mFd, flags));
if (-1 == fd) {
return false;
@@ -2591,7 +2668,7 @@ bool LinuxPerformanceCounters::monitor(uint32_t type, uint64_t eventid, Target t
mFd = fd;
}
uint64_t id = 0;
- // NOLINTNEXTLINE(hicpp-signed-bitwise)
+ // NOLINTNEXTLINE(hicpp-signed-bitwise,cppcoreguidelines-pro-type-vararg)
if (-1 == ioctl(fd, PERF_EVENT_IOC_ID, &id)) {
// couldn't get id
return false;
@@ -2639,9 +2716,8 @@ PerformanceCounters::PerformanceCounters()
}
PerformanceCounters::~PerformanceCounters() {
- if (nullptr != mPc) {
- delete mPc;
- }
+ // no need to check for nullptr, delete nullptr has no effect
+ delete mPc;
}
void PerformanceCounters::beginMeasure() {
@@ -2721,7 +2797,7 @@ Number::Number(int width, int precision, double value)
, mValue(value) {}
std::ostream& Number::write(std::ostream& os) const {
- StreamStateRestorer restorer(os);
+ StreamStateRestorer const restorer(os);
os.imbue(std::locale(os.getloc(), new NumSep(',')));
os << std::setw(mWidth) << std::setprecision(mPrecision) << std::fixed << mValue;
return os;
@@ -2747,11 +2823,11 @@ std::ostream& operator<<(std::ostream& os, Number const& n) {
return n.write(os);
}
-MarkDownColumn::MarkDownColumn(int w, int prec, std::string const& tit, std::string const& suff, double val)
+MarkDownColumn::MarkDownColumn(int w, int prec, std::string tit, std::string suff, double val)
: mWidth(w)
, mPrecision(prec)
- , mTitle(tit)
- , mSuffix(suff)
+ , mTitle(std::move(tit))
+ , mSuffix(std::move(suff))
, mValue(val) {}
std::string MarkDownColumn::title() const {
@@ -2785,7 +2861,7 @@ std::string MarkDownColumn::value() const {
MarkDownCode::MarkDownCode(std::string const& what) {
mWhat.reserve(what.size() + 2);
mWhat.push_back('`');
- for (char c : what) {
+ for (char const c : what) {
mWhat.push_back(c);
if ('`' == c) {
mWhat.push_back('`');
@@ -2808,14 +2884,14 @@ std::ostream& operator<<(std::ostream& os, MarkDownCode const& mdCode) {
Config::Config() = default;
Config::~Config() = default;
Config& Config::operator=(Config const&) = default;
-Config& Config::operator=(Config&&) = default;
+Config& Config::operator=(Config&&) noexcept = default;
Config::Config(Config const&) = default;
Config::Config(Config&&) noexcept = default;
// provide implementation here so it's only generated once
Result::~Result() = default;
Result& Result::operator=(Result const&) = default;
-Result& Result::operator=(Result&&) = default;
+Result& Result::operator=(Result&&) noexcept = default;
Result::Result(Result const&) = default;
Result::Result(Result&&) noexcept = default;
@@ -2827,15 +2903,15 @@ inline constexpr typename std::underlying_type<T>::type u(T val) noexcept {
} // namespace detail
// Result returned after a benchmark has finished. Can be used as a baseline for relative().
-Result::Result(Config const& benchmarkConfig)
- : mConfig(benchmarkConfig)
+Result::Result(Config benchmarkConfig)
+ : mConfig(std::move(benchmarkConfig))
, mNameToMeasurements{detail::u(Result::Measure::_size)} {}
void Result::add(Clock::duration totalElapsed, uint64_t iters, detail::PerformanceCounters const& pc) {
using detail::d;
using detail::u;
- double dIters = d(iters);
+ double const dIters = d(iters);
mNameToMeasurements[u(Result::Measure::iterations)].push_back(dIters);
mNameToMeasurements[u(Result::Measure::elapsed)].push_back(d(totalElapsed) / dIters);
@@ -2987,27 +3063,41 @@ double Result::maximum(Measure m) const noexcept {
return *std::max_element(data.begin(), data.end());
}
+std::string const& Result::context(char const* variableName) const {
+ return mConfig.mContext.at(variableName);
+}
+
+std::string const& Result::context(std::string const& variableName) const {
+ return mConfig.mContext.at(variableName);
+}
+
Result::Measure Result::fromString(std::string const& str) {
if (str == "elapsed") {
return Measure::elapsed;
- } else if (str == "iterations") {
+ }
+ if (str == "iterations") {
return Measure::iterations;
- } else if (str == "pagefaults") {
+ }
+ if (str == "pagefaults") {
return Measure::pagefaults;
- } else if (str == "cpucycles") {
+ }
+ if (str == "cpucycles") {
return Measure::cpucycles;
- } else if (str == "contextswitches") {
+ }
+ if (str == "contextswitches") {
return Measure::contextswitches;
- } else if (str == "instructions") {
+ }
+ if (str == "instructions") {
return Measure::instructions;
- } else if (str == "branchinstructions") {
+ }
+ if (str == "branchinstructions") {
return Measure::branchinstructions;
- } else if (str == "branchmisses") {
+ }
+ if (str == "branchmisses") {
return Measure::branchmisses;
- } else {
- // not found, return _size
- return Measure::_size;
}
+ // not found, return _size
+ return Measure::_size;
}
// Configuration of a microbenchmark.
@@ -3015,8 +3105,8 @@ Bench::Bench() {
mConfig.mOut = &std::cout;
}
-Bench::Bench(Bench&&) = default;
-Bench& Bench::operator=(Bench&&) = default;
+Bench::Bench(Bench&&) noexcept = default;
+Bench& Bench::operator=(Bench&&) noexcept = default;
Bench::Bench(Bench const&) = default;
Bench& Bench::operator=(Bench const&) = default;
Bench::~Bench() noexcept = default;
@@ -3114,6 +3204,21 @@ std::string const& Bench::name() const noexcept {
return mConfig.mBenchmarkName;
}
+Bench& Bench::context(char const* variableName, char const* variableValue) {
+ mConfig.mContext[variableName] = variableValue;
+ return *this;
+}
+
+Bench& Bench::context(std::string const& variableName, std::string const& variableValue) {
+ mConfig.mContext[variableName] = variableValue;
+ return *this;
+}
+
+Bench& Bench::clearContext() {
+ mConfig.mContext.clear();
+ return *this;
+}
+
// Number of epochs to evaluate. The reported result will be the median of evaluation of each epoch.
Bench& Bench::epochs(size_t numEpochs) noexcept {
mConfig.mNumEpochs = numEpochs;
@@ -3295,27 +3400,27 @@ BigO::RangeMeasure BigO::collectRangeMeasure(std::vector<Result> const& results)
return rangeMeasure;
}
-BigO::BigO(std::string const& bigOName, RangeMeasure const& rangeMeasure)
- : mName(bigOName) {
+BigO::BigO(std::string bigOName, RangeMeasure const& rangeMeasure)
+ : mName(std::move(bigOName)) {
// estimate the constant factor
double sumRangeMeasure = 0.0;
double sumRangeRange = 0.0;
- for (size_t i = 0; i < rangeMeasure.size(); ++i) {
- sumRangeMeasure += rangeMeasure[i].first * rangeMeasure[i].second;
- sumRangeRange += rangeMeasure[i].first * rangeMeasure[i].first;
+ for (const auto& rm : rangeMeasure) {
+ sumRangeMeasure += rm.first * rm.second;
+ sumRangeRange += rm.first * rm.first;
}
mConstant = sumRangeMeasure / sumRangeRange;
// calculate root mean square
double err = 0.0;
double sumMeasure = 0.0;
- for (size_t i = 0; i < rangeMeasure.size(); ++i) {
- auto diff = mConstant * rangeMeasure[i].first - rangeMeasure[i].second;
+ for (const auto& rm : rangeMeasure) {
+ auto diff = mConstant * rm.first - rm.second;
err += diff * diff;
- sumMeasure += rangeMeasure[i].second;
+ sumMeasure += rm.second;
}
auto n = static_cast<double>(rangeMeasure.size());
@@ -3347,7 +3452,7 @@ std::ostream& operator<<(std::ostream& os, BigO const& bigO) {
}
std::ostream& operator<<(std::ostream& os, std::vector<ankerl::nanobench::BigO> const& bigOs) {
- detail::fmt::StreamStateRestorer restorer(os);
+ detail::fmt::StreamStateRestorer const restorer(os);
os << std::endl << "| coefficient | err% | complexity" << std::endl << "|--------------:|-------:|------------" << std::endl;
for (auto const& bigO : bigOs) {
os << "|" << std::setw(14) << std::setprecision(7) << std::scientific << bigO.constant() << " ";
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index e6e33007d5..df8fb7cece 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -55,7 +55,10 @@ static constexpr int DEFAULT_WAIT_CLIENT_TIMEOUT = 0;
static const bool DEFAULT_NAMED=false;
static const int CONTINUE_EXECUTION=-1;
static constexpr int8_t UNKNOWN_NETWORK{-1};
-static constexpr std::array NETWORKS{"ipv4", "ipv6", "onion", "i2p", "cjdns"};
+// See GetNetworkName() in netbase.cpp
+static constexpr std::array NETWORKS{"not_publicly_routable", "ipv4", "ipv6", "onion", "i2p", "cjdns", "internal"};
+static constexpr std::array NETWORK_SHORT_NAMES{"npr", "ipv4", "ipv6", "onion", "i2p", "cjdns", "int"};
+static constexpr std::array UNREACHABLE_NETWORK_IDS{/*not_publicly_routable*/0, /*internal*/6};
/** Default number of blocks to generate for RPC generatetoaddress. */
static const std::string DEFAULT_NBLOCKS = "1";
@@ -289,7 +292,7 @@ public:
// Prepare result to return to user.
UniValue result{UniValue::VOBJ}, addresses{UniValue::VOBJ};
uint64_t total{0}; // Total address count
- for (size_t i = 0; i < NETWORKS.size(); ++i) {
+ for (size_t i = 1; i < NETWORKS.size() - 1; ++i) {
addresses.pushKV(NETWORKS[i], counts.at(i));
total += counts.at(i);
}
@@ -506,7 +509,7 @@ public:
const bool is_addr_relay_enabled{peer["addr_relay_enabled"].isNull() ? false : peer["addr_relay_enabled"].get_bool()};
const bool is_bip152_hb_from{peer["bip152_hb_from"].get_bool()};
const bool is_bip152_hb_to{peer["bip152_hb_to"].get_bool()};
- m_peers.push_back({addr, sub_version, conn_type, network, age, min_ping, ping, addr_processed, addr_rate_limited, last_blck, last_recv, last_send, last_trxn, peer_id, mapped_as, version, is_addr_relay_enabled, is_bip152_hb_from, is_bip152_hb_to, is_outbound, is_tx_relay});
+ m_peers.push_back({addr, sub_version, conn_type, NETWORK_SHORT_NAMES[network_id], age, min_ping, ping, addr_processed, addr_rate_limited, last_blck, last_recv, last_send, last_trxn, peer_id, mapped_as, version, is_addr_relay_enabled, is_bip152_hb_from, is_bip152_hb_to, is_outbound, is_tx_relay});
m_max_addr_length = std::max(addr.length() + 1, m_max_addr_length);
m_max_addr_processed_length = std::max(ToString(addr_processed).length(), m_max_addr_processed_length);
m_max_addr_rate_limited_length = std::max(ToString(addr_rate_limited).length(), m_max_addr_rate_limited_length);
@@ -571,6 +574,13 @@ public:
reachable_networks.push_back(network_id);
}
};
+
+ for (const size_t network_id : UNREACHABLE_NETWORK_IDS) {
+ if (m_counts.at(2).at(network_id) == 0) continue;
+ result += strprintf("%8s", NETWORK_SHORT_NAMES.at(network_id)); // column header
+ reachable_networks.push_back(network_id);
+ }
+
result += " total block";
if (m_manual_peers_count) result += " manual";
@@ -636,7 +646,7 @@ public:
" \"manual\" - peer we manually added using RPC addnode or the -addnode/-connect config options\n"
" \"feeler\" - short-lived connection for testing addresses\n"
" \"addr\" - address fetch; short-lived connection for requesting addresses\n"
- " net Network the peer connected through (\"ipv4\", \"ipv6\", \"onion\", \"i2p\", or \"cjdns\")\n"
+ " net Network the peer connected through (\"ipv4\", \"ipv6\", \"onion\", \"i2p\", \"cjdns\", or \"npr\" (not publicly routable))\n"
" mping Minimum observed ping time, in milliseconds (ms)\n"
" ping Last observed ping time, in milliseconds (ms)\n"
" send Time since last message sent to the peer, in seconds\n"
diff --git a/src/blockencodings.h b/src/blockencodings.h
index 7207ff1ae2..afdfa426f1 100644
--- a/src/blockencodings.h
+++ b/src/blockencodings.h
@@ -135,7 +135,7 @@ protected:
public:
CBlockHeader header;
- // Can be overriden for testing
+ // Can be overridden for testing
using CheckBlockFn = std::function<bool(const CBlock&, BlockValidationState&, const Consensus::Params&, bool, bool)>;
CheckBlockFn m_check_block_mock{nullptr};
diff --git a/src/coins.cpp b/src/coins.cpp
index e98bf816ab..5a6ae525a7 100644
--- a/src/coins.cpp
+++ b/src/coins.cpp
@@ -32,7 +32,10 @@ bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock,
std::unique_ptr<CCoinsViewCursor> CCoinsViewBacked::Cursor() const { return base->Cursor(); }
size_t CCoinsViewBacked::EstimateSize() const { return base->EstimateSize(); }
-CCoinsViewCache::CCoinsViewCache(CCoinsView* baseIn) : CCoinsViewBacked(baseIn) {}
+CCoinsViewCache::CCoinsViewCache(CCoinsView* baseIn, bool deterministic) :
+ CCoinsViewBacked(baseIn), m_deterministic(deterministic),
+ cacheCoins(0, SaltedOutpointHasher(/*deterministic=*/deterministic))
+{}
size_t CCoinsViewCache::DynamicMemoryUsage() const {
return memusage::DynamicUsage(cacheCoins) + cachedCoinsUsage;
@@ -311,7 +314,24 @@ void CCoinsViewCache::ReallocateCache()
// Cache should be empty when we're calling this.
assert(cacheCoins.size() == 0);
cacheCoins.~CCoinsMap();
- ::new (&cacheCoins) CCoinsMap();
+ ::new (&cacheCoins) CCoinsMap(0, SaltedOutpointHasher(/*deterministic=*/m_deterministic));
+}
+
+void CCoinsViewCache::SanityCheck() const
+{
+ size_t recomputed_usage = 0;
+ for (const auto& [_, entry] : cacheCoins) {
+ unsigned attr = 0;
+ if (entry.flags & CCoinsCacheEntry::DIRTY) attr |= 1;
+ if (entry.flags & CCoinsCacheEntry::FRESH) attr |= 2;
+ if (entry.coin.IsSpent()) attr |= 4;
+ // Only 5 combinations are possible.
+ assert(attr != 2 && attr != 4 && attr != 7);
+
+ // Recompute cachedCoinsUsage.
+ recomputed_usage += entry.coin.DynamicMemoryUsage();
+ }
+ assert(recomputed_usage == cachedCoinsUsage);
}
static const size_t MIN_TRANSACTION_OUTPUT_WEIGHT = WITNESS_SCALE_FACTOR * ::GetSerializeSize(CTxOut(), PROTOCOL_VERSION);
diff --git a/src/coins.h b/src/coins.h
index 710b8c7c83..dd336b210a 100644
--- a/src/coins.h
+++ b/src/coins.h
@@ -211,6 +211,9 @@ public:
/** CCoinsView that adds a memory cache for transactions to another CCoinsView */
class CCoinsViewCache : public CCoinsViewBacked
{
+private:
+ const bool m_deterministic;
+
protected:
/**
* Make mutable so that we can "fill the cache" even from Get-methods
@@ -223,7 +226,7 @@ protected:
mutable size_t cachedCoinsUsage{0};
public:
- CCoinsViewCache(CCoinsView *baseIn);
+ CCoinsViewCache(CCoinsView *baseIn, bool deterministic = false);
/**
* By deleting the copy constructor, we prevent accidentally using it when one intends to create a cache on top of a base cache.
@@ -320,6 +323,9 @@ public:
//! See: https://stackoverflow.com/questions/42114044/how-to-release-unordered-map-memory
void ReallocateCache();
+ //! Run an internal sanity check on the cache data structure. */
+ void SanityCheck() const;
+
private:
/**
* @note this is marked const, but may actually append to `cacheCoins`, increasing
diff --git a/src/consensus/params.h b/src/consensus/params.h
index be92556611..25f53eb620 100644
--- a/src/consensus/params.h
+++ b/src/consensus/params.h
@@ -11,6 +11,7 @@
#include <chrono>
#include <limits>
#include <map>
+#include <vector>
namespace Consensus {
diff --git a/src/core_io.h b/src/core_io.h
index 33e1ad82fc..997f3bfd5b 100644
--- a/src/core_io.h
+++ b/src/core_io.h
@@ -15,6 +15,7 @@ class CBlockHeader;
class CScript;
class CTransaction;
struct CMutableTransaction;
+class SigningProvider;
class uint256;
class UniValue;
class CTxUndo;
@@ -52,7 +53,7 @@ UniValue ValueFromAmount(const CAmount amount);
std::string FormatScript(const CScript& script);
std::string EncodeHexTx(const CTransaction& tx, const int serializeFlags = 0);
std::string SighashToStr(unsigned char sighash_type);
-void ScriptToUniv(const CScript& script, UniValue& out, bool include_hex = true, bool include_address = false);
+void ScriptToUniv(const CScript& script, UniValue& out, bool include_hex = true, bool include_address = false, const SigningProvider* provider = nullptr);
void TxToUniv(const CTransaction& tx, const uint256& block_hash, UniValue& entry, bool include_hex = true, int serialize_flags = 0, const CTxUndo* txundo = nullptr, TxVerbosity verbosity = TxVerbosity::SHOW_DETAILS);
#endif // BITCOIN_CORE_IO_H
diff --git a/src/core_write.cpp b/src/core_write.cpp
index 300bd30e43..b0e3b0b3c4 100644
--- a/src/core_write.cpp
+++ b/src/core_write.cpp
@@ -147,13 +147,13 @@ std::string EncodeHexTx(const CTransaction& tx, const int serializeFlags)
return HexStr(ssTx);
}
-void ScriptToUniv(const CScript& script, UniValue& out, bool include_hex, bool include_address)
+void ScriptToUniv(const CScript& script, UniValue& out, bool include_hex, bool include_address, const SigningProvider* provider)
{
CTxDestination address;
out.pushKV("asm", ScriptToAsmStr(script));
if (include_address) {
- out.pushKV("desc", InferDescriptor(script, DUMMY_SIGNING_PROVIDER)->ToString());
+ out.pushKV("desc", InferDescriptor(script, provider ? *provider : DUMMY_SIGNING_PROVIDER)->ToString());
}
if (include_hex) {
out.pushKV("hex", HexStr(script));
diff --git a/src/crypto/chacha20.cpp b/src/crypto/chacha20.cpp
index 25d7baa8cc..6934cef163 100644
--- a/src/crypto/chacha20.cpp
+++ b/src/crypto/chacha20.cpp
@@ -8,6 +8,7 @@
#include <crypto/common.h>
#include <crypto/chacha20.h>
+#include <algorithm>
#include <string.h>
constexpr static inline uint32_t rotl32(uint32_t v, int c) { return (v << c) | (v >> (32 - c)); }
@@ -20,95 +21,69 @@ constexpr static inline uint32_t rotl32(uint32_t v, int c) { return (v << c) | (
#define REPEAT10(a) do { {a}; {a}; {a}; {a}; {a}; {a}; {a}; {a}; {a}; {a}; } while(0)
-static const unsigned char sigma[] = "expand 32-byte k";
-static const unsigned char tau[] = "expand 16-byte k";
-
-void ChaCha20::SetKey(const unsigned char* k, size_t keylen)
+void ChaCha20Aligned::SetKey32(const unsigned char* k)
{
- const unsigned char *constants;
-
- input[4] = ReadLE32(k + 0);
- input[5] = ReadLE32(k + 4);
- input[6] = ReadLE32(k + 8);
- input[7] = ReadLE32(k + 12);
- if (keylen == 32) { /* recommended */
- k += 16;
- constants = sigma;
- } else { /* keylen == 16 */
- constants = tau;
- }
- input[8] = ReadLE32(k + 0);
- input[9] = ReadLE32(k + 4);
- input[10] = ReadLE32(k + 8);
- input[11] = ReadLE32(k + 12);
- input[0] = ReadLE32(constants + 0);
- input[1] = ReadLE32(constants + 4);
- input[2] = ReadLE32(constants + 8);
- input[3] = ReadLE32(constants + 12);
- input[12] = 0;
- input[13] = 0;
- input[14] = 0;
- input[15] = 0;
+ input[0] = ReadLE32(k + 0);
+ input[1] = ReadLE32(k + 4);
+ input[2] = ReadLE32(k + 8);
+ input[3] = ReadLE32(k + 12);
+ input[4] = ReadLE32(k + 16);
+ input[5] = ReadLE32(k + 20);
+ input[6] = ReadLE32(k + 24);
+ input[7] = ReadLE32(k + 28);
+ input[8] = 0;
+ input[9] = 0;
+ input[10] = 0;
+ input[11] = 0;
}
-ChaCha20::ChaCha20()
+ChaCha20Aligned::ChaCha20Aligned()
{
memset(input, 0, sizeof(input));
}
-ChaCha20::ChaCha20(const unsigned char* k, size_t keylen)
+ChaCha20Aligned::ChaCha20Aligned(const unsigned char* key32)
{
- SetKey(k, keylen);
+ SetKey32(key32);
}
-void ChaCha20::SetIV(uint64_t iv)
+void ChaCha20Aligned::SetIV(uint64_t iv)
{
- input[14] = iv;
- input[15] = iv >> 32;
+ input[10] = iv;
+ input[11] = iv >> 32;
}
-void ChaCha20::Seek(uint64_t pos)
+void ChaCha20Aligned::Seek64(uint64_t pos)
{
- input[12] = pos;
- input[13] = pos >> 32;
+ input[8] = pos;
+ input[9] = pos >> 32;
}
-void ChaCha20::Keystream(unsigned char* c, size_t bytes)
+inline void ChaCha20Aligned::Keystream64(unsigned char* c, size_t blocks)
{
uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
- uint32_t j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
- unsigned char *ctarget = nullptr;
- unsigned char tmp[64];
- unsigned int i;
-
- if (!bytes) return;
-
- j0 = input[0];
- j1 = input[1];
- j2 = input[2];
- j3 = input[3];
- j4 = input[4];
- j5 = input[5];
- j6 = input[6];
- j7 = input[7];
- j8 = input[8];
- j9 = input[9];
- j10 = input[10];
- j11 = input[11];
- j12 = input[12];
- j13 = input[13];
- j14 = input[14];
- j15 = input[15];
+ uint32_t j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
+
+ if (!blocks) return;
+
+ j4 = input[0];
+ j5 = input[1];
+ j6 = input[2];
+ j7 = input[3];
+ j8 = input[4];
+ j9 = input[5];
+ j10 = input[6];
+ j11 = input[7];
+ j12 = input[8];
+ j13 = input[9];
+ j14 = input[10];
+ j15 = input[11];
for (;;) {
- if (bytes < 64) {
- ctarget = c;
- c = tmp;
- }
- x0 = j0;
- x1 = j1;
- x2 = j2;
- x3 = j3;
+ x0 = 0x61707865;
+ x1 = 0x3320646e;
+ x2 = 0x79622d32;
+ x3 = 0x6b206574;
x4 = j4;
x5 = j5;
x6 = j6;
@@ -134,10 +109,10 @@ void ChaCha20::Keystream(unsigned char* c, size_t bytes)
QUARTERROUND( x3, x4, x9,x14);
);
- x0 += j0;
- x1 += j1;
- x2 += j2;
- x3 += j3;
+ x0 += 0x61707865;
+ x1 += 0x3320646e;
+ x2 += 0x79622d32;
+ x3 += 0x6b206574;
x4 += j4;
x5 += j5;
x6 += j6;
@@ -171,59 +146,41 @@ void ChaCha20::Keystream(unsigned char* c, size_t bytes)
WriteLE32(c + 56, x14);
WriteLE32(c + 60, x15);
- if (bytes <= 64) {
- if (bytes < 64) {
- for (i = 0;i < bytes;++i) ctarget[i] = c[i];
- }
- input[12] = j12;
- input[13] = j13;
+ if (blocks == 1) {
+ input[8] = j12;
+ input[9] = j13;
return;
}
- bytes -= 64;
+ blocks -= 1;
c += 64;
}
}
-void ChaCha20::Crypt(const unsigned char* m, unsigned char* c, size_t bytes)
+inline void ChaCha20Aligned::Crypt64(const unsigned char* m, unsigned char* c, size_t blocks)
{
uint32_t x0, x1, x2, x3, x4, x5, x6, x7, x8, x9, x10, x11, x12, x13, x14, x15;
- uint32_t j0, j1, j2, j3, j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
- unsigned char *ctarget = nullptr;
- unsigned char tmp[64];
- unsigned int i;
-
- if (!bytes) return;
-
- j0 = input[0];
- j1 = input[1];
- j2 = input[2];
- j3 = input[3];
- j4 = input[4];
- j5 = input[5];
- j6 = input[6];
- j7 = input[7];
- j8 = input[8];
- j9 = input[9];
- j10 = input[10];
- j11 = input[11];
- j12 = input[12];
- j13 = input[13];
- j14 = input[14];
- j15 = input[15];
+ uint32_t j4, j5, j6, j7, j8, j9, j10, j11, j12, j13, j14, j15;
+
+ if (!blocks) return;
+
+ j4 = input[0];
+ j5 = input[1];
+ j6 = input[2];
+ j7 = input[3];
+ j8 = input[4];
+ j9 = input[5];
+ j10 = input[6];
+ j11 = input[7];
+ j12 = input[8];
+ j13 = input[9];
+ j14 = input[10];
+ j15 = input[11];
for (;;) {
- if (bytes < 64) {
- // if m has fewer than 64 bytes available, copy m to tmp and
- // read from tmp instead
- for (i = 0;i < bytes;++i) tmp[i] = m[i];
- m = tmp;
- ctarget = c;
- c = tmp;
- }
- x0 = j0;
- x1 = j1;
- x2 = j2;
- x3 = j3;
+ x0 = 0x61707865;
+ x1 = 0x3320646e;
+ x2 = 0x79622d32;
+ x3 = 0x6b206574;
x4 = j4;
x5 = j5;
x6 = j6;
@@ -249,10 +206,10 @@ void ChaCha20::Crypt(const unsigned char* m, unsigned char* c, size_t bytes)
QUARTERROUND( x3, x4, x9,x14);
);
- x0 += j0;
- x1 += j1;
- x2 += j2;
- x3 += j3;
+ x0 += 0x61707865;
+ x1 += 0x3320646e;
+ x2 += 0x79622d32;
+ x3 += 0x6b206574;
x4 += j4;
x5 += j5;
x6 += j6;
@@ -303,16 +260,65 @@ void ChaCha20::Crypt(const unsigned char* m, unsigned char* c, size_t bytes)
WriteLE32(c + 56, x14);
WriteLE32(c + 60, x15);
- if (bytes <= 64) {
- if (bytes < 64) {
- for (i = 0;i < bytes;++i) ctarget[i] = c[i];
- }
- input[12] = j12;
- input[13] = j13;
+ if (blocks == 1) {
+ input[8] = j12;
+ input[9] = j13;
return;
}
- bytes -= 64;
+ blocks -= 1;
c += 64;
m += 64;
}
}
+
+void ChaCha20::Keystream(unsigned char* c, size_t bytes)
+{
+ if (!bytes) return;
+ if (m_bufleft) {
+ unsigned reuse = std::min<size_t>(m_bufleft, bytes);
+ memcpy(c, m_buffer + 64 - m_bufleft, reuse);
+ m_bufleft -= reuse;
+ bytes -= reuse;
+ c += reuse;
+ }
+ if (bytes >= 64) {
+ size_t blocks = bytes / 64;
+ m_aligned.Keystream64(c, blocks);
+ c += blocks * 64;
+ bytes -= blocks * 64;
+ }
+ if (bytes) {
+ m_aligned.Keystream64(m_buffer, 1);
+ memcpy(c, m_buffer, bytes);
+ m_bufleft = 64 - bytes;
+ }
+}
+
+void ChaCha20::Crypt(const unsigned char* m, unsigned char* c, size_t bytes)
+{
+ if (!bytes) return;
+ if (m_bufleft) {
+ unsigned reuse = std::min<size_t>(m_bufleft, bytes);
+ for (unsigned i = 0; i < reuse; i++) {
+ c[i] = m[i] ^ m_buffer[64 - m_bufleft + i];
+ }
+ m_bufleft -= reuse;
+ bytes -= reuse;
+ c += reuse;
+ m += reuse;
+ }
+ if (bytes >= 64) {
+ size_t blocks = bytes / 64;
+ m_aligned.Crypt64(m, c, blocks);
+ c += blocks * 64;
+ m += blocks * 64;
+ bytes -= blocks * 64;
+ }
+ if (bytes) {
+ m_aligned.Keystream64(m_buffer, 1);
+ for (unsigned i = 0; i < bytes; i++) {
+ c[i] = m[i] ^ m_buffer[i];
+ }
+ m_bufleft = 64 - bytes;
+ }
+}
diff --git a/src/crypto/chacha20.h b/src/crypto/chacha20.h
index 624c083191..b286ef59fe 100644
--- a/src/crypto/chacha20.h
+++ b/src/crypto/chacha20.h
@@ -8,19 +8,69 @@
#include <cstdlib>
#include <stdint.h>
-/** A class for ChaCha20 256-bit stream cipher developed by Daniel J. Bernstein
- https://cr.yp.to/chacha/chacha-20080128.pdf */
+// classes for ChaCha20 256-bit stream cipher developed by Daniel J. Bernstein
+// https://cr.yp.to/chacha/chacha-20080128.pdf */
+
+/** ChaCha20 cipher that only operates on multiples of 64 bytes. */
+class ChaCha20Aligned
+{
+private:
+ uint32_t input[12];
+
+public:
+ ChaCha20Aligned();
+
+ /** Initialize a cipher with specified 32-byte key. */
+ ChaCha20Aligned(const unsigned char* key32);
+
+ /** set 32-byte key. */
+ void SetKey32(const unsigned char* key32);
+
+ /** set the 64-bit nonce. */
+ void SetIV(uint64_t iv);
+
+ /** set the 64bit block counter (pos seeks to byte position 64*pos). */
+ void Seek64(uint64_t pos);
+
+ /** outputs the keystream of size <64*blocks> into <c> */
+ void Keystream64(unsigned char* c, size_t blocks);
+
+ /** enciphers the message <input> of length <64*blocks> and write the enciphered representation into <output>
+ * Used for encryption and decryption (XOR)
+ */
+ void Crypt64(const unsigned char* input, unsigned char* output, size_t blocks);
+};
+
+/** Unrestricted ChaCha20 cipher. */
class ChaCha20
{
private:
- uint32_t input[16];
+ ChaCha20Aligned m_aligned;
+ unsigned char m_buffer[64] = {0};
+ unsigned m_bufleft{0};
public:
- ChaCha20();
- ChaCha20(const unsigned char* key, size_t keylen);
- void SetKey(const unsigned char* key, size_t keylen); //!< set key with flexible keylength; 256bit recommended */
- void SetIV(uint64_t iv); // set the 64bit nonce
- void Seek(uint64_t pos); // set the 64bit block counter
+ ChaCha20() = default;
+
+ /** Initialize a cipher with specified 32-byte key. */
+ ChaCha20(const unsigned char* key32) : m_aligned(key32) {}
+
+ /** set 32-byte key. */
+ void SetKey32(const unsigned char* key32)
+ {
+ m_aligned.SetKey32(key32);
+ m_bufleft = 0;
+ }
+
+ /** set the 64-bit nonce. */
+ void SetIV(uint64_t iv) { m_aligned.SetIV(iv); }
+
+ /** set the 64bit block counter (pos seeks to byte position 64*pos). */
+ void Seek64(uint64_t pos)
+ {
+ m_aligned.Seek64(pos);
+ m_bufleft = 0;
+ }
/** outputs the keystream of size <bytes> into <c> */
void Keystream(unsigned char* c, size_t bytes);
diff --git a/src/crypto/chacha_poly_aead.cpp b/src/crypto/chacha_poly_aead.cpp
index 6511f46adc..119ad6902f 100644
--- a/src/crypto/chacha_poly_aead.cpp
+++ b/src/crypto/chacha_poly_aead.cpp
@@ -36,8 +36,9 @@ ChaCha20Poly1305AEAD::ChaCha20Poly1305AEAD(const unsigned char* K_1, size_t K_1_
assert(K_1_len == CHACHA20_POLY1305_AEAD_KEY_LEN);
assert(K_2_len == CHACHA20_POLY1305_AEAD_KEY_LEN);
- m_chacha_header.SetKey(K_1, CHACHA20_POLY1305_AEAD_KEY_LEN);
- m_chacha_main.SetKey(K_2, CHACHA20_POLY1305_AEAD_KEY_LEN);
+ static_assert(CHACHA20_POLY1305_AEAD_KEY_LEN == 32);
+ m_chacha_header.SetKey32(K_1);
+ m_chacha_main.SetKey32(K_2);
// set the cached sequence number to uint64 max which hints for an unset cache.
// we can't hit uint64 max since the rekey rule (which resets the sequence number) is 1GB
@@ -62,7 +63,7 @@ bool ChaCha20Poly1305AEAD::Crypt(uint64_t seqnr_payload, uint64_t seqnr_aad, int
// block counter 0 for the poly1305 key
// use lower 32bytes for the poly1305 key
// (throws away 32 unused bytes (upper 32) from this ChaCha20 round)
- m_chacha_main.Seek(0);
+ m_chacha_main.Seek64(0);
m_chacha_main.Crypt(poly_key, poly_key, sizeof(poly_key));
// if decrypting, verify the tag prior to decryption
@@ -85,7 +86,7 @@ bool ChaCha20Poly1305AEAD::Crypt(uint64_t seqnr_payload, uint64_t seqnr_aad, int
if (m_cached_aad_seqnr != seqnr_aad) {
m_cached_aad_seqnr = seqnr_aad;
m_chacha_header.SetIV(seqnr_aad);
- m_chacha_header.Seek(0);
+ m_chacha_header.Seek64(0);
m_chacha_header.Keystream(m_aad_keystream_buffer, CHACHA20_ROUND_OUTPUT);
}
// crypt the AAD (3 bytes message length) with given position in AAD cipher instance keystream
@@ -94,7 +95,7 @@ bool ChaCha20Poly1305AEAD::Crypt(uint64_t seqnr_payload, uint64_t seqnr_aad, int
dest[2] = src[2] ^ m_aad_keystream_buffer[aad_pos + 2];
// Set the playload ChaCha instance block counter to 1 and crypt the payload
- m_chacha_main.Seek(1);
+ m_chacha_main.Seek64(1);
m_chacha_main.Crypt(src + CHACHA20_POLY1305_AEAD_AAD_LEN, dest + CHACHA20_POLY1305_AEAD_AAD_LEN, src_len - CHACHA20_POLY1305_AEAD_AAD_LEN);
// If encrypting, calculate and append tag
@@ -117,7 +118,7 @@ bool ChaCha20Poly1305AEAD::GetLength(uint32_t* len24_out, uint64_t seqnr_aad, in
// we need to calculate the 64 keystream bytes since we reached a new aad sequence number
m_cached_aad_seqnr = seqnr_aad;
m_chacha_header.SetIV(seqnr_aad); // use LE for the nonce
- m_chacha_header.Seek(0); // block counter 0
+ m_chacha_header.Seek64(0); // block counter 0
m_chacha_header.Keystream(m_aad_keystream_buffer, CHACHA20_ROUND_OUTPUT); // write keystream to the cache
}
diff --git a/src/crypto/muhash.cpp b/src/crypto/muhash.cpp
index 26f0248663..471ee6af97 100644
--- a/src/crypto/muhash.cpp
+++ b/src/crypto/muhash.cpp
@@ -299,7 +299,7 @@ Num3072 MuHash3072::ToNum3072(Span<const unsigned char> in) {
unsigned char tmp[Num3072::BYTE_SIZE];
uint256 hashed_in{(HashWriter{} << in).GetSHA256()};
- ChaCha20(hashed_in.data(), hashed_in.size()).Keystream(tmp, Num3072::BYTE_SIZE);
+ ChaCha20Aligned(hashed_in.data()).Keystream64(tmp, Num3072::BYTE_SIZE / 64);
Num3072 out{tmp};
return out;
diff --git a/src/fs.cpp b/src/fs.cpp
index 0429b8cd0f..64411fe41f 100644
--- a/src/fs.cpp
+++ b/src/fs.cpp
@@ -60,36 +60,20 @@ FileLock::~FileLock()
}
}
-static bool IsWSL()
-{
- struct utsname uname_data;
- return uname(&uname_data) == 0 && std::string(uname_data.version).find("Microsoft") != std::string::npos;
-}
-
bool FileLock::TryLock()
{
if (fd == -1) {
return false;
}
- // Exclusive file locking is broken on WSL using fcntl (issue #18622)
- // This workaround can be removed once the bug on WSL is fixed
- static const bool is_wsl = IsWSL();
- if (is_wsl) {
- if (flock(fd, LOCK_EX | LOCK_NB) == -1) {
- reason = GetErrorReason();
- return false;
- }
- } else {
- struct flock lock;
- lock.l_type = F_WRLCK;
- lock.l_whence = SEEK_SET;
- lock.l_start = 0;
- lock.l_len = 0;
- if (fcntl(fd, F_SETLK, &lock) == -1) {
- reason = GetErrorReason();
- return false;
- }
+ struct flock lock;
+ lock.l_type = F_WRLCK;
+ lock.l_whence = SEEK_SET;
+ lock.l_start = 0;
+ lock.l_len = 0;
+ if (fcntl(fd, F_SETLK, &lock) == -1) {
+ reason = GetErrorReason();
+ return false;
}
return true;
diff --git a/src/i2p.cpp b/src/i2p.cpp
index 586ee649a7..8e98440747 100644
--- a/src/i2p.cpp
+++ b/src/i2p.cpp
@@ -336,7 +336,7 @@ void Session::GenerateAndSavePrivateKey(const Sock& sock)
{
DestGenerate(sock);
- // umask is set to 077 in init.cpp, which is ok (unless -sysperms is given)
+ // umask is set to 0077 in util/system.cpp, which is ok.
if (!WriteBinaryFile(m_private_key_file,
std::string(m_private_key.begin(), m_private_key.end()))) {
throw std::runtime_error(
diff --git a/src/init.cpp b/src/init.cpp
index 5b486854e0..73ae36e4f7 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -459,11 +459,6 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-startupnotify=<cmd>", "Execute command on startup.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-shutdownnotify=<cmd>", "Execute command immediately before beginning shutdown. The need for shutdown may be urgent, so be careful not to delay it long (if the command doesn't require interaction with the server, consider having it fork into the background).", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
#endif
-#ifndef WIN32
- argsman.AddArg("-sysperms", "Create new files with system default permissions, instead of umask 077 (only effective with disabled wallet functionality)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
-#else
- hidden_args.emplace_back("-sysperms");
-#endif
argsman.AddArg("-txindex", strprintf("Maintain a full transaction index, used by the getrawtransaction rpc call (default: %u)", DEFAULT_TXINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-blockfilterindex=<type>",
strprintf("Maintain an index of compact filters by block (default: %s, values: %s).", DEFAULT_BLOCKFILTERINDEX, ListBlockFilterTypes()) +
@@ -821,10 +816,6 @@ bool AppInitBasicSetup(const ArgsManager& args)
}
#ifndef WIN32
- if (!args.GetBoolArg("-sysperms", false)) {
- umask(077);
- }
-
// Clean shutdown on SIGTERM
registerSignalHandler(SIGTERM, HandleSIGTERM);
registerSignalHandler(SIGINT, HandleSIGTERM);
diff --git a/src/interfaces/node.h b/src/interfaces/node.h
index ce6c44e2bc..7e87d5a523 100644
--- a/src/interfaces/node.h
+++ b/src/interfaces/node.h
@@ -177,11 +177,8 @@ public:
//! Is initial block download.
virtual bool isInitialBlockDownload() = 0;
- //! Get reindex.
- virtual bool getReindex() = 0;
-
- //! Get importing.
- virtual bool getImporting() = 0;
+ //! Is loading blocks.
+ virtual bool isLoadingBlocks() = 0;
//! Set network active.
virtual void setNetworkActive(bool active) = 0;
diff --git a/src/logging/timer.h b/src/logging/timer.h
index ea0821dede..993ba99c25 100644
--- a/src/logging/timer.h
+++ b/src/logging/timer.h
@@ -12,6 +12,7 @@
#include <util/types.h>
#include <chrono>
+#include <optional>
#include <string>
@@ -28,14 +29,14 @@ public:
std::string prefix,
std::string end_msg,
BCLog::LogFlags log_category = BCLog::LogFlags::ALL,
- bool msg_on_completion = true) :
- m_prefix(std::move(prefix)),
- m_title(std::move(end_msg)),
- m_log_category(log_category),
- m_message_on_completion(msg_on_completion)
+ bool msg_on_completion = true)
+ : m_prefix(std::move(prefix)),
+ m_title(std::move(end_msg)),
+ m_log_category(log_category),
+ m_message_on_completion(msg_on_completion)
{
this->Log(strprintf("%s started", m_title));
- m_start_t = GetTime<std::chrono::microseconds>();
+ m_start_t = std::chrono::steady_clock::now();
}
~Timer()
@@ -60,24 +61,25 @@ public:
std::string LogMsg(const std::string& msg)
{
- const auto end_time = GetTime<std::chrono::microseconds>() - m_start_t;
- if (m_start_t.count() <= 0) {
+ const auto end_time{std::chrono::steady_clock::now()};
+ if (!m_start_t) {
return strprintf("%s: %s", m_prefix, msg);
}
+ const auto duration{end_time - *m_start_t};
if constexpr (std::is_same<TimeType, std::chrono::microseconds>::value) {
- return strprintf("%s: %s (%iμs)", m_prefix, msg, end_time.count());
+ return strprintf("%s: %s (%iμs)", m_prefix, msg, Ticks<std::chrono::microseconds>(duration));
} else if constexpr (std::is_same<TimeType, std::chrono::milliseconds>::value) {
- return strprintf("%s: %s (%.2fms)", m_prefix, msg, end_time.count() * 0.001);
+ return strprintf("%s: %s (%.2fms)", m_prefix, msg, Ticks<MillisecondsDouble>(duration));
} else if constexpr (std::is_same<TimeType, std::chrono::seconds>::value) {
- return strprintf("%s: %s (%.2fs)", m_prefix, msg, end_time.count() * 0.000001);
+ return strprintf("%s: %s (%.2fs)", m_prefix, msg, Ticks<SecondsDouble>(duration));
} else {
static_assert(ALWAYS_FALSE<TimeType>, "Error: unexpected time type");
}
}
private:
- std::chrono::microseconds m_start_t{};
+ std::optional<std::chrono::steady_clock::time_point> m_start_t{};
//! Log prefix; usually the name of the function this was created in.
const std::string m_prefix;
diff --git a/src/mapport.cpp b/src/mapport.cpp
index e6a473c185..84b889f22d 100644
--- a/src/mapport.cpp
+++ b/src/mapport.cpp
@@ -27,9 +27,9 @@
#include <miniupnpc/miniupnpc.h>
#include <miniupnpc/upnpcommands.h>
#include <miniupnpc/upnperrors.h>
-// The minimum supported miniUPnPc API version is set to 10. This keeps compatibility
-// with Ubuntu 16.04 LTS and Debian 8 libminiupnpc-dev packages.
-static_assert(MINIUPNPC_API_VERSION >= 10, "miniUPnPc API version >= 10 assumed");
+// The minimum supported miniUPnPc API version is set to 17. This excludes
+// versions with known vulnerabilities.
+static_assert(MINIUPNPC_API_VERSION >= 17, "miniUPnPc API version >= 17 assumed");
#endif // USE_UPNP
#include <atomic>
@@ -159,11 +159,7 @@ static bool ProcessUpnp()
char lanaddr[64];
int error = 0;
-#if MINIUPNPC_API_VERSION < 14
- devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, &error);
-#else
devlist = upnpDiscover(2000, multicastif, minissdpdpath, 0, 0, 2, &error);
-#endif
struct UPNPUrls urls;
struct IGDdatas data;
diff --git a/src/net.cpp b/src/net.cpp
index 98500bd30e..884fe25543 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -825,7 +825,13 @@ size_t CConnman::SocketSendData(CNode& node) const
if (!node.m_sock) {
break;
}
- nBytes = node.m_sock->Send(reinterpret_cast<const char*>(data.data()) + node.nSendOffset, data.size() - node.nSendOffset, MSG_NOSIGNAL | MSG_DONTWAIT);
+ int flags = MSG_NOSIGNAL | MSG_DONTWAIT;
+#ifdef MSG_MORE
+ if (it + 1 != node.vSendMsg.end()) {
+ flags |= MSG_MORE;
+ }
+#endif
+ nBytes = node.m_sock->Send(reinterpret_cast<const char*>(data.data()) + node.nSendOffset, data.size() - node.nSendOffset, flags);
}
if (nBytes > 0) {
node.m_last_send = GetTime<std::chrono::seconds>();
@@ -1665,7 +1671,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// Therefore, we do not add them to addrman in the first place.
// In case previously unreachable networks become reachable
// (e.g. in case of -onlynet changes by the user), fixed seeds will
- // be loaded only for networks for which we have no addressses.
+ // be loaded only for networks for which we have no addresses.
seed_addrs.erase(std::remove_if(seed_addrs.begin(), seed_addrs.end(),
[&fixed_seed_networks](const CAddress& addr) { return fixed_seed_networks.count(addr.GetNetwork()) == 0; }),
seed_addrs.end());
diff --git a/src/netaddress.cpp b/src/netaddress.cpp
index 782b692d30..7672eb66a0 100644
--- a/src/netaddress.cpp
+++ b/src/netaddress.cpp
@@ -1106,29 +1106,6 @@ bool CSubNet::IsValid() const
return valid;
}
-bool CSubNet::SanityCheck() const
-{
- switch (network.m_net) {
- case NET_IPV4:
- case NET_IPV6:
- break;
- case NET_ONION:
- case NET_I2P:
- case NET_CJDNS:
- return true;
- case NET_INTERNAL:
- case NET_UNROUTABLE:
- case NET_MAX:
- return false;
- }
-
- for (size_t x = 0; x < network.m_addr.size(); ++x) {
- if (network.m_addr[x] & ~netmask[x]) return false;
- }
-
- return true;
-}
-
bool operator==(const CSubNet& a, const CSubNet& b)
{
return a.valid == b.valid && a.network == b.network && !memcmp(a.netmask, b.netmask, 16);
diff --git a/src/netaddress.h b/src/netaddress.h
index 7f782674d3..c4e521fe4b 100644
--- a/src/netaddress.h
+++ b/src/netaddress.h
@@ -476,8 +476,6 @@ protected:
/// Is this value valid? (only used to signal parse errors)
bool valid;
- bool SanityCheck() const;
-
public:
/**
* Construct an invalid subnet (empty, `Match()` always returns false).
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index eda359568f..9c2db5ff0c 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -295,8 +295,7 @@ public:
bool isInitialBlockDownload() override {
return chainman().ActiveChainstate().IsInitialBlockDownload();
}
- bool getReindex() override { return node::fReindex; }
- bool getImporting() override { return node::fImporting; }
+ bool isLoadingBlocks() override { return node::fReindex || node::fImporting; }
void setNetworkActive(bool active) override
{
if (m_context->connman) {
diff --git a/src/psbt.cpp b/src/psbt.cpp
index 50ccd9e2c0..fe45f2318c 100644
--- a/src/psbt.cpp
+++ b/src/psbt.cpp
@@ -132,6 +132,18 @@ void PSBTInput::FillSignatureData(SignatureData& sigdata) const
for (const auto& [pubkey, leaf_origin] : m_tap_bip32_paths) {
sigdata.taproot_misc_pubkeys.emplace(pubkey, leaf_origin);
}
+ for (const auto& [hash, preimage] : ripemd160_preimages) {
+ sigdata.ripemd160_preimages.emplace(std::vector<unsigned char>(hash.begin(), hash.end()), preimage);
+ }
+ for (const auto& [hash, preimage] : sha256_preimages) {
+ sigdata.sha256_preimages.emplace(std::vector<unsigned char>(hash.begin(), hash.end()), preimage);
+ }
+ for (const auto& [hash, preimage] : hash160_preimages) {
+ sigdata.hash160_preimages.emplace(std::vector<unsigned char>(hash.begin(), hash.end()), preimage);
+ }
+ for (const auto& [hash, preimage] : hash256_preimages) {
+ sigdata.hash256_preimages.emplace(std::vector<unsigned char>(hash.begin(), hash.end()), preimage);
+ }
}
void PSBTInput::FromSignatureData(const SignatureData& sigdata)
diff --git a/src/psbt.h b/src/psbt.h
index 8fafadcea9..c497584f36 100644
--- a/src/psbt.h
+++ b/src/psbt.h
@@ -889,7 +889,7 @@ struct PSBTOutput
} else if (key.size() != 33) {
throw std::ios_base::failure("Output Taproot BIP32 keypath key is not at 33 bytes");
}
- XOnlyPubKey xonly(uint256({key.begin() + 1, key.begin() + 33}));
+ XOnlyPubKey xonly(uint256(Span<uint8_t>(key).last(32)));
std::set<uint256> leaf_hashes;
uint64_t value_len = ReadCompactSize(s);
size_t before_hashes = s.size();
@@ -1164,7 +1164,7 @@ struct PartiallySignedTransaction
// Make sure that we got an unsigned tx
if (!tx) {
- throw std::ios_base::failure("No unsigned transcation was provided");
+ throw std::ios_base::failure("No unsigned transaction was provided");
}
// Read input data
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index a7ffd367d7..c025d4e3aa 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -647,6 +647,8 @@ void BitcoinGUI::setClientModel(ClientModel *_clientModel, interfaces::BlockAndH
// initialize the disable state of the tray icon with the current value in the model.
trayIcon->setVisible(optionsModel->getShowTrayIcon());
}
+
+ m_mask_values_action->setChecked(_clientModel->getOptionsModel()->getOption(OptionsModel::OptionID::MaskValues).toBool());
} else {
if(trayIconMenu)
{
@@ -1071,7 +1073,7 @@ void BitcoinGUI::setNumBlocks(int count, const QDateTime& blockDate, double nVer
statusBar()->clearMessage();
// Acquire current block source
- enum BlockSource blockSource = clientModel->getBlockSource();
+ BlockSource blockSource{clientModel->getBlockSource()};
switch (blockSource) {
case BlockSource::NETWORK:
if (synctype == SyncType::HEADER_PRESYNC) {
@@ -1091,9 +1093,6 @@ void BitcoinGUI::setNumBlocks(int count, const QDateTime& blockDate, double nVer
progressBarLabel->setText(tr("Processing blocks on disk…"));
}
break;
- case BlockSource::REINDEX:
- progressBarLabel->setText(tr("Reindexing blocks on disk…"));
- break;
case BlockSource::NONE:
if (synctype != SyncType::BLOCK_SYNC) {
return;
diff --git a/src/qt/clientmodel.cpp b/src/qt/clientmodel.cpp
index c0d1a0e226..69ed5d3a90 100644
--- a/src/qt/clientmodel.cpp
+++ b/src/qt/clientmodel.cpp
@@ -146,15 +146,10 @@ uint256 ClientModel::getBestBlockHash()
return m_cached_tip_blocks;
}
-enum BlockSource ClientModel::getBlockSource() const
+BlockSource ClientModel::getBlockSource() const
{
- if (m_node.getReindex())
- return BlockSource::REINDEX;
- else if (m_node.getImporting())
- return BlockSource::DISK;
- else if (getNumConnections() > 0)
- return BlockSource::NETWORK;
-
+ if (m_node.isLoadingBlocks()) return BlockSource::DISK;
+ if (getNumConnections() > 0) return BlockSource::NETWORK;
return BlockSource::NONE;
}
diff --git a/src/qt/clientmodel.h b/src/qt/clientmodel.h
index 9ff64fe772..493e18a07d 100644
--- a/src/qt/clientmodel.h
+++ b/src/qt/clientmodel.h
@@ -32,9 +32,8 @@ QT_END_NAMESPACE
enum class BlockSource {
NONE,
- REINDEX,
DISK,
- NETWORK
+ NETWORK,
};
enum class SyncType {
@@ -72,8 +71,8 @@ public:
int getHeaderTipHeight() const;
int64_t getHeaderTipTime() const;
- //! Returns enum BlockSource of the current importing/syncing state
- enum BlockSource getBlockSource() const;
+ //! Returns the block source of the current importing/syncing state
+ BlockSource getBlockSource() const;
//! Return warnings to be displayed in status bar
QString getStatusBarWarnings() const;
diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp
index 68f2139057..bee8fafddc 100644
--- a/src/qt/optionsmodel.cpp
+++ b/src/qt/optionsmodel.cpp
@@ -60,7 +60,7 @@ static const char* SettingName(OptionsModel::OptionID option)
}
/** Call node.updateRwSetting() with Bitcoin 22.x workaround. */
-static void UpdateRwSetting(interfaces::Node& node, OptionsModel::OptionID option, const util::SettingsValue& value)
+static void UpdateRwSetting(interfaces::Node& node, OptionsModel::OptionID option, const std::string& suffix, const util::SettingsValue& value)
{
if (value.isNum() &&
(option == OptionsModel::DatabaseCache ||
@@ -74,9 +74,9 @@ static void UpdateRwSetting(interfaces::Node& node, OptionsModel::OptionID optio
// in later releases by https://github.com/bitcoin/bitcoin/pull/24498.
// If new numeric settings are added, they can be written as numbers
// instead of strings, because bitcoin 22.x will not try to read these.
- node.updateRwSetting(SettingName(option), value.getValStr());
+ node.updateRwSetting(SettingName(option) + suffix, value.getValStr());
} else {
- node.updateRwSetting(SettingName(option), value);
+ node.updateRwSetting(SettingName(option) + suffix, value);
}
}
@@ -132,13 +132,6 @@ void OptionsModel::addOverriddenOption(const std::string &option)
bool OptionsModel::Init(bilingual_str& error)
{
// Initialize display settings from stored settings.
- m_prune_size_gb = PruneSizeGB(node().getPersistentSetting("prune"));
- ProxySetting proxy = ParseProxyString(SettingToString(node().getPersistentSetting("proxy"), GetDefaultProxyAddress().toStdString()));
- m_proxy_ip = proxy.ip;
- m_proxy_port = proxy.port;
- ProxySetting onion = ParseProxyString(SettingToString(node().getPersistentSetting("onion"), GetDefaultProxyAddress().toStdString()));
- m_onion_ip = onion.ip;
- m_onion_port = onion.port;
language = QString::fromStdString(SettingToString(node().getPersistentSetting("lang"), ""));
checkAndMigrate();
@@ -228,6 +221,8 @@ bool OptionsModel::Init(bilingual_str& error)
m_use_embedded_monospaced_font = settings.value("UseEmbeddedMonospacedFont").toBool();
Q_EMIT useEmbeddedMonospacedFontChanged(m_use_embedded_monospaced_font);
+ m_mask_values = settings.value("mask_values", false).toBool();
+
return true;
}
@@ -319,8 +314,6 @@ void OptionsModel::SetPruneTargetGB(int prune_target_gb)
const util::SettingsValue cur_value = node().getPersistentSetting("prune");
const util::SettingsValue new_value = PruneSetting(prune_target_gb > 0, prune_target_gb);
- m_prune_size_gb = prune_target_gb;
-
// Force setting to take effect. It is still safe to change the value at
// this point because this function is only called after the intro screen is
// shown, before the node starts.
@@ -333,7 +326,12 @@ void OptionsModel::SetPruneTargetGB(int prune_target_gb)
PruneSizeGB(cur_value) != PruneSizeGB(new_value)) {
// Call UpdateRwSetting() instead of setOption() to avoid setting
// RestartRequired flag
- UpdateRwSetting(node(), Prune, new_value);
+ UpdateRwSetting(node(), Prune, "", new_value);
+ }
+
+ // Keep previous pruning size, if pruning was disabled.
+ if (PruneEnabled(cur_value)) {
+ UpdateRwSetting(node(), Prune, "-prev", PruneEnabled(new_value) ? util::SettingsValue{} : cur_value);
}
}
@@ -361,9 +359,9 @@ bool OptionsModel::setData(const QModelIndex & index, const QVariant & value, in
return successful;
}
-QVariant OptionsModel::getOption(OptionID option) const
+QVariant OptionsModel::getOption(OptionID option, const std::string& suffix) const
{
- auto setting = [&]{ return node().getPersistentSetting(SettingName(option)); };
+ auto setting = [&]{ return node().getPersistentSetting(SettingName(option) + suffix); };
QSettings settings;
switch (option) {
@@ -390,19 +388,30 @@ QVariant OptionsModel::getOption(OptionID option) const
// default proxy
case ProxyUse:
+ case ProxyUseTor:
return ParseProxyString(SettingToString(setting(), "")).is_set;
case ProxyIP:
- return m_proxy_ip;
+ case ProxyIPTor: {
+ ProxySetting proxy = ParseProxyString(SettingToString(setting(), ""));
+ if (proxy.is_set) {
+ return proxy.ip;
+ } else if (suffix.empty()) {
+ return getOption(option, "-prev");
+ } else {
+ return ParseProxyString(GetDefaultProxyAddress().toStdString()).ip;
+ }
+ }
case ProxyPort:
- return m_proxy_port;
-
- // separate Tor proxy
- case ProxyUseTor:
- return ParseProxyString(SettingToString(setting(), "")).is_set;
- case ProxyIPTor:
- return m_onion_ip;
- case ProxyPortTor:
- return m_onion_port;
+ case ProxyPortTor: {
+ ProxySetting proxy = ParseProxyString(SettingToString(setting(), ""));
+ if (proxy.is_set) {
+ return proxy.port;
+ } else if (suffix.empty()) {
+ return getOption(option, "-prev");
+ } else {
+ return ParseProxyString(GetDefaultProxyAddress().toStdString()).port;
+ }
+ }
#ifdef ENABLE_WALLET
case SpendZeroConfChange:
@@ -427,7 +436,9 @@ QVariant OptionsModel::getOption(OptionID option) const
case Prune:
return PruneEnabled(setting());
case PruneSize:
- return m_prune_size_gb;
+ return PruneEnabled(setting()) ? PruneSizeGB(setting()) :
+ suffix.empty() ? getOption(option, "-prev") :
+ DEFAULT_PRUNE_TARGET_GB;
case DatabaseCache:
return qlonglong(SettingToInt(setting(), nDefaultDbCache));
case ThreadsScriptVerif:
@@ -436,15 +447,17 @@ QVariant OptionsModel::getOption(OptionID option) const
return SettingToBool(setting(), DEFAULT_LISTEN);
case Server:
return SettingToBool(setting(), false);
+ case MaskValues:
+ return m_mask_values;
default:
return QVariant();
}
}
-bool OptionsModel::setOption(OptionID option, const QVariant& value)
+bool OptionsModel::setOption(OptionID option, const QVariant& value, const std::string& suffix)
{
- auto changed = [&] { return value.isValid() && value != getOption(option); };
- auto update = [&](const util::SettingsValue& value) { return UpdateRwSetting(node(), option, value); };
+ auto changed = [&] { return value.isValid() && value != getOption(option, suffix); };
+ auto update = [&](const util::SettingsValue& value) { return UpdateRwSetting(node(), option, suffix, value); };
bool successful = true; /* set to false on parse error */
QSettings settings;
@@ -482,52 +495,60 @@ bool OptionsModel::setOption(OptionID option, const QVariant& value)
// default proxy
case ProxyUse:
if (changed()) {
- update(ProxyString(value.toBool(), m_proxy_ip, m_proxy_port));
- setRestartRequired(true);
+ if (suffix.empty() && !value.toBool()) setOption(option, true, "-prev");
+ update(ProxyString(value.toBool(), getOption(ProxyIP).toString(), getOption(ProxyPort).toString()));
+ if (suffix.empty() && value.toBool()) UpdateRwSetting(node(), option, "-prev", {});
+ if (suffix.empty()) setRestartRequired(true);
}
break;
case ProxyIP:
if (changed()) {
- m_proxy_ip = value.toString();
- if (getOption(ProxyUse).toBool()) {
- update(ProxyString(true, m_proxy_ip, m_proxy_port));
- setRestartRequired(true);
+ if (suffix.empty() && !getOption(ProxyUse).toBool()) {
+ setOption(option, value, "-prev");
+ } else {
+ update(ProxyString(true, value.toString(), getOption(ProxyPort).toString()));
}
+ if (suffix.empty() && getOption(ProxyUse).toBool()) setRestartRequired(true);
}
break;
case ProxyPort:
if (changed()) {
- m_proxy_port = value.toString();
- if (getOption(ProxyUse).toBool()) {
- update(ProxyString(true, m_proxy_ip, m_proxy_port));
- setRestartRequired(true);
+ if (suffix.empty() && !getOption(ProxyUse).toBool()) {
+ setOption(option, value, "-prev");
+ } else {
+ update(ProxyString(true, getOption(ProxyIP).toString(), value.toString()));
}
+ if (suffix.empty() && getOption(ProxyUse).toBool()) setRestartRequired(true);
}
break;
// separate Tor proxy
case ProxyUseTor:
if (changed()) {
- update(ProxyString(value.toBool(), m_onion_ip, m_onion_port));
- setRestartRequired(true);
+ if (suffix.empty() && !value.toBool()) setOption(option, true, "-prev");
+ update(ProxyString(value.toBool(), getOption(ProxyIPTor).toString(), getOption(ProxyPortTor).toString()));
+ if (suffix.empty() && value.toBool()) UpdateRwSetting(node(), option, "-prev", {});
+ if (suffix.empty()) setRestartRequired(true);
}
break;
case ProxyIPTor:
if (changed()) {
- m_onion_ip = value.toString();
- if (getOption(ProxyUseTor).toBool()) {
- update(ProxyString(true, m_onion_ip, m_onion_port));
- setRestartRequired(true);
+ if (suffix.empty() && !getOption(ProxyUseTor).toBool()) {
+ setOption(option, value, "-prev");
+ } else {
+ update(ProxyString(true, value.toString(), getOption(ProxyPortTor).toString()));
}
+ if (suffix.empty() && getOption(ProxyUseTor).toBool()) setRestartRequired(true);
}
break;
case ProxyPortTor:
if (changed()) {
- m_onion_port = value.toString();
- if (getOption(ProxyUseTor).toBool()) {
- update(ProxyString(true, m_onion_ip, m_onion_port));
- setRestartRequired(true);
+ if (suffix.empty() && !getOption(ProxyUseTor).toBool()) {
+ setOption(option, value, "-prev");
+ } else {
+ update(ProxyString(true, getOption(ProxyIPTor).toString(), value.toString()));
}
+ if (suffix.empty() && getOption(ProxyUseTor).toBool()) setRestartRequired(true);
}
break;
@@ -581,17 +602,20 @@ bool OptionsModel::setOption(OptionID option, const QVariant& value)
break;
case Prune:
if (changed()) {
- update(PruneSetting(value.toBool(), m_prune_size_gb));
- setRestartRequired(true);
+ if (suffix.empty() && !value.toBool()) setOption(option, true, "-prev");
+ update(PruneSetting(value.toBool(), getOption(PruneSize).toInt()));
+ if (suffix.empty() && value.toBool()) UpdateRwSetting(node(), option, "-prev", {});
+ if (suffix.empty()) setRestartRequired(true);
}
break;
case PruneSize:
if (changed()) {
- m_prune_size_gb = ParsePruneSizeGB(value);
- if (getOption(Prune).toBool()) {
- update(PruneSetting(true, m_prune_size_gb));
- setRestartRequired(true);
+ if (suffix.empty() && !getOption(Prune).toBool()) {
+ setOption(option, value, "-prev");
+ } else {
+ update(PruneSetting(true, ParsePruneSizeGB(value)));
}
+ if (suffix.empty() && getOption(Prune).toBool()) setRestartRequired(true);
}
break;
case DatabaseCache:
@@ -613,6 +637,10 @@ bool OptionsModel::setOption(OptionID option, const QVariant& value)
setRestartRequired(true);
}
break;
+ case MaskValues:
+ m_mask_values = value.toBool();
+ settings.setValue("mask_values", m_mask_values);
+ break;
default:
break;
}
diff --git a/src/qt/optionsmodel.h b/src/qt/optionsmodel.h
index ccab9c19ad..f28a1087ba 100644
--- a/src/qt/optionsmodel.h
+++ b/src/qt/optionsmodel.h
@@ -72,6 +72,7 @@ public:
Listen, // bool
Server, // bool
EnablePSBTControls, // bool
+ MaskValues, // bool
OptionIDRowCount,
};
@@ -81,8 +82,8 @@ public:
int rowCount(const QModelIndex & parent = QModelIndex()) const override;
QVariant data(const QModelIndex & index, int role = Qt::DisplayRole) const override;
bool setData(const QModelIndex & index, const QVariant & value, int role = Qt::EditRole) override;
- QVariant getOption(OptionID option) const;
- bool setOption(OptionID option, const QVariant& value);
+ QVariant getOption(OptionID option, const std::string& suffix="") const;
+ bool setOption(OptionID option, const QVariant& value, const std::string& suffix="");
/** Updates current unit in memory, settings and emits displayUnitChanged(new_unit) signal */
void setDisplayUnit(const QVariant& new_unit);
@@ -123,15 +124,7 @@ private:
bool fCoinControlFeatures;
bool m_sub_fee_from_amount;
bool m_enable_psbt_controls;
-
- //! In-memory settings for display. These are stored persistently by the
- //! bitcoin node but it's also nice to store them in memory to prevent them
- //! getting cleared when enable/disable toggles are used in the GUI.
- int m_prune_size_gb;
- QString m_proxy_ip;
- QString m_proxy_port;
- QString m_onion_ip;
- QString m_onion_port;
+ bool m_mask_values;
/* settings that were overridden by command-line */
QString strOverriddenByCommandLine;
diff --git a/src/qt/overviewpage.cpp b/src/qt/overviewpage.cpp
index c9caec39cf..0f00d167f7 100644
--- a/src/qt/overviewpage.cpp
+++ b/src/qt/overviewpage.cpp
@@ -173,6 +173,7 @@ void OverviewPage::handleTransactionClicked(const QModelIndex &index)
void OverviewPage::setPrivacy(bool privacy)
{
m_privacy = privacy;
+ clientModel->getOptionsModel()->setOption(OptionsModel::OptionID::MaskValues, privacy);
const auto& balances = walletModel->getCachedBalance();
if (balances.balance != -1) {
setBalance(balances);
@@ -262,7 +263,6 @@ void OverviewPage::setWalletModel(WalletModel *model)
// Set up transaction list
filter.reset(new TransactionFilterProxy());
filter->setSourceModel(model->getTransactionTableModel());
- filter->setLimit(NUM_ITEMS);
filter->setDynamicSortFilter(true);
filter->setSortRole(Qt::EditRole);
filter->setShowInactive(false);
@@ -271,6 +271,10 @@ void OverviewPage::setWalletModel(WalletModel *model)
ui->listTransactions->setModel(filter.get());
ui->listTransactions->setModelColumn(TransactionTableModel::ToAddress);
+ connect(filter.get(), &TransactionFilterProxy::rowsInserted, this, &OverviewPage::LimitTransactionRows);
+ connect(filter.get(), &TransactionFilterProxy::rowsRemoved, this, &OverviewPage::LimitTransactionRows);
+ connect(filter.get(), &TransactionFilterProxy::rowsMoved, this, &OverviewPage::LimitTransactionRows);
+ LimitTransactionRows();
// Keep up to date with wallet
setBalance(model->getCachedBalance());
connect(model, &WalletModel::balanceChanged, this, &OverviewPage::setBalance);
@@ -299,6 +303,16 @@ void OverviewPage::changeEvent(QEvent* e)
QWidget::changeEvent(e);
}
+// Only show most recent NUM_ITEMS rows
+void OverviewPage::LimitTransactionRows()
+{
+ if (filter && ui->listTransactions && ui->listTransactions->model() && filter.get() == ui->listTransactions->model()) {
+ for (int i = 0; i < filter->rowCount(); ++i) {
+ ui->listTransactions->setRowHidden(i, i >= NUM_ITEMS);
+ }
+ }
+}
+
void OverviewPage::updateDisplayUnit()
{
if (walletModel && walletModel->getOptionsModel()) {
diff --git a/src/qt/overviewpage.h b/src/qt/overviewpage.h
index 2ca38b78dd..5c487ee116 100644
--- a/src/qt/overviewpage.h
+++ b/src/qt/overviewpage.h
@@ -60,6 +60,7 @@ private:
std::unique_ptr<TransactionFilterProxy> filter;
private Q_SLOTS:
+ void LimitTransactionRows();
void updateDisplayUnit();
void handleTransactionClicked(const QModelIndex &index);
void updateAlerts(const QString &warnings);
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index 843cd46d13..b46a3c039b 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -780,8 +780,8 @@ void RPCConsole::addWallet(WalletModel * const walletModel)
{
// use name for text and wallet model for internal data object (to allow to move to a wallet id later)
ui->WalletSelector->addItem(walletModel->getDisplayName(), QVariant::fromValue(walletModel));
- if (ui->WalletSelector->count() == 2 && !isVisible()) {
- // First wallet added, set to default so long as the window isn't presently visible (and potentially in use)
+ if (ui->WalletSelector->count() == 2) {
+ // First wallet added, set to default to match wallet RPC behavior
ui->WalletSelector->setCurrentIndex(1);
}
if (ui->WalletSelector->count() > 2) {
diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp
index 4a50416fc6..89dd0ada62 100644
--- a/src/qt/sendcoinsdialog.cpp
+++ b/src/qt/sendcoinsdialog.cpp
@@ -699,7 +699,7 @@ void SendCoinsDialog::setBalance(const interfaces::WalletBalances& balances)
CAmount balance = balances.balance;
if (model->wallet().hasExternalSigner()) {
ui->labelBalanceName->setText(tr("External balance:"));
- } else if (model->wallet().privateKeysDisabled()) {
+ } else if (model->wallet().isLegacy() && model->wallet().privateKeysDisabled()) {
balance = balances.watch_only_balance;
ui->labelBalanceName->setText(tr("Watch-only balance:"));
}
diff --git a/src/qt/transactionfilterproxy.cpp b/src/qt/transactionfilterproxy.cpp
index 3cc0cc839d..173fd326a3 100644
--- a/src/qt/transactionfilterproxy.cpp
+++ b/src/qt/transactionfilterproxy.cpp
@@ -88,25 +88,8 @@ void TransactionFilterProxy::setWatchOnlyFilter(WatchOnlyFilter filter)
invalidateFilter();
}
-void TransactionFilterProxy::setLimit(int limit)
-{
- this->limitRows = limit;
-}
-
void TransactionFilterProxy::setShowInactive(bool _showInactive)
{
this->showInactive = _showInactive;
invalidateFilter();
}
-
-int TransactionFilterProxy::rowCount(const QModelIndex &parent) const
-{
- if(limitRows != -1)
- {
- return std::min(QSortFilterProxyModel::rowCount(parent), limitRows);
- }
- else
- {
- return QSortFilterProxyModel::rowCount(parent);
- }
-}
diff --git a/src/qt/transactionfilterproxy.h b/src/qt/transactionfilterproxy.h
index 8e5f72d764..73c4f21426 100644
--- a/src/qt/transactionfilterproxy.h
+++ b/src/qt/transactionfilterproxy.h
@@ -42,14 +42,9 @@ public:
void setMinAmount(const CAmount& minimum);
void setWatchOnlyFilter(WatchOnlyFilter filter);
- /** Set maximum number of rows returned, -1 if unlimited. */
- void setLimit(int limit);
-
/** Set whether to show conflicted transactions. */
void setShowInactive(bool showInactive);
- int rowCount(const QModelIndex &parent = QModelIndex()) const override;
-
protected:
bool filterAcceptsRow(int source_row, const QModelIndex & source_parent) const override;
@@ -60,7 +55,6 @@ private:
quint32 typeFilter;
WatchOnlyFilter watchOnlyFilter{WatchOnlyFilter_All};
CAmount minAmount{0};
- int limitRows{-1};
bool showInactive{true};
};
diff --git a/src/qt/transactiontablemodel.cpp b/src/qt/transactiontablemodel.cpp
index 3b32137bd4..25d54bdce6 100644
--- a/src/qt/transactiontablemodel.cpp
+++ b/src/qt/transactiontablemodel.cpp
@@ -93,10 +93,7 @@ public:
TransactionTableModel *parent;
- /* Local cache of wallet.
- * As it is in the same order as the CWallet, by definition
- * this is sorted by sha256.
- */
+ //! Local cache of wallet sorted by transaction hash
QList<TransactionRecord> cachedWallet;
/** True when model finishes loading all wallet transactions on start */
diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp
index cb8491e27a..3c69d46b7e 100644
--- a/src/qt/walletmodel.cpp
+++ b/src/qt/walletmodel.cpp
@@ -477,13 +477,6 @@ WalletModel::UnlockContext::~UnlockContext()
}
}
-void WalletModel::UnlockContext::CopyFrom(UnlockContext&& rhs)
-{
- // Transfer context; old object no longer relocks wallet
- *this = rhs;
- rhs.relock = false;
-}
-
bool WalletModel::bumpFee(uint256 hash, uint256& new_hash)
{
CCoinControl coin_control;
diff --git a/src/qt/walletmodel.h b/src/qt/walletmodel.h
index 604a9e03c8..17a39349f3 100644
--- a/src/qt/walletmodel.h
+++ b/src/qt/walletmodel.h
@@ -111,7 +111,7 @@ public:
bool setWalletLocked(bool locked, const SecureString &passPhrase=SecureString());
bool changePassphrase(const SecureString &oldPass, const SecureString &newPass);
- // RAI object for unlocking wallet, returned by requestUnlock()
+ // RAII object for unlocking wallet, returned by requestUnlock()
class UnlockContext
{
public:
@@ -120,18 +120,16 @@ public:
bool isValid() const { return valid; }
- // Copy constructor is disabled.
+ // Disable unused copy/move constructors/assignments explicitly.
UnlockContext(const UnlockContext&) = delete;
- // Move operator and constructor transfer the context
- UnlockContext(UnlockContext&& obj) { CopyFrom(std::move(obj)); }
- UnlockContext& operator=(UnlockContext&& rhs) { CopyFrom(std::move(rhs)); return *this; }
+ UnlockContext(UnlockContext&&) = delete;
+ UnlockContext& operator=(const UnlockContext&) = delete;
+ UnlockContext& operator=(UnlockContext&&) = delete;
+
private:
WalletModel *wallet;
- bool valid;
- mutable bool relock; // mutable, as it can be set to false by copying
-
- UnlockContext& operator=(const UnlockContext&) = default;
- void CopyFrom(UnlockContext&& rhs);
+ const bool valid;
+ const bool relock;
};
UnlockContext requestUnlock();
diff --git a/src/random.cpp b/src/random.cpp
index 23ea9ba6b7..5f50c001cd 100644
--- a/src/random.cpp
+++ b/src/random.cpp
@@ -599,18 +599,15 @@ uint256 GetRandHash() noexcept
void FastRandomContext::RandomSeed()
{
uint256 seed = GetRandHash();
- rng.SetKey(seed.begin(), 32);
+ rng.SetKey32(seed.begin());
requires_seed = false;
}
uint256 FastRandomContext::rand256() noexcept
{
- if (bytebuf_size < 32) {
- FillByteBuffer();
- }
+ if (requires_seed) RandomSeed();
uint256 ret;
- memcpy(ret.begin(), bytebuf + 64 - bytebuf_size, 32);
- bytebuf_size -= 32;
+ rng.Keystream(ret.data(), ret.size());
return ret;
}
@@ -624,9 +621,9 @@ std::vector<unsigned char> FastRandomContext::randbytes(size_t len)
return ret;
}
-FastRandomContext::FastRandomContext(const uint256& seed) noexcept : requires_seed(false), bytebuf_size(0), bitbuf_size(0)
+FastRandomContext::FastRandomContext(const uint256& seed) noexcept : requires_seed(false), bitbuf_size(0)
{
- rng.SetKey(seed.begin(), 32);
+ rng.SetKey32(seed.begin());
}
bool Random_SanityCheck()
@@ -675,25 +672,22 @@ bool Random_SanityCheck()
return true;
}
-FastRandomContext::FastRandomContext(bool fDeterministic) noexcept : requires_seed(!fDeterministic), bytebuf_size(0), bitbuf_size(0)
+FastRandomContext::FastRandomContext(bool fDeterministic) noexcept : requires_seed(!fDeterministic), bitbuf_size(0)
{
if (!fDeterministic) {
return;
}
uint256 seed;
- rng.SetKey(seed.begin(), 32);
+ rng.SetKey32(seed.begin());
}
FastRandomContext& FastRandomContext::operator=(FastRandomContext&& from) noexcept
{
requires_seed = from.requires_seed;
rng = from.rng;
- std::copy(std::begin(from.bytebuf), std::end(from.bytebuf), std::begin(bytebuf));
- bytebuf_size = from.bytebuf_size;
bitbuf = from.bitbuf;
bitbuf_size = from.bitbuf_size;
from.requires_seed = true;
- from.bytebuf_size = 0;
from.bitbuf_size = 0;
return *this;
}
diff --git a/src/random.h b/src/random.h
index e890e909c7..49c0dff5bf 100644
--- a/src/random.h
+++ b/src/random.h
@@ -15,6 +15,7 @@
#include <chrono>
#include <cstdint>
#include <limits>
+#include <vector>
/**
* Overall design of the RNG and entropy sources.
@@ -145,23 +146,11 @@ private:
bool requires_seed;
ChaCha20 rng;
- unsigned char bytebuf[64];
- int bytebuf_size;
-
uint64_t bitbuf;
int bitbuf_size;
void RandomSeed();
- void FillByteBuffer()
- {
- if (requires_seed) {
- RandomSeed();
- }
- rng.Keystream(bytebuf, sizeof(bytebuf));
- bytebuf_size = sizeof(bytebuf);
- }
-
void FillBitBuffer()
{
bitbuf = rand64();
@@ -185,10 +174,10 @@ public:
/** Generate a random 64-bit integer. */
uint64_t rand64() noexcept
{
- if (bytebuf_size < 8) FillByteBuffer();
- uint64_t ret = ReadLE64(bytebuf + 64 - bytebuf_size);
- bytebuf_size -= 8;
- return ret;
+ if (requires_seed) RandomSeed();
+ unsigned char buf[8];
+ rng.Keystream(buf, 8);
+ return ReadLE64(buf);
}
/** Generate a random (bits)-bit integer. */
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index f0e5b90509..618a5d0dd4 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -702,9 +702,7 @@ static RPCHelpMan setban()
throw std::runtime_error(help.ToString());
}
NodeContext& node = EnsureAnyNodeContext(request.context);
- if (!node.banman) {
- throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded");
- }
+ BanMan& banman = EnsureBanman(node);
CSubNet subNet;
CNetAddr netAddr;
@@ -726,7 +724,7 @@ static RPCHelpMan setban()
if (strCommand == "add")
{
- if (isSubnet ? node.banman->IsBanned(subNet) : node.banman->IsBanned(netAddr)) {
+ if (isSubnet ? banman.IsBanned(subNet) : banman.IsBanned(netAddr)) {
throw JSONRPCError(RPC_CLIENT_NODE_ALREADY_ADDED, "Error: IP/Subnet already banned");
}
@@ -741,12 +739,12 @@ static RPCHelpMan setban()
}
if (isSubnet) {
- node.banman->Ban(subNet, banTime, absolute);
+ banman.Ban(subNet, banTime, absolute);
if (node.connman) {
node.connman->DisconnectNode(subNet);
}
} else {
- node.banman->Ban(netAddr, banTime, absolute);
+ banman.Ban(netAddr, banTime, absolute);
if (node.connman) {
node.connman->DisconnectNode(netAddr);
}
@@ -754,7 +752,7 @@ static RPCHelpMan setban()
}
else if(strCommand == "remove")
{
- if (!( isSubnet ? node.banman->Unban(subNet) : node.banman->Unban(netAddr) )) {
+ if (!( isSubnet ? banman.Unban(subNet) : banman.Unban(netAddr) )) {
throw JSONRPCError(RPC_CLIENT_INVALID_IP_OR_SUBNET, "Error: Unban failed. Requested address/subnet was not previously manually banned.");
}
}
@@ -785,13 +783,10 @@ static RPCHelpMan listbanned()
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
- NodeContext& node = EnsureAnyNodeContext(request.context);
- if(!node.banman) {
- throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded");
- }
+ BanMan& banman = EnsureAnyBanman(request.context);
banmap_t banMap;
- node.banman->GetBanned(banMap);
+ banman.GetBanned(banMap);
const int64_t current_time{GetTime()};
UniValue bannedAddresses(UniValue::VARR);
@@ -825,12 +820,9 @@ static RPCHelpMan clearbanned()
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
- NodeContext& node = EnsureAnyNodeContext(request.context);
- if (!node.banman) {
- throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded");
- }
+ BanMan& banman = EnsureAnyBanman(request.context);
- node.banman->ClearBanned();
+ banman.ClearBanned();
return UniValue::VNULL;
},
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 7704496c10..503099d8a9 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -522,15 +522,17 @@ static RPCHelpMan decodescript()
if (can_wrap_P2WSH) {
UniValue sr(UniValue::VOBJ);
CScript segwitScr;
+ FillableSigningProvider provider;
if (which_type == TxoutType::PUBKEY) {
segwitScr = GetScriptForDestination(WitnessV0KeyHash(Hash160(solutions_data[0])));
} else if (which_type == TxoutType::PUBKEYHASH) {
segwitScr = GetScriptForDestination(WitnessV0KeyHash(uint160{solutions_data[0]}));
} else {
// Scripts that are not fit for P2WPKH are encoded as P2WSH.
+ provider.AddCScript(script);
segwitScr = GetScriptForDestination(WitnessV0ScriptHash(script));
}
- ScriptToUniv(segwitScr, /*out=*/sr, /*include_hex=*/true, /*include_address=*/true);
+ ScriptToUniv(segwitScr, /*out=*/sr, /*include_hex=*/true, /*include_address=*/true, /*provider=*/&provider);
sr.pushKV("p2sh-segwit", EncodeDestination(ScriptHash(segwitScr)));
r.pushKV("segwit", sr);
}
diff --git a/src/rpc/rawtransaction_util.cpp b/src/rpc/rawtransaction_util.cpp
index 15b8e1dcd0..3ba930f84f 100644
--- a/src/rpc/rawtransaction_util.cpp
+++ b/src/rpc/rawtransaction_util.cpp
@@ -21,12 +21,8 @@
#include <util/strencodings.h>
#include <util/translation.h>
-CMutableTransaction ConstructTransaction(const UniValue& inputs_in, const UniValue& outputs_in, const UniValue& locktime, std::optional<bool> rbf)
+void AddInputs(CMutableTransaction& rawTx, const UniValue& inputs_in, std::optional<bool> rbf)
{
- if (outputs_in.isNull()) {
- throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, output argument must be non-null");
- }
-
UniValue inputs;
if (inputs_in.isNull()) {
inputs = UniValue::VARR;
@@ -34,18 +30,6 @@ CMutableTransaction ConstructTransaction(const UniValue& inputs_in, const UniVal
inputs = inputs_in.get_array();
}
- const bool outputs_is_obj = outputs_in.isObject();
- UniValue outputs = outputs_is_obj ? outputs_in.get_obj() : outputs_in.get_array();
-
- CMutableTransaction rawTx;
-
- if (!locktime.isNull()) {
- int64_t nLockTime = locktime.getInt<int64_t>();
- if (nLockTime < 0 || nLockTime > LOCKTIME_MAX)
- throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, locktime out of range");
- rawTx.nLockTime = nLockTime;
- }
-
for (unsigned int idx = 0; idx < inputs.size(); idx++) {
const UniValue& input = inputs[idx];
const UniValue& o = input.get_obj();
@@ -84,6 +68,16 @@ CMutableTransaction ConstructTransaction(const UniValue& inputs_in, const UniVal
rawTx.vin.push_back(in);
}
+}
+
+void AddOutputs(CMutableTransaction& rawTx, const UniValue& outputs_in)
+{
+ if (outputs_in.isNull()) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, output argument must be non-null");
+ }
+
+ const bool outputs_is_obj = outputs_in.isObject();
+ UniValue outputs = outputs_is_obj ? outputs_in.get_obj() : outputs_in.get_array();
if (!outputs_is_obj) {
// Translate array of key-value pairs into dict
@@ -132,6 +126,21 @@ CMutableTransaction ConstructTransaction(const UniValue& inputs_in, const UniVal
rawTx.vout.push_back(out);
}
}
+}
+
+CMutableTransaction ConstructTransaction(const UniValue& inputs_in, const UniValue& outputs_in, const UniValue& locktime, std::optional<bool> rbf)
+{
+ CMutableTransaction rawTx;
+
+ if (!locktime.isNull()) {
+ int64_t nLockTime = locktime.getInt<int64_t>();
+ if (nLockTime < 0 || nLockTime > LOCKTIME_MAX)
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, locktime out of range");
+ rawTx.nLockTime = nLockTime;
+ }
+
+ AddInputs(rawTx, inputs_in, rbf);
+ AddOutputs(rawTx, outputs_in);
if (rbf.has_value() && rbf.value() && rawTx.vin.size() > 0 && !SignalsOptInRBF(CTransaction(rawTx))) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter combination: Sequence number(s) contradict replaceable option");
diff --git a/src/rpc/rawtransaction_util.h b/src/rpc/rawtransaction_util.h
index 0c3823bc1e..a863432b7a 100644
--- a/src/rpc/rawtransaction_util.h
+++ b/src/rpc/rawtransaction_util.h
@@ -38,6 +38,13 @@ void SignTransactionResultToJSON(CMutableTransaction& mtx, bool complete, const
*/
void ParsePrevouts(const UniValue& prevTxsUnival, FillableSigningProvider* keystore, std::map<COutPoint, Coin>& coins);
+
+/** Normalize univalue-represented inputs and add them to the transaction */
+void AddInputs(CMutableTransaction& rawTx, const UniValue& inputs_in, bool rbf);
+
+/** Normalize univalue-represented outputs and add them to the transaction */
+void AddOutputs(CMutableTransaction& rawTx, const UniValue& outputs_in);
+
/** Create a transaction from univalue parameters */
CMutableTransaction ConstructTransaction(const UniValue& inputs_in, const UniValue& outputs_in, const UniValue& locktime, std::optional<bool> rbf);
diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp
index 0bb5533d71..6f37fe2a99 100644
--- a/src/rpc/request.cpp
+++ b/src/rpc/request.cpp
@@ -86,7 +86,7 @@ bool GenerateAuthCookie(std::string *cookie_out)
std::string cookie = COOKIEAUTH_USER + ":" + HexStr(rand_pwd);
/** the umask determines what permissions are used to create this file -
- * these are set to 077 in init.cpp unless overridden with -sysperms.
+ * these are set to 0077 in util/system.cpp.
*/
std::ofstream file;
fs::path filepath_tmp = GetAuthCookieFile(true);
diff --git a/src/rpc/server_util.cpp b/src/rpc/server_util.cpp
index 50f9ce7b3c..7a708ec813 100644
--- a/src/rpc/server_util.cpp
+++ b/src/rpc/server_util.cpp
@@ -39,6 +39,20 @@ CTxMemPool& EnsureAnyMemPool(const std::any& context)
return EnsureMemPool(EnsureAnyNodeContext(context));
}
+
+BanMan& EnsureBanman(const NodeContext& node)
+{
+ if (!node.banman) {
+ throw JSONRPCError(RPC_DATABASE_ERROR, "Error: Ban database not loaded");
+ }
+ return *node.banman;
+}
+
+BanMan& EnsureAnyBanman(const std::any& context)
+{
+ return EnsureBanman(EnsureAnyNodeContext(context));
+}
+
ArgsManager& EnsureArgsman(const NodeContext& node)
{
if (!node.args) {
diff --git a/src/rpc/server_util.h b/src/rpc/server_util.h
index fa008a8155..9af9572431 100644
--- a/src/rpc/server_util.h
+++ b/src/rpc/server_util.h
@@ -13,6 +13,7 @@ class CConnman;
class CTxMemPool;
class ChainstateManager;
class PeerManager;
+class BanMan;
namespace node {
struct NodeContext;
} // namespace node
@@ -20,6 +21,8 @@ struct NodeContext;
node::NodeContext& EnsureAnyNodeContext(const std::any& context);
CTxMemPool& EnsureMemPool(const node::NodeContext& node);
CTxMemPool& EnsureAnyMemPool(const std::any& context);
+BanMan& EnsureBanman(const node::NodeContext& node);
+BanMan& EnsureAnyBanman(const std::any& context);
ArgsManager& EnsureArgsman(const node::NodeContext& node);
ArgsManager& EnsureAnyArgsman(const std::any& context);
ChainstateManager& EnsureChainman(const node::NodeContext& node);
diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp
index 8991bda340..857fee1818 100644
--- a/src/script/descriptor.cpp
+++ b/src/script/descriptor.cpp
@@ -1014,7 +1014,7 @@ public:
return false;
}
- bool IsSolvable() const override { return false; } // For now, mark these descriptors as non-solvable (as we don't have signing logic for them).
+ bool IsSolvable() const override { return true; }
bool IsSingleType() const final { return true; }
};
diff --git a/src/script/descriptor.h b/src/script/descriptor.h
index cb3b366acf..39b1a37f9a 100644
--- a/src/script/descriptor.h
+++ b/src/script/descriptor.h
@@ -35,7 +35,7 @@ public:
/** Retrieve a cached parent xpub
*
* @param[in] key_exp_pos Position of the key expression within the descriptor
- * @param[in] xpub The CExtPubKey to get from cache
+ * @param[out] xpub The CExtPubKey to get from cache
*/
bool GetCachedParentExtPubKey(uint32_t key_exp_pos, CExtPubKey& xpub) const;
/** Cache an xpub derived at an index
@@ -49,7 +49,7 @@ public:
*
* @param[in] key_exp_pos Position of the key expression within the descriptor
* @param[in] der_index Derivation index of the xpub
- * @param[in] xpub The CExtPubKey to get from cache
+ * @param[out] xpub The CExtPubKey to get from cache
*/
bool GetCachedDerivedExtPubKey(uint32_t key_exp_pos, uint32_t der_index, CExtPubKey& xpub) const;
/** Cache a last hardened xpub
@@ -61,7 +61,7 @@ public:
/** Retrieve a cached last hardened xpub
*
* @param[in] key_exp_pos Position of the key expression within the descriptor
- * @param[in] xpub The CExtPubKey to get from cache
+ * @param[out] xpub The CExtPubKey to get from cache
*/
bool GetCachedLastHardenedExtPubKey(uint32_t key_exp_pos, CExtPubKey& xpub) const;
diff --git a/src/script/miniscript.cpp b/src/script/miniscript.cpp
index 5e471cbe89..3937638cf8 100644
--- a/src/script/miniscript.cpp
+++ b/src/script/miniscript.cpp
@@ -172,8 +172,8 @@ Type ComputeType(Fragment fragment, Type x, Type y, Type z, const std::vector<Ty
(y & "B"_mst).If(x << "Bdu"_mst) | // B=B_y*B_x*d_x*u_x
(x & "o"_mst).If(y << "z"_mst) | // o=o_x*z_y
(x & y & "m"_mst).If(x << "e"_mst && (x | y) << "s"_mst) | // m=m_x*m_y*e_x*(s_x+s_y)
- (x & y & "zes"_mst) | // z=z_x*z_y, e=e_x*e_y, s=s_x*s_y
- (y & "ufd"_mst) | // u=u_y, f=f_y, d=d_y
+ (x & y & "zs"_mst) | // z=z_x*z_y, s=s_x*s_y
+ (y & "ufde"_mst) | // u=u_y, f=f_y, d=d_y, e=e_y
"x"_mst | // x
((x | y) & "ghij"_mst) | // g=g_x+g_y, h=h_x+h_y, i=i_x+i_y, j=j_x+j_y
(x & y & "k"_mst); // k=k_x*k_y
@@ -201,7 +201,7 @@ Type ComputeType(Fragment fragment, Type x, Type y, Type z, const std::vector<Ty
(y & z & "u"_mst) | // u=u_y*u_z
(z & "f"_mst).If((x << "s"_mst) || (y << "f"_mst)) | // f=(s_x+f_y)*f_z
(z & "d"_mst) | // d=d_z
- (x & z & "e"_mst).If(x << "s"_mst || y << "f"_mst) | // e=e_x*e_z*(s_x+f_y)
+ (z & "e"_mst).If(x << "s"_mst || y << "f"_mst) | // e=e_z*(s_x+f_y)
(x & y & z & "m"_mst).If(x << "e"_mst && (x | y | z) << "s"_mst) | // m=m_x*m_y*m_z*e_x*(s_x+s_y+s_z)
(z & (x | y) & "s"_mst) | // s=s_z*(s_x+s_y)
"x"_mst | // x
@@ -279,6 +279,76 @@ size_t ComputeScriptLen(Fragment fragment, Type sub0typ, size_t subsize, uint32_
assert(false);
}
+InputStack& InputStack::SetAvailable(Availability avail) {
+ available = avail;
+ if (avail == Availability::NO) {
+ stack.clear();
+ size = std::numeric_limits<size_t>::max();
+ has_sig = false;
+ malleable = false;
+ non_canon = false;
+ }
+ return *this;
+}
+
+InputStack& InputStack::SetWithSig() {
+ has_sig = true;
+ return *this;
+}
+
+InputStack& InputStack::SetNonCanon() {
+ non_canon = true;
+ return *this;
+}
+
+InputStack& InputStack::SetMalleable(bool x) {
+ malleable = x;
+ return *this;
+}
+
+InputStack operator+(InputStack a, InputStack b) {
+ a.stack = Cat(std::move(a.stack), std::move(b.stack));
+ if (a.available != Availability::NO && b.available != Availability::NO) a.size += b.size;
+ a.has_sig |= b.has_sig;
+ a.malleable |= b.malleable;
+ a.non_canon |= b.non_canon;
+ if (a.available == Availability::NO || b.available == Availability::NO) {
+ a.SetAvailable(Availability::NO);
+ } else if (a.available == Availability::MAYBE || b.available == Availability::MAYBE) {
+ a.SetAvailable(Availability::MAYBE);
+ }
+ return a;
+}
+
+InputStack operator|(InputStack a, InputStack b) {
+ // If only one is invalid, pick the other one. If both are invalid, pick an arbitrary one.
+ if (a.available == Availability::NO) return b;
+ if (b.available == Availability::NO) return a;
+ // If only one of the solutions has a signature, we must pick the other one.
+ if (!a.has_sig && b.has_sig) return a;
+ if (!b.has_sig && a.has_sig) return b;
+ if (!a.has_sig && !b.has_sig) {
+ // If neither solution requires a signature, the result is inevitably malleable.
+ a.malleable = true;
+ b.malleable = true;
+ } else {
+ // If both options require a signature, prefer the non-malleable one.
+ if (b.malleable && !a.malleable) return a;
+ if (a.malleable && !b.malleable) return b;
+ }
+ // Between two malleable or two non-malleable solutions, pick the smaller one between
+ // YESes, and the bigger ones between MAYBEs. Prefer YES over MAYBE.
+ if (a.available == Availability::YES && b.available == Availability::YES) {
+ return std::move(a.size <= b.size ? a : b);
+ } else if (a.available == Availability::MAYBE && b.available == Availability::MAYBE) {
+ return std::move(a.size >= b.size ? a : b);
+ } else if (a.available == Availability::YES) {
+ return a;
+ } else {
+ return b;
+ }
+}
+
std::optional<std::vector<Opcode>> DecomposeScript(const CScript& script)
{
std::vector<Opcode> out;
diff --git a/src/script/miniscript.h b/src/script/miniscript.h
index 3a3f724f03..c42b530c4d 100644
--- a/src/script/miniscript.h
+++ b/src/script/miniscript.h
@@ -223,6 +223,11 @@ enum class Fragment {
// WRAP_U(X) is represented as OR_I(X,0)
};
+enum class Availability {
+ NO,
+ YES,
+ MAYBE,
+};
namespace internal {
@@ -235,6 +240,62 @@ size_t ComputeScriptLen(Fragment fragment, Type sub0typ, size_t subsize, uint32_
//! A helper sanitizer/checker for the output of CalcType.
Type SanitizeType(Type x);
+//! An object representing a sequence of witness stack elements.
+struct InputStack {
+ /** Whether this stack is valid for its intended purpose (satisfaction or dissatisfaction of a Node).
+ * The MAYBE value is used for size estimation, when keys/preimages may actually be unavailable,
+ * but may be available at signing time. This makes the InputStack structure and signing logic,
+ * filled with dummy signatures/preimages usable for witness size estimation.
+ */
+ Availability available = Availability::YES;
+ //! Whether this stack contains a digital signature.
+ bool has_sig = false;
+ //! Whether this stack is malleable (can be turned into an equally valid other stack by a third party).
+ bool malleable = false;
+ //! Whether this stack is non-canonical (using a construction known to be unnecessary for satisfaction).
+ //! Note that this flag does not affect the satisfaction algorithm; it is only used for sanity checking.
+ bool non_canon = false;
+ //! Serialized witness size.
+ size_t size = 0;
+ //! Data elements.
+ std::vector<std::vector<unsigned char>> stack;
+ //! Construct an empty stack (valid).
+ InputStack() {}
+ //! Construct a valid single-element stack (with an element up to 75 bytes).
+ InputStack(std::vector<unsigned char> in) : size(in.size() + 1), stack(Vector(std::move(in))) {}
+ //! Change availability
+ InputStack& SetAvailable(Availability avail);
+ //! Mark this input stack as having a signature.
+ InputStack& SetWithSig();
+ //! Mark this input stack as non-canonical (known to not be necessary in non-malleable satisfactions).
+ InputStack& SetNonCanon();
+ //! Mark this input stack as malleable.
+ InputStack& SetMalleable(bool x = true);
+ //! Concatenate two input stacks.
+ friend InputStack operator+(InputStack a, InputStack b);
+ //! Choose between two potential input stacks.
+ friend InputStack operator|(InputStack a, InputStack b);
+};
+
+/** A stack consisting of a single zero-length element (interpreted as 0 by the script interpreter in numeric context). */
+static const auto ZERO = InputStack(std::vector<unsigned char>());
+/** A stack consisting of a single malleable 32-byte 0x0000...0000 element (for dissatisfying hash challenges). */
+static const auto ZERO32 = InputStack(std::vector<unsigned char>(32, 0)).SetMalleable();
+/** A stack consisting of a single 0x01 element (interpreted as 1 by the script interpreted in numeric context). */
+static const auto ONE = InputStack(Vector((unsigned char)1));
+/** The empty stack. */
+static const auto EMPTY = InputStack();
+/** A stack representing the lack of any (dis)satisfactions. */
+static const auto INVALID = InputStack().SetAvailable(Availability::NO);
+
+//! A pair of a satisfaction and a dissatisfaction InputStack.
+struct InputResult {
+ InputStack nsat, sat;
+
+ template<typename A, typename B>
+ InputResult(A&& in_nsat, B&& in_sat) : nsat(std::forward<A>(in_nsat)), sat(std::forward<B>(in_sat)) {}
+};
+
//! Class whose objects represent the maximum of a list of integers.
template<typename I>
struct MaxInt {
@@ -785,6 +846,226 @@ private:
assert(false);
}
+ template<typename Ctx>
+ internal::InputResult ProduceInput(const Ctx& ctx) const {
+ using namespace internal;
+
+ // Internal function which is invoked for every tree node, constructing satisfaction/dissatisfactions
+ // given those of its subnodes.
+ auto helper = [&ctx](const Node& node, Span<InputResult> subres) -> InputResult {
+ switch (node.fragment) {
+ case Fragment::PK_K: {
+ std::vector<unsigned char> sig;
+ Availability avail = ctx.Sign(node.keys[0], sig);
+ return {ZERO, InputStack(std::move(sig)).SetWithSig().SetAvailable(avail)};
+ }
+ case Fragment::PK_H: {
+ std::vector<unsigned char> key = ctx.ToPKBytes(node.keys[0]), sig;
+ Availability avail = ctx.Sign(node.keys[0], sig);
+ return {ZERO + InputStack(key), (InputStack(std::move(sig)).SetWithSig() + InputStack(key)).SetAvailable(avail)};
+ }
+ case Fragment::MULTI: {
+ // sats[j] represents the best stack containing j valid signatures (out of the first i keys).
+ // In the loop below, these stacks are built up using a dynamic programming approach.
+ // sats[0] starts off being {0}, due to the CHECKMULTISIG bug that pops off one element too many.
+ std::vector<InputStack> sats = Vector(ZERO);
+ for (size_t i = 0; i < node.keys.size(); ++i) {
+ std::vector<unsigned char> sig;
+ Availability avail = ctx.Sign(node.keys[i], sig);
+ // Compute signature stack for just the i'th key.
+ auto sat = InputStack(std::move(sig)).SetWithSig().SetAvailable(avail);
+ // Compute the next sats vector: next_sats[0] is a copy of sats[0] (no signatures). All further
+ // next_sats[j] are equal to either the existing sats[j], or sats[j-1] plus a signature for the
+ // current (i'th) key. The very last element needs all signatures filled.
+ std::vector<InputStack> next_sats;
+ next_sats.push_back(sats[0]);
+ for (size_t j = 1; j < sats.size(); ++j) next_sats.push_back(sats[j] | (std::move(sats[j - 1]) + sat));
+ next_sats.push_back(std::move(sats[sats.size() - 1]) + std::move(sat));
+ // Switch over.
+ sats = std::move(next_sats);
+ }
+ // The dissatisfaction consists of k+1 stack elements all equal to 0.
+ InputStack nsat = ZERO;
+ for (size_t i = 0; i < node.k; ++i) nsat = std::move(nsat) + ZERO;
+ assert(node.k <= sats.size());
+ return {std::move(nsat), std::move(sats[node.k])};
+ }
+ case Fragment::THRESH: {
+ // sats[k] represents the best stack that satisfies k out of the *last* i subexpressions.
+ // In the loop below, these stacks are built up using a dynamic programming approach.
+ // sats[0] starts off empty.
+ std::vector<InputStack> sats = Vector(EMPTY);
+ for (size_t i = 0; i < subres.size(); ++i) {
+ // Introduce an alias for the i'th last satisfaction/dissatisfaction.
+ auto& res = subres[subres.size() - i - 1];
+ // Compute the next sats vector: next_sats[0] is sats[0] plus res.nsat (thus containing all dissatisfactions
+ // so far). next_sats[j] is either sats[j] + res.nsat (reusing j earlier satisfactions) or sats[j-1] + res.sat
+ // (reusing j-1 earlier satisfactions plus a new one). The very last next_sats[j] is all satisfactions.
+ std::vector<InputStack> next_sats;
+ next_sats.push_back(sats[0] + res.nsat);
+ for (size_t j = 1; j < sats.size(); ++j) next_sats.push_back((sats[j] + res.nsat) | (std::move(sats[j - 1]) + res.sat));
+ next_sats.push_back(std::move(sats[sats.size() - 1]) + std::move(res.sat));
+ // Switch over.
+ sats = std::move(next_sats);
+ }
+ // At this point, sats[k].sat is the best satisfaction for the overall thresh() node. The best dissatisfaction
+ // is computed by gathering all sats[i].nsat for i != k.
+ InputStack nsat = INVALID;
+ for (size_t i = 0; i < sats.size(); ++i) {
+ // i==k is the satisfaction; i==0 is the canonical dissatisfaction;
+ // the rest are non-canonical (a no-signature dissatisfaction - the i=0
+ // form - is always available) and malleable (due to overcompleteness).
+ // Marking the solutions malleable here is not strictly necessary, as they
+ // should already never be picked in non-malleable solutions due to the
+ // availability of the i=0 form.
+ if (i != 0 && i != node.k) sats[i].SetMalleable().SetNonCanon();
+ // Include all dissatisfactions (even these non-canonical ones) in nsat.
+ if (i != node.k) nsat = std::move(nsat) | std::move(sats[i]);
+ }
+ assert(node.k <= sats.size());
+ return {std::move(nsat), std::move(sats[node.k])};
+ }
+ case Fragment::OLDER: {
+ return {INVALID, ctx.CheckOlder(node.k) ? EMPTY : INVALID};
+ }
+ case Fragment::AFTER: {
+ return {INVALID, ctx.CheckAfter(node.k) ? EMPTY : INVALID};
+ }
+ case Fragment::SHA256: {
+ std::vector<unsigned char> preimage;
+ Availability avail = ctx.SatSHA256(node.data, preimage);
+ return {ZERO32, InputStack(std::move(preimage)).SetAvailable(avail)};
+ }
+ case Fragment::RIPEMD160: {
+ std::vector<unsigned char> preimage;
+ Availability avail = ctx.SatRIPEMD160(node.data, preimage);
+ return {ZERO32, InputStack(std::move(preimage)).SetAvailable(avail)};
+ }
+ case Fragment::HASH256: {
+ std::vector<unsigned char> preimage;
+ Availability avail = ctx.SatHASH256(node.data, preimage);
+ return {ZERO32, InputStack(std::move(preimage)).SetAvailable(avail)};
+ }
+ case Fragment::HASH160: {
+ std::vector<unsigned char> preimage;
+ Availability avail = ctx.SatHASH160(node.data, preimage);
+ return {ZERO32, InputStack(std::move(preimage)).SetAvailable(avail)};
+ }
+ case Fragment::AND_V: {
+ auto& x = subres[0], &y = subres[1];
+ // As the dissatisfaction here only consist of a single option, it doesn't
+ // actually need to be listed (it's not required for reasoning about malleability of
+ // other options), and is never required (no valid miniscript relies on the ability
+ // to satisfy the type V left subexpression). It's still listed here for
+ // completeness, as a hypothetical (not currently implemented) satisfier that doesn't
+ // care about malleability might in some cases prefer it still.
+ return {(y.nsat + x.sat).SetNonCanon(), y.sat + x.sat};
+ }
+ case Fragment::AND_B: {
+ auto& x = subres[0], &y = subres[1];
+ // Note that it is not strictly necessary to mark the 2nd and 3rd dissatisfaction here
+ // as malleable. While they are definitely malleable, they are also non-canonical due
+ // to the guaranteed existence of a no-signature other dissatisfaction (the 1st)
+ // option. Because of that, the 2nd and 3rd option will never be chosen, even if they
+ // weren't marked as malleable.
+ return {(y.nsat + x.nsat) | (y.sat + x.nsat).SetMalleable().SetNonCanon() | (y.nsat + x.sat).SetMalleable().SetNonCanon(), y.sat + x.sat};
+ }
+ case Fragment::OR_B: {
+ auto& x = subres[0], &z = subres[1];
+ // The (sat(Z) sat(X)) solution is overcomplete (attacker can change either into dsat).
+ return {z.nsat + x.nsat, (z.nsat + x.sat) | (z.sat + x.nsat) | (z.sat + x.sat).SetMalleable().SetNonCanon()};
+ }
+ case Fragment::OR_C: {
+ auto& x = subres[0], &z = subres[1];
+ return {INVALID, std::move(x.sat) | (z.sat + x.nsat)};
+ }
+ case Fragment::OR_D: {
+ auto& x = subres[0], &z = subres[1];
+ return {z.nsat + x.nsat, std::move(x.sat) | (z.sat + x.nsat)};
+ }
+ case Fragment::OR_I: {
+ auto& x = subres[0], &z = subres[1];
+ return {(x.nsat + ONE) | (z.nsat + ZERO), (x.sat + ONE) | (z.sat + ZERO)};
+ }
+ case Fragment::ANDOR: {
+ auto& x = subres[0], &y = subres[1], &z = subres[2];
+ return {(y.nsat + x.sat).SetNonCanon() | (z.nsat + x.nsat), (y.sat + x.sat) | (z.sat + x.nsat)};
+ }
+ case Fragment::WRAP_A:
+ case Fragment::WRAP_S:
+ case Fragment::WRAP_C:
+ case Fragment::WRAP_N:
+ return std::move(subres[0]);
+ case Fragment::WRAP_D: {
+ auto &x = subres[0];
+ return {ZERO, x.sat + ONE};
+ }
+ case Fragment::WRAP_J: {
+ auto &x = subres[0];
+ // If a dissatisfaction with a nonzero top stack element exists, an alternative dissatisfaction exists.
+ // As the dissatisfaction logic currently doesn't keep track of this nonzeroness property, even
+ // if a dissatisfaction with a top zero element is found, we don't know whether another one with a
+ // nonzero top stack element exists. Make the conservative assumption that whenever the subexpression is weakly
+ // dissatisfiable, this alternative dissatisfaction exists and leads to malleability.
+ return {InputStack(ZERO).SetMalleable(x.nsat.available != Availability::NO && !x.nsat.has_sig), std::move(x.sat)};
+ }
+ case Fragment::WRAP_V: {
+ auto &x = subres[0];
+ return {INVALID, std::move(x.sat)};
+ }
+ case Fragment::JUST_0: return {EMPTY, INVALID};
+ case Fragment::JUST_1: return {INVALID, EMPTY};
+ }
+ assert(false);
+ return {INVALID, INVALID};
+ };
+
+ auto tester = [&helper](const Node& node, Span<InputResult> subres) -> InputResult {
+ auto ret = helper(node, subres);
+
+ // Do a consistency check between the satisfaction code and the type checker
+ // (the actual satisfaction code in ProduceInputHelper does not use GetType)
+
+ // For 'z' nodes, available satisfactions/dissatisfactions must have stack size 0.
+ if (node.GetType() << "z"_mst && ret.nsat.available != Availability::NO) assert(ret.nsat.stack.size() == 0);
+ if (node.GetType() << "z"_mst && ret.sat.available != Availability::NO) assert(ret.sat.stack.size() == 0);
+
+ // For 'o' nodes, available satisfactions/dissatisfactions must have stack size 1.
+ if (node.GetType() << "o"_mst && ret.nsat.available != Availability::NO) assert(ret.nsat.stack.size() == 1);
+ if (node.GetType() << "o"_mst && ret.sat.available != Availability::NO) assert(ret.sat.stack.size() == 1);
+
+ // For 'n' nodes, available satisfactions/dissatisfactions must have stack size 1 or larger. For satisfactions,
+ // the top element cannot be 0.
+ if (node.GetType() << "n"_mst && ret.sat.available != Availability::NO) assert(ret.sat.stack.size() >= 1);
+ if (node.GetType() << "n"_mst && ret.nsat.available != Availability::NO) assert(ret.nsat.stack.size() >= 1);
+ if (node.GetType() << "n"_mst && ret.sat.available != Availability::NO) assert(!ret.sat.stack.back().empty());
+
+ // For 'd' nodes, a dissatisfaction must exist, and they must not need a signature. If it is non-malleable,
+ // it must be canonical.
+ if (node.GetType() << "d"_mst) assert(ret.nsat.available != Availability::NO);
+ if (node.GetType() << "d"_mst) assert(!ret.nsat.has_sig);
+ if (node.GetType() << "d"_mst && !ret.nsat.malleable) assert(!ret.nsat.non_canon);
+
+ // For 'f'/'s' nodes, dissatisfactions/satisfactions must have a signature.
+ if (node.GetType() << "f"_mst && ret.nsat.available != Availability::NO) assert(ret.nsat.has_sig);
+ if (node.GetType() << "s"_mst && ret.sat.available != Availability::NO) assert(ret.sat.has_sig);
+
+ // For non-malleable 'e' nodes, a non-malleable dissatisfaction must exist.
+ if (node.GetType() << "me"_mst) assert(ret.nsat.available != Availability::NO);
+ if (node.GetType() << "me"_mst) assert(!ret.nsat.malleable);
+
+ // For 'm' nodes, if a satisfaction exists, it must be non-malleable.
+ if (node.GetType() << "m"_mst && ret.sat.available != Availability::NO) assert(!ret.sat.malleable);
+
+ // If a non-malleable satisfaction exists, it must be canonical.
+ if (ret.sat.available != Availability::NO && !ret.sat.malleable) assert(!ret.sat.non_canon);
+
+ return ret;
+ };
+
+ return TreeEval<InputResult>(tester);
+ }
+
public:
/** Update duplicate key information in this Node.
*
@@ -877,6 +1158,47 @@ public:
});
}
+ //! Determine whether a Miniscript node is satisfiable. fn(node) will be invoked for all
+ //! key, time, and hashing nodes, and should return their satisfiability.
+ template<typename F>
+ bool IsSatisfiable(F fn) const
+ {
+ // TreeEval() doesn't support bool as NodeType, so use int instead.
+ return TreeEval<int>([&fn](const Node& node, Span<int> subs) -> bool {
+ switch (node.fragment) {
+ case Fragment::JUST_0:
+ return false;
+ case Fragment::JUST_1:
+ return true;
+ case Fragment::PK_K:
+ case Fragment::PK_H:
+ case Fragment::MULTI:
+ case Fragment::AFTER:
+ case Fragment::OLDER:
+ case Fragment::HASH256:
+ case Fragment::HASH160:
+ case Fragment::SHA256:
+ case Fragment::RIPEMD160:
+ return bool{fn(node)};
+ case Fragment::ANDOR:
+ return (subs[0] && subs[1]) || subs[2];
+ case Fragment::AND_V:
+ case Fragment::AND_B:
+ return subs[0] && subs[1];
+ case Fragment::OR_B:
+ case Fragment::OR_C:
+ case Fragment::OR_D:
+ case Fragment::OR_I:
+ return subs[0] || subs[1];
+ case Fragment::THRESH:
+ return std::count(subs.begin(), subs.end(), true) >= node.k;
+ default: // wrappers
+ assert(subs.size() == 1);
+ return subs[0];
+ }
+ });
+ }
+
//! Check whether this node is valid at all.
bool IsValid() const { return !(GetType() == ""_mst) && ScriptSize() <= MAX_STANDARD_P2WSH_SCRIPT_SIZE; }
@@ -904,6 +1226,18 @@ public:
//! Check whether this node is safe as a script on its own.
bool IsSane() const { return IsValidTopLevel() && IsSaneSubexpression() && NeedsSignature(); }
+ //! Produce a witness for this script, if possible and given the information available in the context.
+ //! The non-malleable satisfaction is guaranteed to be valid if it exists, and ValidSatisfaction()
+ //! is true. If IsSane() holds, this satisfaction is guaranteed to succeed in case the node's
+ //! conditions are satisfied (private keys and hash preimages available, locktimes satisfied).
+ template<typename Ctx>
+ Availability Satisfy(const Ctx& ctx, std::vector<std::vector<unsigned char>>& stack, bool nonmalleable = true) const {
+ auto ret = ProduceInput(ctx);
+ if (nonmalleable && (ret.sat.malleable || !ret.sat.has_sig)) return Availability::NO;
+ stack = std::move(ret.sat.stack);
+ return ret.sat.available;
+ }
+
//! Equality testing.
bool operator==(const Node<Key>& arg) const { return Compare(*this, arg) == 0; }
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index 70df9ee62c..85589fe86b 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -10,6 +10,7 @@
#include <policy/policy.h>
#include <primitives/transaction.h>
#include <script/keyorigin.h>
+#include <script/miniscript.h>
#include <script/signingprovider.h>
#include <script/standard.h>
#include <uint256.h>
@@ -380,6 +381,92 @@ static CScript PushAll(const std::vector<valtype>& values)
return result;
}
+template<typename M, typename K, typename V>
+miniscript::Availability MsLookupHelper(const M& map, const K& key, V& value)
+{
+ auto it = map.find(key);
+ if (it != map.end()) {
+ value = it->second;
+ return miniscript::Availability::YES;
+ }
+ return miniscript::Availability::NO;
+}
+
+/**
+ * Context for solving a Miniscript.
+ * If enough material (access to keys, hash preimages, ..) is given, produces a valid satisfaction.
+ */
+struct Satisfier {
+ typedef CPubKey Key;
+
+ const SigningProvider& m_provider;
+ SignatureData& m_sig_data;
+ const BaseSignatureCreator& m_creator;
+ const CScript& m_witness_script;
+
+ explicit Satisfier(const SigningProvider& provider LIFETIMEBOUND, SignatureData& sig_data LIFETIMEBOUND,
+ const BaseSignatureCreator& creator LIFETIMEBOUND,
+ const CScript& witscript LIFETIMEBOUND) : m_provider(provider),
+ m_sig_data(sig_data),
+ m_creator(creator),
+ m_witness_script(witscript) {}
+
+ static bool KeyCompare(const Key& a, const Key& b) {
+ return a < b;
+ }
+
+ //! Conversion from a raw public key.
+ template <typename I>
+ std::optional<Key> FromPKBytes(I first, I last) const
+ {
+ Key pubkey{first, last};
+ if (pubkey.IsValid()) return pubkey;
+ return {};
+ }
+
+ //! Conversion from a raw public key hash.
+ template<typename I>
+ std::optional<Key> FromPKHBytes(I first, I last) const {
+ assert(last - first == 20);
+ Key pubkey;
+ CKeyID key_id;
+ std::copy(first, last, key_id.begin());
+ if (GetPubKey(m_provider, m_sig_data, key_id, pubkey)) return pubkey;
+ m_sig_data.missing_pubkeys.push_back(key_id);
+ return {};
+ }
+
+ //! Conversion to raw public key.
+ std::vector<unsigned char> ToPKBytes(const CPubKey& key) const { return {key.begin(), key.end()}; }
+
+ //! Satisfy a signature check.
+ miniscript::Availability Sign(const CPubKey& key, std::vector<unsigned char>& sig) const {
+ if (CreateSig(m_creator, m_sig_data, m_provider, sig, key, m_witness_script, SigVersion::WITNESS_V0)) {
+ return miniscript::Availability::YES;
+ }
+ return miniscript::Availability::NO;
+ }
+
+ //! Time lock satisfactions.
+ bool CheckAfter(uint32_t value) const { return m_creator.Checker().CheckLockTime(CScriptNum(value)); }
+ bool CheckOlder(uint32_t value) const { return m_creator.Checker().CheckSequence(CScriptNum(value)); }
+
+
+ //! Hash preimage satisfactions.
+ miniscript::Availability SatSHA256(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return MsLookupHelper(m_sig_data.sha256_preimages, hash, preimage);
+ }
+ miniscript::Availability SatRIPEMD160(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return MsLookupHelper(m_sig_data.ripemd160_preimages, hash, preimage);
+ }
+ miniscript::Availability SatHASH256(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return MsLookupHelper(m_sig_data.hash256_preimages, hash, preimage);
+ }
+ miniscript::Availability SatHASH160(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return MsLookupHelper(m_sig_data.hash160_preimages, hash, preimage);
+ }
+};
+
bool ProduceSignature(const SigningProvider& provider, const BaseSignatureCreator& creator, const CScript& fromPubKey, SignatureData& sigdata)
{
if (sigdata.complete) return true;
@@ -415,9 +502,21 @@ bool ProduceSignature(const SigningProvider& provider, const BaseSignatureCreato
{
CScript witnessscript(result[0].begin(), result[0].end());
sigdata.witness_script = witnessscript;
- TxoutType subType;
+
+ TxoutType subType{TxoutType::NONSTANDARD};
solved = solved && SignStep(provider, creator, witnessscript, result, subType, SigVersion::WITNESS_V0, sigdata) && subType != TxoutType::SCRIPTHASH && subType != TxoutType::WITNESS_V0_SCRIPTHASH && subType != TxoutType::WITNESS_V0_KEYHASH;
+
+ // If we couldn't find a solution with the legacy satisfier, try satisfying the script using Miniscript.
+ // Note we need to check if the result stack is empty before, because it might be used even if the Script
+ // isn't fully solved. For instance the CHECKMULTISIG satisfaction in SignStep() pushes partial signatures
+ // and the extractor relies on this behaviour to combine witnesses.
+ if (!solved && result.empty()) {
+ Satisfier ms_satisfier{provider, sigdata, creator, witnessscript};
+ const auto ms = miniscript::FromScript(witnessscript, ms_satisfier);
+ solved = ms && ms->Satisfy(ms_satisfier, result) == miniscript::Availability::YES;
+ }
result.push_back(std::vector<unsigned char>(witnessscript.begin(), witnessscript.end()));
+
sigdata.scriptWitness.stack = result;
sigdata.witness = true;
result.clear();
@@ -563,26 +662,25 @@ void SignatureData::MergeSignatureData(SignatureData sigdata)
signatures.insert(std::make_move_iterator(sigdata.signatures.begin()), std::make_move_iterator(sigdata.signatures.end()));
}
-bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType)
+bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType, SignatureData& sig_data)
{
assert(nIn < txTo.vin.size());
MutableTransactionSignatureCreator creator(txTo, nIn, amount, nHashType);
- SignatureData sigdata;
- bool ret = ProduceSignature(provider, creator, fromPubKey, sigdata);
- UpdateInput(txTo.vin.at(nIn), sigdata);
+ bool ret = ProduceSignature(provider, creator, fromPubKey, sig_data);
+ UpdateInput(txTo.vin.at(nIn), sig_data);
return ret;
}
-bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType)
+bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType, SignatureData& sig_data)
{
assert(nIn < txTo.vin.size());
const CTxIn& txin = txTo.vin[nIn];
assert(txin.prevout.n < txFrom.vout.size());
const CTxOut& txout = txFrom.vout[txin.prevout.n];
- return SignSignature(provider, txout.scriptPubKey, txTo, nIn, txout.nValue, nHashType);
+ return SignSignature(provider, txout.scriptPubKey, txTo, nIn, txout.nValue, nHashType, sig_data);
}
namespace {
@@ -591,8 +689,10 @@ class DummySignatureChecker final : public BaseSignatureChecker
{
public:
DummySignatureChecker() = default;
- bool CheckECDSASignature(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override { return true; }
- bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror) const override { return true; }
+ bool CheckECDSASignature(const std::vector<unsigned char>& sig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override { return sig.size() != 0; }
+ bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* serror) const override { return sig.size() != 0; }
+ bool CheckLockTime(const CScriptNum& nLockTime) const override { return true; }
+ bool CheckSequence(const CScriptNum& nSequence) const override { return true; }
};
}
diff --git a/src/script/sign.h b/src/script/sign.h
index 263fb61fc5..fe2354cad7 100644
--- a/src/script/sign.h
+++ b/src/script/sign.h
@@ -83,6 +83,10 @@ struct SignatureData {
std::vector<CKeyID> missing_sigs; ///< KeyIDs of pubkeys for signatures which could not be found
uint160 missing_redeem_script; ///< ScriptID of the missing redeemScript (if any)
uint256 missing_witness_script; ///< SHA256 of the missing witnessScript (if any)
+ std::map<std::vector<uint8_t>, std::vector<uint8_t>> sha256_preimages; ///< Mapping from a SHA256 hash to its preimage provided to solve a Script
+ std::map<std::vector<uint8_t>, std::vector<uint8_t>> hash256_preimages; ///< Mapping from a HASH256 hash to its preimage provided to solve a Script
+ std::map<std::vector<uint8_t>, std::vector<uint8_t>> ripemd160_preimages; ///< Mapping from a RIPEMD160 hash to its preimage provided to solve a Script
+ std::map<std::vector<uint8_t>, std::vector<uint8_t>> hash160_preimages; ///< Mapping from a HASH160 hash to its preimage provided to solve a Script
SignatureData() {}
explicit SignatureData(const CScript& script) : scriptSig(script) {}
@@ -92,9 +96,24 @@ struct SignatureData {
/** Produce a script signature using a generic signature creator. */
bool ProduceSignature(const SigningProvider& provider, const BaseSignatureCreator& creator, const CScript& scriptPubKey, SignatureData& sigdata);
-/** Produce a script signature for a transaction. */
-bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo, unsigned int nIn, const CAmount& amount, int nHashType);
-bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo, unsigned int nIn, int nHashType);
+/**
+ * Produce a satisfying script (scriptSig or witness).
+ *
+ * @param provider Utility containing the information necessary to solve a script.
+ * @param fromPubKey The script to produce a satisfaction for.
+ * @param txTo The spending transaction.
+ * @param nIn The index of the input in `txTo` referring the output being spent.
+ * @param amount The value of the output being spent.
+ * @param nHashType Signature hash type.
+ * @param sig_data Additional data provided to solve a script. Filled with the resulting satisfying
+ * script and whether the satisfaction is complete.
+ *
+ * @return True if the produced script is entirely satisfying `fromPubKey`.
+ **/
+bool SignSignature(const SigningProvider &provider, const CScript& fromPubKey, CMutableTransaction& txTo,
+ unsigned int nIn, const CAmount& amount, int nHashType, SignatureData& sig_data);
+bool SignSignature(const SigningProvider &provider, const CTransaction& txFrom, CMutableTransaction& txTo,
+ unsigned int nIn, int nHashType, SignatureData& sig_data);
/** Extract signature data from a transaction input, and insert it. */
SignatureData DataFromTransaction(const CMutableTransaction& tx, unsigned int nIn, const CTxOut& txout);
diff --git a/src/streams.h b/src/streams.h
index ed9af308c9..8788343809 100644
--- a/src/streams.h
+++ b/src/streams.h
@@ -358,14 +358,6 @@ public:
nType{nTypeIn},
nVersion{nVersionIn} {}
- template <typename... Args>
- CDataStream(int nTypeIn, int nVersionIn, Args&&... args)
- : nType{nTypeIn},
- nVersion{nVersionIn}
- {
- ::SerializeMany(*this, std::forward<Args>(args)...);
- }
-
int GetType() const { return nType; }
void SetVersion(int n) { nVersion = n; }
int GetVersion() const { return nVersion; }
diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp
index 312f417129..55ecd41af1 100644
--- a/src/test/coins_tests.cpp
+++ b/src/test/coins_tests.cpp
@@ -131,8 +131,8 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
std::map<COutPoint, Coin> result;
// The cache stack.
- std::vector<CCoinsViewCacheTest*> stack; // A stack of CCoinsViewCaches on top.
- stack.push_back(new CCoinsViewCacheTest(base)); // Start with one cache.
+ std::vector<std::unique_ptr<CCoinsViewCacheTest>> stack; // A stack of CCoinsViewCaches on top.
+ stack.push_back(std::make_unique<CCoinsViewCacheTest>(base)); // Start with one cache.
// Use a limited set of random transaction ids, so we do test overwriting entries.
std::vector<uint256> txids;
@@ -218,7 +218,7 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
found_an_entry = true;
}
}
- for (const CCoinsViewCacheTest *test : stack) {
+ for (const auto& test : stack) {
test->SelfTest();
}
}
@@ -241,18 +241,17 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
bool should_erase = InsecureRandRange(4) < 3;
BOOST_CHECK(should_erase ? stack.back()->Flush() : stack.back()->Sync());
flushed_without_erase |= !should_erase;
- delete stack.back();
stack.pop_back();
}
if (stack.size() == 0 || (stack.size() < 4 && InsecureRandBool())) {
//Add a new cache
CCoinsView* tip = base;
if (stack.size() > 0) {
- tip = stack.back();
+ tip = stack.back().get();
} else {
removed_all_caches = true;
}
- stack.push_back(new CCoinsViewCacheTest(tip));
+ stack.push_back(std::make_unique<CCoinsViewCacheTest>(tip));
if (stack.size() == 4) {
reached_4_caches = true;
}
@@ -260,12 +259,6 @@ void SimulationTest(CCoinsView* base, bool fake_best_block)
}
}
- // Clean up the stack.
- while (stack.size() > 0) {
- delete stack.back();
- stack.pop_back();
- }
-
// Verify coverage.
BOOST_CHECK(removed_all_caches);
BOOST_CHECK(reached_4_caches);
@@ -321,8 +314,8 @@ BOOST_AUTO_TEST_CASE(updatecoins_simulation_test)
// The cache stack.
CCoinsViewTest base; // A CCoinsViewTest at the bottom.
- std::vector<CCoinsViewCacheTest*> stack; // A stack of CCoinsViewCaches on top.
- stack.push_back(new CCoinsViewCacheTest(&base)); // Start with one cache.
+ std::vector<std::unique_ptr<CCoinsViewCacheTest>> stack; // A stack of CCoinsViewCaches on top.
+ stack.push_back(std::make_unique<CCoinsViewCacheTest>(&base)); // Start with one cache.
// Track the txids we've used in various sets
std::set<COutPoint> coinbase_coins;
@@ -487,25 +480,18 @@ BOOST_AUTO_TEST_CASE(updatecoins_simulation_test)
// Every 100 iterations, change the cache stack.
if (stack.size() > 0 && InsecureRandBool() == 0) {
BOOST_CHECK(stack.back()->Flush());
- delete stack.back();
stack.pop_back();
}
if (stack.size() == 0 || (stack.size() < 4 && InsecureRandBool())) {
CCoinsView* tip = &base;
if (stack.size() > 0) {
- tip = stack.back();
+ tip = stack.back().get();
}
- stack.push_back(new CCoinsViewCacheTest(tip));
+ stack.push_back(std::make_unique<CCoinsViewCacheTest>(tip));
}
}
}
- // Clean up the stack.
- while (stack.size() > 0) {
- delete stack.back();
- stack.pop_back();
- }
-
// Verify coverage.
BOOST_CHECK(spent_a_duplicate_coinbase);
@@ -918,7 +904,7 @@ Coin MakeCoin()
void TestFlushBehavior(
CCoinsViewCacheTest* view,
CCoinsViewDB& base,
- std::vector<CCoinsViewCacheTest*>& all_caches,
+ std::vector<std::unique_ptr<CCoinsViewCacheTest>>& all_caches,
bool do_erasing_flush)
{
CAmount value;
@@ -928,7 +914,7 @@ void TestFlushBehavior(
auto flush_all = [&all_caches](bool erase) {
// Flush in reverse order to ensure that flushes happen from children up.
for (auto i = all_caches.rbegin(); i != all_caches.rend(); ++i) {
- auto cache = *i;
+ auto& cache = *i;
// hashBlock must be filled before flushing to disk; value is
// unimportant here. This is normally done during connect/disconnect block.
cache->SetBestBlock(InsecureRand256());
@@ -1079,19 +1065,13 @@ BOOST_AUTO_TEST_CASE(ccoins_flush_behavior)
{
// Create two in-memory caches atop a leveldb view.
CCoinsViewDB base{"test", /*nCacheSize=*/ 1 << 23, /*fMemory=*/ true, /*fWipe=*/ false};
- std::vector<CCoinsViewCacheTest*> caches;
- caches.push_back(new CCoinsViewCacheTest(&base));
- caches.push_back(new CCoinsViewCacheTest(caches.back()));
-
- for (CCoinsViewCacheTest* view : caches) {
- TestFlushBehavior(view, base, caches, /*do_erasing_flush=*/ false);
- TestFlushBehavior(view, base, caches, /*do_erasing_flush=*/ true);
- }
+ std::vector<std::unique_ptr<CCoinsViewCacheTest>> caches;
+ caches.push_back(std::make_unique<CCoinsViewCacheTest>(&base));
+ caches.push_back(std::make_unique<CCoinsViewCacheTest>(caches.back().get()));
- // Clean up the caches.
- while (caches.size() > 0) {
- delete caches.back();
- caches.pop_back();
+ for (const auto& view : caches) {
+ TestFlushBehavior(view.get(), base, caches, /*do_erasing_flush=*/false);
+ TestFlushBehavior(view.get(), base, caches, /*do_erasing_flush=*/true);
}
}
diff --git a/src/test/crypto_tests.cpp b/src/test/crypto_tests.cpp
index d3eef7beb7..ed851b5266 100644
--- a/src/test/crypto_tests.cpp
+++ b/src/test/crypto_tests.cpp
@@ -133,14 +133,14 @@ static void TestAES256CBC(const std::string &hexkey, const std::string &hexiv, b
static void TestChaCha20(const std::string &hex_message, const std::string &hexkey, uint64_t nonce, uint64_t seek, const std::string& hexout)
{
std::vector<unsigned char> key = ParseHex(hexkey);
+ assert(key.size() == 32);
std::vector<unsigned char> m = ParseHex(hex_message);
- ChaCha20 rng(key.data(), key.size());
+ ChaCha20 rng(key.data());
rng.SetIV(nonce);
- rng.Seek(seek);
- std::vector<unsigned char> out = ParseHex(hexout);
+ rng.Seek64(seek);
std::vector<unsigned char> outres;
- outres.resize(out.size());
- assert(hex_message.empty() || m.size() == out.size());
+ outres.resize(hexout.size() / 2);
+ assert(hex_message.empty() || m.size() * 2 == hexout.size());
// perform the ChaCha20 round(s), if message is provided it will output the encrypted ciphertext otherwise the keystream
if (!hex_message.empty()) {
@@ -148,17 +148,38 @@ static void TestChaCha20(const std::string &hex_message, const std::string &hexk
} else {
rng.Keystream(outres.data(), outres.size());
}
- BOOST_CHECK(out == outres);
+ BOOST_CHECK_EQUAL(hexout, HexStr(outres));
if (!hex_message.empty()) {
// Manually XOR with the keystream and compare the output
rng.SetIV(nonce);
- rng.Seek(seek);
+ rng.Seek64(seek);
std::vector<unsigned char> only_keystream(outres.size());
rng.Keystream(only_keystream.data(), only_keystream.size());
for (size_t i = 0; i != m.size(); i++) {
outres[i] = m[i] ^ only_keystream[i];
}
- BOOST_CHECK(out == outres);
+ BOOST_CHECK_EQUAL(hexout, HexStr(outres));
+ }
+
+ // Repeat 10x, but fragmented into 3 chunks, to exercise the ChaCha20 class's caching.
+ for (int i = 0; i < 10; ++i) {
+ size_t lens[3];
+ lens[0] = InsecureRandRange(hexout.size() / 2U + 1U);
+ lens[1] = InsecureRandRange(hexout.size() / 2U + 1U - lens[0]);
+ lens[2] = hexout.size() / 2U - lens[0] - lens[1];
+
+ rng.Seek64(seek);
+ outres.assign(hexout.size() / 2U, 0);
+ size_t pos = 0;
+ for (int j = 0; j < 3; ++j) {
+ if (!hex_message.empty()) {
+ rng.Crypt(m.data() + pos, outres.data() + pos, lens[j]);
+ } else {
+ rng.Keystream(outres.data() + pos, lens[j]);
+ }
+ pos += lens[j];
+ }
+ BOOST_CHECK_EQUAL(hexout, HexStr(outres));
}
}
@@ -460,7 +481,88 @@ BOOST_AUTO_TEST_CASE(aes_cbc_testvectors) {
BOOST_AUTO_TEST_CASE(chacha20_testvector)
{
- // Test vector from RFC 7539
+ // RFC 7539/8439 A.1 Test Vector #1:
+ TestChaCha20("",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ 0, 0,
+ "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7"
+ "da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586");
+
+ // RFC 7539/8439 A.1 Test Vector #2:
+ TestChaCha20("",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ 0, 1,
+ "9f07e7be5551387a98ba977c732d080dcb0f29a048e3656912c6533e32ee7aed"
+ "29b721769ce64e43d57133b074d839d531ed1f28510afb45ace10a1f4b794d6f");
+
+ // RFC 7539/8439 A.1 Test Vector #3:
+ TestChaCha20("",
+ "0000000000000000000000000000000000000000000000000000000000000001",
+ 0, 1,
+ "3aeb5224ecf849929b9d828db1ced4dd832025e8018b8160b82284f3c949aa5a"
+ "8eca00bbb4a73bdad192b5c42f73f2fd4e273644c8b36125a64addeb006c13a0");
+
+ // RFC 7539/8439 A.1 Test Vector #4:
+ TestChaCha20("",
+ "00ff000000000000000000000000000000000000000000000000000000000000",
+ 0, 2,
+ "72d54dfbf12ec44b362692df94137f328fea8da73990265ec1bbbea1ae9af0ca"
+ "13b25aa26cb4a648cb9b9d1be65b2c0924a66c54d545ec1b7374f4872e99f096");
+
+ // RFC 7539/8439 A.1 Test Vector #5:
+ TestChaCha20("",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ 0x200000000000000, 0,
+ "c2c64d378cd536374ae204b9ef933fcd1a8b2288b3dfa49672ab765b54ee27c7"
+ "8a970e0e955c14f3a88e741b97c286f75f8fc299e8148362fa198a39531bed6d");
+
+ // RFC 7539/8439 A.2 Test Vector #1:
+ TestChaCha20("0000000000000000000000000000000000000000000000000000000000000000"
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ "0000000000000000000000000000000000000000000000000000000000000000",
+ 0, 0,
+ "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7"
+ "da41597c5157488d7724e03fb8d84a376a43b8f41518a11cc387b669b2ee6586");
+
+ // RFC 7539/8439 A.2 Test Vector #2:
+ TestChaCha20("416e79207375626d697373696f6e20746f20746865204945544620696e74656e"
+ "6465642062792074686520436f6e7472696275746f7220666f72207075626c69"
+ "636174696f6e20617320616c6c206f722070617274206f6620616e2049455446"
+ "20496e7465726e65742d4472616674206f722052464320616e6420616e792073"
+ "746174656d656e74206d6164652077697468696e2074686520636f6e74657874"
+ "206f6620616e204945544620616374697669747920697320636f6e7369646572"
+ "656420616e20224945544620436f6e747269627574696f6e222e205375636820"
+ "73746174656d656e747320696e636c756465206f72616c2073746174656d656e"
+ "747320696e20494554462073657373696f6e732c2061732077656c6c20617320"
+ "7772697474656e20616e6420656c656374726f6e696320636f6d6d756e696361"
+ "74696f6e73206d61646520617420616e792074696d65206f7220706c6163652c"
+ "207768696368206172652061646472657373656420746f",
+ "0000000000000000000000000000000000000000000000000000000000000001",
+ 0x200000000000000, 1,
+ "a3fbf07df3fa2fde4f376ca23e82737041605d9f4f4f57bd8cff2c1d4b7955ec"
+ "2a97948bd3722915c8f3d337f7d370050e9e96d647b7c39f56e031ca5eb6250d"
+ "4042e02785ececfa4b4bb5e8ead0440e20b6e8db09d881a7c6132f420e527950"
+ "42bdfa7773d8a9051447b3291ce1411c680465552aa6c405b7764d5e87bea85a"
+ "d00f8449ed8f72d0d662ab052691ca66424bc86d2df80ea41f43abf937d3259d"
+ "c4b2d0dfb48a6c9139ddd7f76966e928e635553ba76c5c879d7b35d49eb2e62b"
+ "0871cdac638939e25e8a1e0ef9d5280fa8ca328b351c3c765989cbcf3daa8b6c"
+ "cc3aaf9f3979c92b3720fc88dc95ed84a1be059c6499b9fda236e7e818b04b0b"
+ "c39c1e876b193bfe5569753f88128cc08aaa9b63d1a16f80ef2554d7189c411f"
+ "5869ca52c5b83fa36ff216b9c1d30062bebcfd2dc5bce0911934fda79a86f6e6"
+ "98ced759c3ff9b6477338f3da4f9cd8514ea9982ccafb341b2384dd902f3d1ab"
+ "7ac61dd29c6f21ba5b862f3730e37cfdc4fd806c22f221");
+
+ // RFC 7539/8439 A.2 Test Vector #3:
+ TestChaCha20("2754776173206272696c6c69672c20616e642074686520736c6974687920746f"
+ "7665730a446964206779726520616e642067696d626c6520696e207468652077"
+ "6162653a0a416c6c206d696d737920776572652074686520626f726f676f7665"
+ "732c0a416e6420746865206d6f6d65207261746873206f757467726162652e",
+ "1c9240a5eb55d38af333888604f6b5f0473917c1402b80099dca5cbc207075c0",
+ 0x200000000000000, 42,
+ "62e6347f95ed87a45ffae7426f27a1df5fb69110044c0d73118effa95b01e5cf"
+ "166d3df2d721caf9b21e5fb14c616871fd84c54f9d65b283196c7fe4f60553eb"
+ "f39c6402c42234e32a356b3e764312a61a5532055716ead6962568f87d3f3f77"
+ "04c6a8d1bcd1bf4d50d6154b6da731b187b58dfd728afa36757a797ac188d1");
// test encryption
TestChaCha20("4c616469657320616e642047656e746c656d656e206f662074686520636c617373206f66202739393a204966204920636f756"
@@ -477,27 +579,24 @@ BOOST_AUTO_TEST_CASE(chacha20_testvector)
"224f51f3401bd9e12fde276fb8631ded8c131f823d2c06e27e4fcaec9ef3cf788a3b0aa372600a92b57974cded2b9334794cb"
"a40c63e34cdea212c4cf07d41b769a6749f3f630f4122cafe28ec4dc47e26d4346d70b98c73f3e9c53ac40c5945398b6eda1a"
"832c89c167eacd901d7e2bf363");
+}
- // Test vectors from https://tools.ietf.org/html/draft-agl-tls-chacha20poly1305-04#section-7
- TestChaCha20("", "0000000000000000000000000000000000000000000000000000000000000000", 0, 0,
- "76b8e0ada0f13d90405d6ae55386bd28bdd219b8a08ded1aa836efcc8b770dc7da41597c5157488d7724e03fb8d84a376a43b"
- "8f41518a11cc387b669b2ee6586");
- TestChaCha20("", "0000000000000000000000000000000000000000000000000000000000000001", 0, 0,
- "4540f05a9f1fb296d7736e7b208e3c96eb4fe1834688d2604f450952ed432d41bbe2a0b6ea7566d2a5d1e7e20d42af2c53d79"
- "2b1c43fea817e9ad275ae546963");
- TestChaCha20("", "0000000000000000000000000000000000000000000000000000000000000000", 0x0100000000000000ULL, 0,
- "de9cba7bf3d69ef5e786dc63973f653a0b49e015adbff7134fcb7df137821031e85a050278a7084527214f73efc7fa5b52770"
- "62eb7a0433e445f41e3");
- TestChaCha20("", "0000000000000000000000000000000000000000000000000000000000000000", 1, 0,
- "ef3fdfd6c61578fbf5cf35bd3dd33b8009631634d21e42ac33960bd138e50d32111e4caf237ee53ca8ad6426194a88545ddc4"
- "97a0b466e7d6bbdb0041b2f586b");
- TestChaCha20("", "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f", 0x0706050403020100ULL, 0,
- "f798a189f195e66982105ffb640bb7757f579da31602fc93ec01ac56f85ac3c134a4547b733b46413042c9440049176905d3b"
- "e59ea1c53f15916155c2be8241a38008b9a26bc35941e2444177c8ade6689de95264986d95889fb60e84629c9bd9a5acb1cc1"
- "18be563eb9b3a4a472f82e09a7e778492b562ef7130e88dfe031c79db9d4f7c7a899151b9a475032b63fc385245fe054e3dd5"
- "a97a5f576fe064025d3ce042c566ab2c507b138db853e3d6959660996546cc9c4a6eafdc777c040d70eaf46f76dad3979e5c5"
- "360c3317166a1c894c94a371876a94df7628fe4eaaf2ccb27d5aaae0ad7ad0f9d4b6ad3b54098746d4524d38407a6deb3ab78"
- "fab78c9");
+BOOST_AUTO_TEST_CASE(chacha20_midblock)
+{
+ auto key = ParseHex("0000000000000000000000000000000000000000000000000000000000000000");
+ ChaCha20 c20{key.data()};
+ // get one block of keystream
+ unsigned char block[64];
+ c20.Keystream(block, CHACHA20_ROUND_OUTPUT);
+ unsigned char b1[5], b2[7], b3[52];
+ c20 = ChaCha20{key.data()};
+ c20.Keystream(b1, 5);
+ c20.Keystream(b2, 7);
+ c20.Keystream(b3, 52);
+
+ BOOST_CHECK_EQUAL(0, memcmp(b1, block, 5));
+ BOOST_CHECK_EQUAL(0, memcmp(b2, block + 5, 7));
+ BOOST_CHECK_EQUAL(0, memcmp(b3, block + 12, 52));
}
BOOST_AUTO_TEST_CASE(poly1305_testvector)
@@ -617,7 +716,7 @@ static void TestChaCha20Poly1305AEAD(bool must_succeed, unsigned int expected_aa
ChaCha20Poly1305AEAD aead(aead_K_1.data(), aead_K_1.size(), aead_K_2.data(), aead_K_2.size());
// create a chacha20 instance to compare against
- ChaCha20 cmp_ctx(aead_K_1.data(), 32);
+ ChaCha20 cmp_ctx(aead_K_1.data());
// encipher
bool res = aead.Crypt(seqnr_payload, seqnr_aad, aad_pos, ciphertext_buf.data(), ciphertext_buf.size(), plaintext_buf.data(), plaintext_buf.size(), true);
@@ -631,7 +730,7 @@ static void TestChaCha20Poly1305AEAD(bool must_succeed, unsigned int expected_aa
// manually construct the AAD keystream
cmp_ctx.SetIV(seqnr_aad);
- cmp_ctx.Seek(0);
+ cmp_ctx.Seek64(0);
cmp_ctx.Keystream(cmp_ctx_buffer.data(), 64);
BOOST_CHECK(memcmp(expected_aad_keystream.data(), cmp_ctx_buffer.data(), expected_aad_keystream.size()) == 0);
// crypt the 3 length bytes and compare the length
@@ -659,7 +758,7 @@ static void TestChaCha20Poly1305AEAD(bool must_succeed, unsigned int expected_aa
}
// set nonce and block counter, output the keystream
cmp_ctx.SetIV(seqnr_aad);
- cmp_ctx.Seek(0);
+ cmp_ctx.Seek64(0);
cmp_ctx.Keystream(cmp_ctx_buffer.data(), 64);
// crypt the 3 length bytes and compare the length
diff --git a/src/test/dbwrapper_tests.cpp b/src/test/dbwrapper_tests.cpp
index 2447c882ae..7ad123754b 100644
--- a/src/test/dbwrapper_tests.cpp
+++ b/src/test/dbwrapper_tests.cpp
@@ -5,6 +5,7 @@
#include <dbwrapper.h>
#include <test/util/setup_common.h>
#include <uint256.h>
+#include <util/string.h>
#include <memory>
@@ -324,12 +325,6 @@ struct StringContentsSerializer {
StringContentsSerializer() = default;
explicit StringContentsSerializer(const std::string& inp) : str(inp) {}
- StringContentsSerializer& operator+=(const std::string& s) {
- str += s;
- return *this;
- }
- StringContentsSerializer& operator+=(const StringContentsSerializer& s) { return *this += s.str; }
-
template<typename Stream>
void Serialize(Stream& s) const
{
@@ -343,44 +338,34 @@ struct StringContentsSerializer {
{
str.clear();
uint8_t c{0};
- while (true) {
- try {
- s >> c;
- str.push_back(c);
- } catch (const std::ios_base::failure&) {
- break;
- }
+ while (!s.eof()) {
+ s >> c;
+ str.push_back(c);
}
}
};
BOOST_AUTO_TEST_CASE(iterator_string_ordering)
{
- char buf[10];
-
fs::path ph = m_args.GetDataDirBase() / "iterator_string_ordering";
CDBWrapper dbw(ph, (1 << 20), true, false, false);
- for (int x=0x00; x<10; ++x) {
- for (int y = 0; y < 10; y++) {
- snprintf(buf, sizeof(buf), "%d", x);
- StringContentsSerializer key(buf);
- for (int z = 0; z < y; z++)
+ for (int x = 0; x < 10; ++x) {
+ for (int y = 0; y < 10; ++y) {
+ std::string key{ToString(x)};
+ for (int z = 0; z < y; ++z)
key += key;
uint32_t value = x*x;
- BOOST_CHECK(dbw.Write(key, value));
+ BOOST_CHECK(dbw.Write(StringContentsSerializer{key}, value));
}
}
std::unique_ptr<CDBIterator> it(const_cast<CDBWrapper&>(dbw).NewIterator());
for (const int seek_start : {0, 5}) {
- snprintf(buf, sizeof(buf), "%d", seek_start);
- StringContentsSerializer seek_key(buf);
- it->Seek(seek_key);
- for (unsigned int x=seek_start; x<10; ++x) {
- for (int y = 0; y < 10; y++) {
- snprintf(buf, sizeof(buf), "%d", x);
- std::string exp_key(buf);
- for (int z = 0; z < y; z++)
+ it->Seek(StringContentsSerializer{ToString(seek_start)});
+ for (unsigned int x = seek_start; x < 10; ++x) {
+ for (int y = 0; y < 10; ++y) {
+ std::string exp_key{ToString(x)};
+ for (int z = 0; z < y; ++z)
exp_key += exp_key;
StringContentsSerializer key;
uint32_t value;
diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp
index 6e4f6cdbab..c4b2b4c63b 100644
--- a/src/test/descriptor_tests.cpp
+++ b/src/test/descriptor_tests.cpp
@@ -45,6 +45,7 @@ constexpr int DERIVE_HARDENED = 16; // The final derivation is hardened, i.e. en
constexpr int MIXED_PUBKEYS = 32;
constexpr int XONLY_KEYS = 64; // X-only pubkeys are in use (and thus inferring/caching may swap parity of pubkeys/keyids)
constexpr int MISSING_PRIVKEYS = 128; // Not all private keys are available, so ToPrivateString will fail.
+constexpr int SIGNABLE_FAILS = 256; // We can sign with this descriptor, but actually trying to sign will fail
/** Compare two descriptors. If only one of them has a checksum, the checksum is ignored. */
bool EqualDescriptor(std::string a, std::string b)
@@ -126,8 +127,11 @@ std::set<std::pair<CPubKey, KeyOriginInfo>> GetKeyOriginData(const FlatSigningPr
return ret;
}
-void DoCheck(const std::string& prv, const std::string& pub, const std::string& norm_pub, int flags, const std::vector<std::vector<std::string>>& scripts, const std::optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY,
- bool replace_apostrophe_with_h_in_prv=false, bool replace_apostrophe_with_h_in_pub=false)
+void DoCheck(const std::string& prv, const std::string& pub, const std::string& norm_pub, int flags,
+ const std::vector<std::vector<std::string>>& scripts, const std::optional<OutputType>& type,
+ const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY, bool replace_apostrophe_with_h_in_prv=false,
+ bool replace_apostrophe_with_h_in_pub=false, uint32_t spender_nlocktime=0, uint32_t spender_nsequence=CTxIn::SEQUENCE_FINAL,
+ std::map<std::vector<uint8_t>, std::vector<uint8_t>> preimages={})
{
FlatSigningProvider keys_priv, keys_pub;
std::set<std::vector<uint32_t>> left_paths = paths;
@@ -303,16 +307,24 @@ void DoCheck(const std::string& prv, const std::string& pub, const std::string&
for (size_t n = 0; n < spks.size(); ++n) {
BOOST_CHECK_EQUAL(ref[n], HexStr(spks[n]));
- if (flags & SIGNABLE) {
+ if (flags & (SIGNABLE | SIGNABLE_FAILS)) {
CMutableTransaction spend;
+ spend.nLockTime = spender_nlocktime;
spend.vin.resize(1);
+ spend.vin[0].nSequence = spender_nsequence;
spend.vout.resize(1);
std::vector<CTxOut> utxos(1);
PrecomputedTransactionData txdata;
txdata.Init(spend, std::move(utxos), /*force=*/true);
MutableTransactionSignatureCreator creator{spend, 0, CAmount{0}, &txdata, SIGHASH_DEFAULT};
SignatureData sigdata;
- BOOST_CHECK_MESSAGE(ProduceSignature(FlatSigningProvider{keys_priv}.Merge(FlatSigningProvider{script_provider}), creator, spks[n], sigdata), prv);
+ // We assume there is no collision between the hashes (eg h1=SHA256(SHA256(x)) and h2=SHA256(x))
+ sigdata.sha256_preimages = preimages;
+ sigdata.hash256_preimages = preimages;
+ sigdata.ripemd160_preimages = preimages;
+ sigdata.hash160_preimages = preimages;
+ const auto prod_sig_res = ProduceSignature(FlatSigningProvider{keys_priv}.Merge(FlatSigningProvider{script_provider}), creator, spks[n], sigdata);
+ BOOST_CHECK_MESSAGE(prod_sig_res == !(flags & SIGNABLE_FAILS), prv);
}
/* Infer a descriptor from the generated script, and verify its solvability and that it roundtrips. */
@@ -340,29 +352,40 @@ void DoCheck(const std::string& prv, const std::string& pub, const std::string&
BOOST_CHECK_MESSAGE(left_paths.empty(), "Not all expected key paths found: " + prv);
}
-void Check(const std::string& prv, const std::string& pub, const std::string& norm_pub, int flags, const std::vector<std::vector<std::string>>& scripts, const std::optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY)
+void Check(const std::string& prv, const std::string& pub, const std::string& norm_pub, int flags,
+ const std::vector<std::vector<std::string>>& scripts, const std::optional<OutputType>& type,
+ const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY, uint32_t spender_nlocktime=0,
+ uint32_t spender_nsequence=CTxIn::SEQUENCE_FINAL, std::map<std::vector<uint8_t>, std::vector<uint8_t>> preimages={})
{
bool found_apostrophes_in_prv = false;
bool found_apostrophes_in_pub = false;
// Do not replace apostrophes with 'h' in prv and pub
- DoCheck(prv, pub, norm_pub, flags, scripts, type, paths);
+ DoCheck(prv, pub, norm_pub, flags, scripts, type, paths, /*replace_apostrophe_with_h_in_prv=*/false,
+ /*replace_apostrophe_with_h_in_pub=*/false, /*spender_nlocktime=*/spender_nlocktime,
+ /*spender_nsequence=*/spender_nsequence, /*preimages=*/preimages);
// Replace apostrophes with 'h' in prv but not in pub, if apostrophes are found in prv
if (prv.find('\'') != std::string::npos) {
found_apostrophes_in_prv = true;
- DoCheck(prv, pub, norm_pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */false);
+ DoCheck(prv, pub, norm_pub, flags, scripts, type, paths, /*replace_apostrophe_with_h_in_prv=*/true,
+ /*replace_apostrophe_with_h_in_pub=*/false, /*spender_nlocktime=*/spender_nlocktime,
+ /*spender_nsequence=*/spender_nsequence, /*preimages=*/preimages);
}
// Replace apostrophes with 'h' in pub but not in prv, if apostrophes are found in pub
if (pub.find('\'') != std::string::npos) {
found_apostrophes_in_pub = true;
- DoCheck(prv, pub, norm_pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */false, /*replace_apostrophe_with_h_in_pub = */true);
+ DoCheck(prv, pub, norm_pub, flags, scripts, type, paths, /*replace_apostrophe_with_h_in_prv=*/false,
+ /*replace_apostrophe_with_h_in_pub=*/true, /*spender_nlocktime=*/spender_nlocktime,
+ /*spender_nsequence=*/spender_nsequence, /*preimages=*/preimages);
}
// Replace apostrophes with 'h' both in prv and in pub, if apostrophes are found in both
if (found_apostrophes_in_prv && found_apostrophes_in_pub) {
- DoCheck(prv, pub, norm_pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */true);
+ DoCheck(prv, pub, norm_pub, flags, scripts, type, paths, /*replace_apostrophe_with_h_in_prv=*/true,
+ /*replace_apostrophe_with_h_in_pub=*/true, /*spender_nlocktime=*/spender_nlocktime,
+ /*spender_nsequence=*/spender_nsequence, /*preimages=*/preimages);
}
}
@@ -528,9 +551,31 @@ BOOST_AUTO_TEST_CASE(descriptor_test)
CheckUnparsable("wsh(and_b(and_b(older(1),a:older(100000000)),s:pk(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd)))", "wsh(and_b(and_b(older(1),a:older(100000000)),s:pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204)))", "and_b(older(1),a:older(100000000)) is not sane: contains mixes of timelocks expressed in blocks and seconds");
CheckUnparsable("wsh(and_b(or_b(pkh(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd),s:pk(Kx9HCDjGiwFcgVNhTrS5z5NeZdD6veeam61eDxLDCkGWujvL4Gnn)),s:pk(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd)))", "wsh(and_b(or_b(pkh(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),s:pk(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0)),s:pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204)))", "and_b(or_b(pkh(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),s:pk(032707170c71d8f75e4ca4e3fce870b9409dcaf12b051d3bcadff74747fa7619c0)),s:pk(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204)) is not sane: contains duplicate public keys");
// Valid with extended keys.
- Check("wsh(and_v(v:ripemd160(095ff41131e5946f3c85f79e44adbcf8e27e080e),multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)))", "wsh(and_v(v:ripemd160(095ff41131e5946f3c85f79e44adbcf8e27e080e),multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)))", "wsh(and_v(v:ripemd160(095ff41131e5946f3c85f79e44adbcf8e27e080e),multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)))", UNSOLVABLE, {{"0020acf425291b98a1d7e0d4690139442abc289175be32ef1f75945e339924246d73"}}, OutputType::BECH32, {{},{0}});
- // Valid under sh(wsh()) and with a mix of xpubs and raw keys
- Check("sh(wsh(thresh(1,pkh(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd),a:and_n(multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0),n:older(2)))))", "sh(wsh(thresh(1,pkh(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", "sh(wsh(thresh(1,pkh(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", UNSOLVABLE | MIXED_PUBKEYS, {{"a914767e9119ff3b3ac0cb6dcfe21de1842ccf85f1c487"}}, OutputType::P2SH_SEGWIT, {{},{0}});
+ Check("wsh(and_v(v:ripemd160(095ff41131e5946f3c85f79e44adbcf8e27e080e),multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)))", "wsh(and_v(v:ripemd160(095ff41131e5946f3c85f79e44adbcf8e27e080e),multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)))", "wsh(and_v(v:ripemd160(095ff41131e5946f3c85f79e44adbcf8e27e080e),multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)))", DEFAULT, {{"0020acf425291b98a1d7e0d4690139442abc289175be32ef1f75945e339924246d73"}}, OutputType::BECH32, {{},{0}});
+ // Valid under sh(wsh()) and with a mix of xpubs and raw keys.
+ Check("sh(wsh(thresh(1,pkh(L4gM1FBdyHNpkzsFh9ipnofLhpZRp2mwobpeULy1a6dBTvw8Ywtd),a:and_n(multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0),n:older(2)))))", "sh(wsh(thresh(1,pkh(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", "sh(wsh(thresh(1,pkh(03cdabb7f2dce7bfbd8a0b9570c6fd1e712e5d64045e9d6b517b3d5072251dc204),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", SIGNABLE | MIXED_PUBKEYS, {{"a914767e9119ff3b3ac0cb6dcfe21de1842ccf85f1c487"}}, OutputType::P2SH_SEGWIT, {{},{0}});
+ // An exotic multisig, we can sign for both branches
+ Check("wsh(thresh(1,pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc),a:pkh(xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)))", "wsh(thresh(1,pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL),a:pkh(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)))", "wsh(thresh(1,pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL),a:pkh(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)))", SIGNABLE, {{"00204a4528fbc0947e02e921b54bd476fc8cc2ebb5c6ae2ccf10ed29fe2937fb6892"}}, OutputType::BECH32, {{},{0}});
+ // We can sign for a script requiring the two kinds of timelock.
+ // But if we don't set a sequence high enough, we'll fail.
+ Check("sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0),n:older(2)))))", "sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", "sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", SIGNABLE_FAILS, {{"a914099f400961f930d4c16c3b33c0e2a58ef53ac38f87"}}, OutputType::P2SH_SEGWIT, {{},{0}}, /*spender_nlocktime=*/1000, /*spender_nsequence=*/1);
+ // And same for the nLockTime.
+ Check("sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0),n:older(2)))))", "sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", "sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", SIGNABLE_FAILS, {{"a914099f400961f930d4c16c3b33c0e2a58ef53ac38f87"}}, OutputType::P2SH_SEGWIT, {{},{0}}, /*spender_nlocktime=*/999, /*spender_nsequence=*/2);
+ // But if both are set to (at least) the required value, we'll succeed.
+ Check("sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0),n:older(2)))))", "sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", "sh(wsh(thresh(2,ndv:after(1000),a:and_n(multi(1,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0),n:older(2)))))", SIGNABLE, {{"a914099f400961f930d4c16c3b33c0e2a58ef53ac38f87"}}, OutputType::P2SH_SEGWIT, {{},{0}}, /*spender_nlocktime=*/1000, /*spender_nsequence=*/2);
+ // We can't sign for a script requiring a ripemd160 preimage without providing it.
+ Check("wsh(and_v(v:ripemd160(ff9aa1829c90d26e73301383f549e1497b7d6325),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:ripemd160(ff9aa1829c90d26e73301383f549e1497b7d6325),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:ripemd160(ff9aa1829c90d26e73301383f549e1497b7d6325),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE_FAILS, {{"002001549deda34cbc4a5982263191380f522695a2ddc2f99fc3a65c736264bd6cab"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {});
+ // But if we provide it, we can.
+ Check("wsh(and_v(v:ripemd160(ff9aa1829c90d26e73301383f549e1497b7d6325),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:ripemd160(ff9aa1829c90d26e73301383f549e1497b7d6325),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:ripemd160(ff9aa1829c90d26e73301383f549e1497b7d6325),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE, {{"002001549deda34cbc4a5982263191380f522695a2ddc2f99fc3a65c736264bd6cab"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {{ParseHex("ff9aa1829c90d26e73301383f549e1497b7d6325"), ParseHex("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")}});
+ // Same for sha256
+ Check("wsh(and_v(v:sha256(7426ba0604c3f8682c7016b44673f85c5bd9da2fa6c1080810cf53ae320c9863),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:sha256(7426ba0604c3f8682c7016b44673f85c5bd9da2fa6c1080810cf53ae320c9863),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:sha256(7426ba0604c3f8682c7016b44673f85c5bd9da2fa6c1080810cf53ae320c9863),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE_FAILS, {{"002071f7283dbbb9a55ed43a54cda16ba0efd0f16dc48fe200f299e57bb5d7be8dd4"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {});
+ Check("wsh(and_v(v:sha256(7426ba0604c3f8682c7016b44673f85c5bd9da2fa6c1080810cf53ae320c9863),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:sha256(7426ba0604c3f8682c7016b44673f85c5bd9da2fa6c1080810cf53ae320c9863),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:sha256(7426ba0604c3f8682c7016b44673f85c5bd9da2fa6c1080810cf53ae320c9863),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE, {{"002071f7283dbbb9a55ed43a54cda16ba0efd0f16dc48fe200f299e57bb5d7be8dd4"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {{ParseHex("7426ba0604c3f8682c7016b44673f85c5bd9da2fa6c1080810cf53ae320c9863"), ParseHex("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")}});
+ // Same for hash160
+ Check("wsh(and_v(v:hash160(292e2df59e3a22109200beed0cdc84b12e66793e),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:hash160(292e2df59e3a22109200beed0cdc84b12e66793e),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:hash160(292e2df59e3a22109200beed0cdc84b12e66793e),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE_FAILS, {{"00209b9d5b45735d0e15df5b41d6594602d3de472262f7b75edc6cf5f3e3fa4e3ae4"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {});
+ Check("wsh(and_v(v:hash160(292e2df59e3a22109200beed0cdc84b12e66793e),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:hash160(292e2df59e3a22109200beed0cdc84b12e66793e),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:hash160(292e2df59e3a22109200beed0cdc84b12e66793e),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE, {{"00209b9d5b45735d0e15df5b41d6594602d3de472262f7b75edc6cf5f3e3fa4e3ae4"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {{ParseHex("292e2df59e3a22109200beed0cdc84b12e66793e"), ParseHex("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")}});
+ // Same for hash256
+ Check("wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE_FAILS, {{"0020cf62bf97baf977aec69cbc290c372899f913337a9093e8f066ab59b8657a365c"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {});
+ Check("wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", "wsh(and_v(v:hash256(ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588),pk(xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)))", SIGNABLE, {{"0020cf62bf97baf977aec69cbc290c372899f913337a9093e8f066ab59b8657a365c"}}, OutputType::BECH32, {{}}, /*spender_nlocktime=*/0, /*spender_nsequence=*/CTxIn::SEQUENCE_FINAL, {{ParseHex("ae253ca2a54debcac7ecf414f6734f48c56421a08bb59182ff9f39a6fffdb588"), ParseHex("000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f")}});
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/fuzz/coins_view.cpp b/src/test/fuzz/coins_view.cpp
index e75dc3ce91..e80c772aa4 100644
--- a/src/test/fuzz/coins_view.cpp
+++ b/src/test/fuzz/coins_view.cpp
@@ -46,7 +46,7 @@ FUZZ_TARGET_INIT(coins_view, initialize_coins_view)
{
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
CCoinsView backend_coins_view;
- CCoinsViewCache coins_view_cache{&backend_coins_view};
+ CCoinsViewCache coins_view_cache{&backend_coins_view, /*deterministic=*/true};
COutPoint random_out_point;
Coin random_coin;
CMutableTransaction random_mutable_transaction;
diff --git a/src/test/fuzz/coinscache_sim.cpp b/src/test/fuzz/coinscache_sim.cpp
new file mode 100644
index 0000000000..f350c9d032
--- /dev/null
+++ b/src/test/fuzz/coinscache_sim.cpp
@@ -0,0 +1,478 @@
+// Copyright (c) 2023 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <coins.h>
+#include <crypto/sha256.h>
+#include <primitives/transaction.h>
+#include <test/fuzz/fuzz.h>
+#include <test/fuzz/FuzzedDataProvider.h>
+#include <test/fuzz/util.h>
+
+#include <assert.h>
+#include <optional>
+#include <memory>
+#include <stdint.h>
+#include <vector>
+
+namespace {
+
+/** Number of distinct COutPoint values used in this test. */
+constexpr uint32_t NUM_OUTPOINTS = 256;
+/** Number of distinct Coin values used in this test (ignoring nHeight). */
+constexpr uint32_t NUM_COINS = 256;
+/** Maximum number of CCoinsViewCache objects used in this test. */
+constexpr uint32_t MAX_CACHES = 4;
+/** Data type large enough to hold NUM_COINS-1. */
+using coinidx_type = uint8_t;
+
+struct PrecomputedData
+{
+ //! Randomly generated COutPoint values.
+ COutPoint outpoints[NUM_OUTPOINTS];
+
+ //! Randomly generated Coin values.
+ Coin coins[NUM_COINS];
+
+ PrecomputedData()
+ {
+ static const uint8_t PREFIX_O[1] = {'o'}; /** Hash prefix for outpoint hashes. */
+ static const uint8_t PREFIX_S[1] = {'s'}; /** Hash prefix for coins scriptPubKeys. */
+ static const uint8_t PREFIX_M[1] = {'m'}; /** Hash prefix for coins nValue/fCoinBase. */
+
+ for (uint32_t i = 0; i < NUM_OUTPOINTS; ++i) {
+ uint32_t idx = (i * 1200U) >> 12; /* Map 3 or 4 entries to same txid. */
+ const uint8_t ser[4] = {uint8_t(idx), uint8_t(idx >> 8), uint8_t(idx >> 16), uint8_t(idx >> 24)};
+ CSHA256().Write(PREFIX_O, 1).Write(ser, sizeof(ser)).Finalize(outpoints[i].hash.begin());
+ outpoints[i].n = i;
+ }
+
+ for (uint32_t i = 0; i < NUM_COINS; ++i) {
+ const uint8_t ser[4] = {uint8_t(i), uint8_t(i >> 8), uint8_t(i >> 16), uint8_t(i >> 24)};
+ uint256 hash;
+ CSHA256().Write(PREFIX_S, 1).Write(ser, sizeof(ser)).Finalize(hash.begin());
+ /* Convert hash to scriptPubkeys (of different lengths, so SanityCheck's cached memory
+ * usage check has a chance to detect mismatches). */
+ switch (i % 5U) {
+ case 0: /* P2PKH */
+ coins[i].out.scriptPubKey.resize(25);
+ coins[i].out.scriptPubKey[0] = OP_DUP;
+ coins[i].out.scriptPubKey[1] = OP_HASH160;
+ coins[i].out.scriptPubKey[2] = 20;
+ std::copy(hash.begin(), hash.begin() + 20, coins[i].out.scriptPubKey.begin() + 3);
+ coins[i].out.scriptPubKey[23] = OP_EQUALVERIFY;
+ coins[i].out.scriptPubKey[24] = OP_CHECKSIG;
+ break;
+ case 1: /* P2SH */
+ coins[i].out.scriptPubKey.resize(23);
+ coins[i].out.scriptPubKey[0] = OP_HASH160;
+ coins[i].out.scriptPubKey[1] = 20;
+ std::copy(hash.begin(), hash.begin() + 20, coins[i].out.scriptPubKey.begin() + 2);
+ coins[i].out.scriptPubKey[22] = OP_EQUAL;
+ break;
+ case 2: /* P2WPKH */
+ coins[i].out.scriptPubKey.resize(22);
+ coins[i].out.scriptPubKey[0] = OP_0;
+ coins[i].out.scriptPubKey[1] = 20;
+ std::copy(hash.begin(), hash.begin() + 20, coins[i].out.scriptPubKey.begin() + 2);
+ break;
+ case 3: /* P2WSH */
+ coins[i].out.scriptPubKey.resize(34);
+ coins[i].out.scriptPubKey[0] = OP_0;
+ coins[i].out.scriptPubKey[1] = 32;
+ std::copy(hash.begin(), hash.begin() + 32, coins[i].out.scriptPubKey.begin() + 2);
+ break;
+ case 4: /* P2TR */
+ coins[i].out.scriptPubKey.resize(34);
+ coins[i].out.scriptPubKey[0] = OP_1;
+ coins[i].out.scriptPubKey[1] = 32;
+ std::copy(hash.begin(), hash.begin() + 32, coins[i].out.scriptPubKey.begin() + 2);
+ break;
+ }
+ /* Hash again to construct nValue and fCoinBase. */
+ CSHA256().Write(PREFIX_M, 1).Write(ser, sizeof(ser)).Finalize(hash.begin());
+ coins[i].out.nValue = CAmount(hash.GetUint64(0) % MAX_MONEY);
+ coins[i].fCoinBase = (hash.GetUint64(1) & 7) == 0;
+ coins[i].nHeight = 0; /* Real nHeight used in simulation is set dynamically. */
+ }
+ }
+};
+
+enum class EntryType : uint8_t
+{
+ /* This entry in the cache does not exist (so we'd have to look in the parent cache). */
+ NONE,
+
+ /* This entry in the cache corresponds to an unspent coin. */
+ UNSPENT,
+
+ /* This entry in the cache corresponds to a spent coin. */
+ SPENT,
+};
+
+struct CacheEntry
+{
+ /* Type of entry. */
+ EntryType entrytype;
+
+ /* Index in the coins array this entry corresponds to (only if entrytype == UNSPENT). */
+ coinidx_type coinidx;
+
+ /* nHeight value for this entry (so the coins[coinidx].nHeight value is ignored; only if entrytype == UNSPENT). */
+ uint32_t height;
+};
+
+struct CacheLevel
+{
+ CacheEntry entry[NUM_OUTPOINTS];
+
+ void Wipe() {
+ for (uint32_t i = 0; i < NUM_OUTPOINTS; ++i) {
+ entry[i].entrytype = EntryType::NONE;
+ }
+ }
+};
+
+/** Class for the base of the hierarchy (roughly simulating a memory-backed CCoinsViewDB).
+ *
+ * The initial state consists of the empty UTXO set, though coins whose output index
+ * is 3 (mod 5) always have GetCoin() succeed (but returning an IsSpent() coin unless a UTXO
+ * exists). Coins whose output index is 4 (mod 5) have GetCoin() always succeed after being spent.
+ * This exercises code paths with spent, non-DIRTY cache entries.
+ */
+class CoinsViewBottom final : public CCoinsView
+{
+ std::map<COutPoint, Coin> m_data;
+
+public:
+ bool GetCoin(const COutPoint& outpoint, Coin& coin) const final
+ {
+ auto it = m_data.find(outpoint);
+ if (it == m_data.end()) {
+ if ((outpoint.n % 5) == 3) {
+ coin.Clear();
+ return true;
+ }
+ return false;
+ } else {
+ coin = it->second;
+ return true;
+ }
+ }
+
+ bool HaveCoin(const COutPoint& outpoint) const final
+ {
+ return m_data.count(outpoint);
+ }
+
+ uint256 GetBestBlock() const final { return {}; }
+ std::vector<uint256> GetHeadBlocks() const final { return {}; }
+ std::unique_ptr<CCoinsViewCursor> Cursor() const final { return {}; }
+ size_t EstimateSize() const final { return m_data.size(); }
+
+ bool BatchWrite(CCoinsMap& data, const uint256&, bool erase) final
+ {
+ for (auto it = data.begin(); it != data.end(); it = erase ? data.erase(it) : std::next(it)) {
+ if (it->second.flags & CCoinsCacheEntry::DIRTY) {
+ if (it->second.coin.IsSpent() && (it->first.n % 5) != 4) {
+ m_data.erase(it->first);
+ } else if (erase) {
+ m_data[it->first] = std::move(it->second.coin);
+ } else {
+ m_data[it->first] = it->second.coin;
+ }
+ } else {
+ /* For non-dirty entries being written, compare them with what we have. */
+ auto it2 = m_data.find(it->first);
+ if (it->second.coin.IsSpent()) {
+ assert(it2 == m_data.end() || it2->second.IsSpent());
+ } else {
+ assert(it2 != m_data.end());
+ assert(it->second.coin.out == it2->second.out);
+ assert(it->second.coin.fCoinBase == it2->second.fCoinBase);
+ assert(it->second.coin.nHeight == it2->second.nHeight);
+ }
+ }
+ }
+ return true;
+ }
+};
+
+} // namespace
+
+FUZZ_TARGET(coinscache_sim)
+{
+ /** Precomputed COutPoint and CCoins values. */
+ static const PrecomputedData data;
+
+ /** Dummy coinsview instance (base of the hierarchy). */
+ CoinsViewBottom bottom;
+ /** Real CCoinsViewCache objects. */
+ std::vector<std::unique_ptr<CCoinsViewCache>> caches;
+ /** Simulated cache data (sim_caches[0] matches bottom, sim_caches[i+1] matches caches[i]). */
+ CacheLevel sim_caches[MAX_CACHES + 1];
+ /** Current height in the simulation. */
+ uint32_t current_height = 1U;
+
+ // Initialize bottom simulated cache.
+ sim_caches[0].Wipe();
+
+ /** Helper lookup function in the simulated cache stack. */
+ auto lookup = [&](uint32_t outpointidx, int sim_idx = -1) -> std::optional<std::pair<coinidx_type, uint32_t>> {
+ uint32_t cache_idx = sim_idx == -1 ? caches.size() : sim_idx;
+ while (true) {
+ const auto& entry = sim_caches[cache_idx].entry[outpointidx];
+ if (entry.entrytype == EntryType::UNSPENT) {
+ return {{entry.coinidx, entry.height}};
+ } else if (entry.entrytype == EntryType::SPENT) {
+ return std::nullopt;
+ };
+ if (cache_idx == 0) break;
+ --cache_idx;
+ }
+ return std::nullopt;
+ };
+
+ /** Flush changes in top cache to the one below. */
+ auto flush = [&]() {
+ assert(caches.size() >= 1);
+ auto& cache = sim_caches[caches.size()];
+ auto& prev_cache = sim_caches[caches.size() - 1];
+ for (uint32_t outpointidx = 0; outpointidx < NUM_OUTPOINTS; ++outpointidx) {
+ if (cache.entry[outpointidx].entrytype != EntryType::NONE) {
+ prev_cache.entry[outpointidx] = cache.entry[outpointidx];
+ cache.entry[outpointidx].entrytype = EntryType::NONE;
+ }
+ }
+ };
+
+ // Main simulation loop: read commands from the fuzzer input, and apply them
+ // to both the real cache stack and the simulation.
+ FuzzedDataProvider provider(buffer.data(), buffer.size());
+ LIMITED_WHILE(provider.remaining_bytes(), 10000) {
+ // Every operation (except "Change height") moves current height forward,
+ // so it functions as a kind of epoch, making ~all UTXOs unique.
+ ++current_height;
+ // Make sure there is always at least one CCoinsViewCache.
+ if (caches.empty()) {
+ caches.emplace_back(new CCoinsViewCache(&bottom, /*deterministic=*/true));
+ sim_caches[caches.size()].Wipe();
+ }
+
+ // Execute command.
+ CallOneOf(
+ provider,
+
+ [&]() { // GetCoin
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ // Look up in simulation data.
+ auto sim = lookup(outpointidx);
+ // Look up in real caches.
+ Coin realcoin;
+ auto real = caches.back()->GetCoin(data.outpoints[outpointidx], realcoin);
+ // Compare results.
+ if (!sim.has_value()) {
+ assert(!real || realcoin.IsSpent());
+ } else {
+ assert(real && !realcoin.IsSpent());
+ const auto& simcoin = data.coins[sim->first];
+ assert(realcoin.out == simcoin.out);
+ assert(realcoin.fCoinBase == simcoin.fCoinBase);
+ assert(realcoin.nHeight == sim->second);
+ }
+ },
+
+ [&]() { // HaveCoin
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ // Look up in simulation data.
+ auto sim = lookup(outpointidx);
+ // Look up in real caches.
+ auto real = caches.back()->HaveCoin(data.outpoints[outpointidx]);
+ // Compare results.
+ assert(sim.has_value() == real);
+ },
+
+ [&]() { // HaveCoinInCache
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ // Invoke on real cache (there is no equivalent in simulation, so nothing to compare result with).
+ (void)caches.back()->HaveCoinInCache(data.outpoints[outpointidx]);
+ },
+
+ [&]() { // AccessCoin
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ // Look up in simulation data.
+ auto sim = lookup(outpointidx);
+ // Look up in real caches.
+ const auto& realcoin = caches.back()->AccessCoin(data.outpoints[outpointidx]);
+ // Compare results.
+ if (!sim.has_value()) {
+ assert(realcoin.IsSpent());
+ } else {
+ assert(!realcoin.IsSpent());
+ const auto& simcoin = data.coins[sim->first];
+ assert(simcoin.out == realcoin.out);
+ assert(simcoin.fCoinBase == realcoin.fCoinBase);
+ assert(realcoin.nHeight == sim->second);
+ }
+ },
+
+ [&]() { // AddCoin (only possible_overwrite if necessary)
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ uint32_t coinidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_COINS - 1);
+ // Look up in simulation data (to know whether we must set possible_overwrite or not).
+ auto sim = lookup(outpointidx);
+ // Invoke on real caches.
+ Coin coin = data.coins[coinidx];
+ coin.nHeight = current_height;
+ caches.back()->AddCoin(data.outpoints[outpointidx], std::move(coin), sim.has_value());
+ // Apply to simulation data.
+ auto& entry = sim_caches[caches.size()].entry[outpointidx];
+ entry.entrytype = EntryType::UNSPENT;
+ entry.coinidx = coinidx;
+ entry.height = current_height;
+ },
+
+ [&]() { // AddCoin (always possible_overwrite)
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ uint32_t coinidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_COINS - 1);
+ // Invoke on real caches.
+ Coin coin = data.coins[coinidx];
+ coin.nHeight = current_height;
+ caches.back()->AddCoin(data.outpoints[outpointidx], std::move(coin), true);
+ // Apply to simulation data.
+ auto& entry = sim_caches[caches.size()].entry[outpointidx];
+ entry.entrytype = EntryType::UNSPENT;
+ entry.coinidx = coinidx;
+ entry.height = current_height;
+ },
+
+ [&]() { // SpendCoin (moveto = nullptr)
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ // Invoke on real caches.
+ caches.back()->SpendCoin(data.outpoints[outpointidx], nullptr);
+ // Apply to simulation data.
+ sim_caches[caches.size()].entry[outpointidx].entrytype = EntryType::SPENT;
+ },
+
+ [&]() { // SpendCoin (with moveto)
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ // Look up in simulation data (to compare the returned *moveto with).
+ auto sim = lookup(outpointidx);
+ // Invoke on real caches.
+ Coin realcoin;
+ caches.back()->SpendCoin(data.outpoints[outpointidx], &realcoin);
+ // Apply to simulation data.
+ sim_caches[caches.size()].entry[outpointidx].entrytype = EntryType::SPENT;
+ // Compare *moveto with the value expected based on simulation data.
+ if (!sim.has_value()) {
+ assert(realcoin.IsSpent());
+ } else {
+ assert(!realcoin.IsSpent());
+ const auto& simcoin = data.coins[sim->first];
+ assert(simcoin.out == realcoin.out);
+ assert(simcoin.fCoinBase == realcoin.fCoinBase);
+ assert(realcoin.nHeight == sim->second);
+ }
+ },
+
+ [&]() { // Uncache
+ uint32_t outpointidx = provider.ConsumeIntegralInRange<uint32_t>(0, NUM_OUTPOINTS - 1);
+ // Apply to real caches (there is no equivalent in our simulation).
+ caches.back()->Uncache(data.outpoints[outpointidx]);
+ },
+
+ [&]() { // Add a cache level (if not already at the max).
+ if (caches.size() != MAX_CACHES) {
+ // Apply to real caches.
+ caches.emplace_back(new CCoinsViewCache(&*caches.back(), /*deterministic=*/true));
+ // Apply to simulation data.
+ sim_caches[caches.size()].Wipe();
+ }
+ },
+
+ [&]() { // Remove a cache level.
+ // Apply to real caches (this reduces caches.size(), implicitly doing the same on the simulation data).
+ caches.back()->SanityCheck();
+ caches.pop_back();
+ },
+
+ [&]() { // Flush.
+ // Apply to simulation data.
+ flush();
+ // Apply to real caches.
+ caches.back()->Flush();
+ },
+
+ [&]() { // Sync.
+ // Apply to simulation data (note that in our simulation, syncing and flushing is the same thing).
+ flush();
+ // Apply to real caches.
+ caches.back()->Sync();
+ },
+
+ [&]() { // Flush + ReallocateCache.
+ // Apply to simulation data.
+ flush();
+ // Apply to real caches.
+ caches.back()->Flush();
+ caches.back()->ReallocateCache();
+ },
+
+ [&]() { // GetCacheSize
+ (void)caches.back()->GetCacheSize();
+ },
+
+ [&]() { // DynamicMemoryUsage
+ (void)caches.back()->DynamicMemoryUsage();
+ },
+
+ [&]() { // Change height
+ current_height = provider.ConsumeIntegralInRange<uint32_t>(1, current_height - 1);
+ }
+ );
+ }
+
+ // Sanity check all the remaining caches
+ for (const auto& cache : caches) {
+ cache->SanityCheck();
+ }
+
+ // Full comparison between caches and simulation data, from bottom to top,
+ // as AccessCoin on a higher cache may affect caches below it.
+ for (unsigned sim_idx = 1; sim_idx <= caches.size(); ++sim_idx) {
+ auto& cache = *caches[sim_idx - 1];
+ size_t cache_size = 0;
+
+ for (uint32_t outpointidx = 0; outpointidx < NUM_OUTPOINTS; ++outpointidx) {
+ cache_size += cache.HaveCoinInCache(data.outpoints[outpointidx]);
+ const auto& real = cache.AccessCoin(data.outpoints[outpointidx]);
+ auto sim = lookup(outpointidx, sim_idx);
+ if (!sim.has_value()) {
+ assert(real.IsSpent());
+ } else {
+ assert(!real.IsSpent());
+ assert(real.out == data.coins[sim->first].out);
+ assert(real.fCoinBase == data.coins[sim->first].fCoinBase);
+ assert(real.nHeight == sim->second);
+ }
+ }
+
+ // HaveCoinInCache ignores spent coins, so GetCacheSize() may exceed it.
+ assert(cache.GetCacheSize() >= cache_size);
+ }
+
+ // Compare the bottom coinsview (not a CCoinsViewCache) with sim_cache[0].
+ for (uint32_t outpointidx = 0; outpointidx < NUM_OUTPOINTS; ++outpointidx) {
+ Coin realcoin;
+ bool real = bottom.GetCoin(data.outpoints[outpointidx], realcoin);
+ auto sim = lookup(outpointidx, 0);
+ if (!sim.has_value()) {
+ assert(!real || realcoin.IsSpent());
+ } else {
+ assert(real && !realcoin.IsSpent());
+ assert(realcoin.out == data.coins[sim->first].out);
+ assert(realcoin.fCoinBase == data.coins[sim->first].fCoinBase);
+ assert(realcoin.nHeight == sim->second);
+ }
+ }
+}
diff --git a/src/test/fuzz/crypto_chacha20.cpp b/src/test/fuzz/crypto_chacha20.cpp
index 3f552a8cda..3fa445096a 100644
--- a/src/test/fuzz/crypto_chacha20.cpp
+++ b/src/test/fuzz/crypto_chacha20.cpp
@@ -6,6 +6,7 @@
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
+#include <test/util/xoroshiro128plusplus.h>
#include <cstdint>
#include <vector>
@@ -16,21 +17,21 @@ FUZZ_TARGET(crypto_chacha20)
ChaCha20 chacha20;
if (fuzzed_data_provider.ConsumeBool()) {
- const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, fuzzed_data_provider.ConsumeIntegralInRange<size_t>(16, 32));
- chacha20 = ChaCha20{key.data(), key.size()};
+ const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, 32);
+ chacha20 = ChaCha20{key.data()};
}
LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), 10000) {
CallOneOf(
fuzzed_data_provider,
[&] {
- const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, fuzzed_data_provider.ConsumeIntegralInRange<size_t>(16, 32));
- chacha20.SetKey(key.data(), key.size());
+ std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, 32);
+ chacha20.SetKey32(key.data());
},
[&] {
chacha20.SetIV(fuzzed_data_provider.ConsumeIntegral<uint64_t>());
},
[&] {
- chacha20.Seek(fuzzed_data_provider.ConsumeIntegral<uint64_t>());
+ chacha20.Seek64(fuzzed_data_provider.ConsumeIntegral<uint64_t>());
},
[&] {
std::vector<uint8_t> output(fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096));
@@ -43,3 +44,110 @@ FUZZ_TARGET(crypto_chacha20)
});
}
}
+
+namespace
+{
+
+/** Fuzzer that invokes ChaCha20::Crypt() or ChaCha20::Keystream multiple times:
+ once for a large block at once, and then the same data in chunks, comparing
+ the outcome.
+
+ If UseCrypt, seeded Xoroshiro128++ output is used as input to Crypt().
+ If not, Keystream() is used directly, or sequences of 0x00 are encrypted.
+*/
+template<bool UseCrypt>
+void ChaCha20SplitFuzz(FuzzedDataProvider& provider)
+{
+ // Determine key, iv, start position, length.
+ unsigned char key[32] = {0};
+ auto key_bytes = provider.ConsumeBytes<unsigned char>(32);
+ std::copy(key_bytes.begin(), key_bytes.end(), key);
+ uint64_t iv = provider.ConsumeIntegral<uint64_t>();
+ uint64_t total_bytes = provider.ConsumeIntegralInRange<uint64_t>(0, 1000000);
+ /* ~x = 2^64 - 1 - x, so ~(total_bytes >> 6) is the maximal seek position. */
+ uint64_t seek = provider.ConsumeIntegralInRange<uint64_t>(0, ~(total_bytes >> 6));
+
+ // Initialize two ChaCha20 ciphers, with the same key/iv/position.
+ ChaCha20 crypt1(key);
+ ChaCha20 crypt2(key);
+ crypt1.SetIV(iv);
+ crypt1.Seek64(seek);
+ crypt2.SetIV(iv);
+ crypt2.Seek64(seek);
+
+ // Construct vectors with data.
+ std::vector<unsigned char> data1, data2;
+ data1.resize(total_bytes);
+ data2.resize(total_bytes);
+
+ // If using Crypt(), initialize data1 and data2 with the same Xoroshiro128++ based
+ // stream.
+ if constexpr (UseCrypt) {
+ uint64_t seed = provider.ConsumeIntegral<uint64_t>();
+ XoRoShiRo128PlusPlus rng(seed);
+ uint64_t bytes = 0;
+ while (bytes < (total_bytes & ~uint64_t{7})) {
+ uint64_t val = rng();
+ WriteLE64(data1.data() + bytes, val);
+ WriteLE64(data2.data() + bytes, val);
+ bytes += 8;
+ }
+ if (bytes < total_bytes) {
+ unsigned char valbytes[8];
+ uint64_t val = rng();
+ WriteLE64(valbytes, val);
+ std::copy(valbytes, valbytes + (total_bytes - bytes), data1.data() + bytes);
+ std::copy(valbytes, valbytes + (total_bytes - bytes), data2.data() + bytes);
+ }
+ }
+
+ // Whether UseCrypt is used or not, the two byte arrays must match.
+ assert(data1 == data2);
+
+ // Encrypt data1, the whole array at once.
+ if constexpr (UseCrypt) {
+ crypt1.Crypt(data1.data(), data1.data(), total_bytes);
+ } else {
+ crypt1.Keystream(data1.data(), total_bytes);
+ }
+
+ // Encrypt data2, in at most 256 chunks.
+ uint64_t bytes2 = 0;
+ int iter = 0;
+ while (true) {
+ bool is_last = (iter == 255) || (bytes2 == total_bytes) || provider.ConsumeBool();
+ ++iter;
+ // Determine how many bytes to encrypt in this chunk: a fuzzer-determined
+ // amount for all but the last chunk (which processes all remaining bytes).
+ uint64_t now = is_last ? total_bytes - bytes2 :
+ provider.ConsumeIntegralInRange<uint64_t>(0, total_bytes - bytes2);
+ // For each chunk, consider using Crypt() even when UseCrypt is false.
+ // This tests that Keystream() has the same behavior as Crypt() applied
+ // to 0x00 input bytes.
+ if (UseCrypt || provider.ConsumeBool()) {
+ crypt2.Crypt(data2.data() + bytes2, data2.data() + bytes2, now);
+ } else {
+ crypt2.Keystream(data2.data() + bytes2, now);
+ }
+ bytes2 += now;
+ if (is_last) break;
+ }
+ // We should have processed everything now.
+ assert(bytes2 == total_bytes);
+ // And the result should match.
+ assert(data1 == data2);
+}
+
+} // namespace
+
+FUZZ_TARGET(chacha20_split_crypt)
+{
+ FuzzedDataProvider provider{buffer.data(), buffer.size()};
+ ChaCha20SplitFuzz<true>(provider);
+}
+
+FUZZ_TARGET(chacha20_split_keystream)
+{
+ FuzzedDataProvider provider{buffer.data(), buffer.size()};
+ ChaCha20SplitFuzz<false>(provider);
+}
diff --git a/src/test/fuzz/crypto_diff_fuzz_chacha20.cpp b/src/test/fuzz/crypto_diff_fuzz_chacha20.cpp
index 1b89d55773..78fee48de6 100644
--- a/src/test/fuzz/crypto_diff_fuzz_chacha20.cpp
+++ b/src/test/fuzz/crypto_diff_fuzz_chacha20.cpp
@@ -267,32 +267,33 @@ void ECRYPT_keystream_bytes(ECRYPT_ctx* x, u8* stream, u32 bytes)
FUZZ_TARGET(crypto_diff_fuzz_chacha20)
{
+ static const unsigned char ZEROKEY[32] = {0};
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
ChaCha20 chacha20;
ECRYPT_ctx ctx;
- // D. J. Bernstein doesn't initialise ctx to 0 while Bitcoin Core initialises chacha20 to 0 in the constructor
- for (int i = 0; i < 16; i++) {
- ctx.input[i] = 0;
- }
if (fuzzed_data_provider.ConsumeBool()) {
- const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, fuzzed_data_provider.ConsumeIntegralInRange<size_t>(16, 32));
- chacha20 = ChaCha20{key.data(), key.size()};
+ const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, 32);
+ chacha20 = ChaCha20{key.data()};
ECRYPT_keysetup(&ctx, key.data(), key.size() * 8, 0);
- // ECRYPT_keysetup() doesn't set the counter and nonce to 0 while SetKey() does
- uint8_t iv[8] = {0, 0, 0, 0, 0, 0, 0, 0};
- ECRYPT_ivsetup(&ctx, iv);
+ } else {
+ // The default ChaCha20 constructor is equivalent to using the all-0 key.
+ ECRYPT_keysetup(&ctx, ZEROKEY, 256, 0);
}
+ // ECRYPT_keysetup() doesn't set the counter and nonce to 0 while SetKey32() does
+ static const uint8_t iv[8] = {0, 0, 0, 0, 0, 0, 0, 0};
+ ECRYPT_ivsetup(&ctx, iv);
+
LIMITED_WHILE (fuzzed_data_provider.ConsumeBool(), 3000) {
CallOneOf(
fuzzed_data_provider,
[&] {
- const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, fuzzed_data_provider.ConsumeIntegralInRange<size_t>(16, 32));
- chacha20.SetKey(key.data(), key.size());
+ const std::vector<unsigned char> key = ConsumeFixedLengthByteVector(fuzzed_data_provider, 32);
+ chacha20.SetKey32(key.data());
ECRYPT_keysetup(&ctx, key.data(), key.size() * 8, 0);
- // ECRYPT_keysetup() doesn't set the counter and nonce to 0 while SetKey() does
+ // ECRYPT_keysetup() doesn't set the counter and nonce to 0 while SetKey32() does
uint8_t iv[8] = {0, 0, 0, 0, 0, 0, 0, 0};
ECRYPT_ivsetup(&ctx, iv);
},
@@ -304,26 +305,32 @@ FUZZ_TARGET(crypto_diff_fuzz_chacha20)
},
[&] {
uint64_t counter = fuzzed_data_provider.ConsumeIntegral<uint64_t>();
- chacha20.Seek(counter);
+ chacha20.Seek64(counter);
ctx.input[12] = counter;
ctx.input[13] = counter >> 32;
},
[&] {
uint32_t integralInRange = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096);
+ // DJB's version seeks forward to a multiple of 64 bytes after every operation. Correct for that.
+ uint64_t pos = ctx.input[12] + (((uint64_t)ctx.input[13]) << 32) + ((integralInRange + 63) >> 6);
std::vector<uint8_t> output(integralInRange);
chacha20.Keystream(output.data(), output.size());
std::vector<uint8_t> djb_output(integralInRange);
ECRYPT_keystream_bytes(&ctx, djb_output.data(), djb_output.size());
assert(output == djb_output);
+ chacha20.Seek64(pos);
},
[&] {
uint32_t integralInRange = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096);
+ // DJB's version seeks forward to a multiple of 64 bytes after every operation. Correct for that.
+ uint64_t pos = ctx.input[12] + (((uint64_t)ctx.input[13]) << 32) + ((integralInRange + 63) >> 6);
std::vector<uint8_t> output(integralInRange);
const std::vector<uint8_t> input = ConsumeFixedLengthByteVector(fuzzed_data_provider, output.size());
chacha20.Crypt(input.data(), output.data(), input.size());
std::vector<uint8_t> djb_output(integralInRange);
ECRYPT_encrypt_bytes(&ctx, input.data(), djb_output.data(), input.size());
assert(output == djb_output);
+ chacha20.Seek64(pos);
});
}
}
diff --git a/src/test/fuzz/miniscript.cpp b/src/test/fuzz/miniscript.cpp
index d5667e0cf3..1b791fc19c 100644
--- a/src/test/fuzz/miniscript.cpp
+++ b/src/test/fuzz/miniscript.cpp
@@ -14,14 +14,25 @@
namespace {
-//! Some pre-computed data for more efficient string roundtrips.
+//! Some pre-computed data for more efficient string roundtrips and to simulate challenges.
struct TestData {
typedef CPubKey Key;
- // Precomputed public keys.
+ // Precomputed public keys, and a dummy signature for each of them.
std::vector<Key> dummy_keys;
std::map<Key, int> dummy_key_idx_map;
std::map<CKeyID, Key> dummy_keys_map;
+ std::map<Key, std::pair<std::vector<unsigned char>, bool>> dummy_sigs;
+
+ // Precomputed hashes of each kind.
+ std::vector<std::vector<unsigned char>> sha256;
+ std::vector<std::vector<unsigned char>> ripemd160;
+ std::vector<std::vector<unsigned char>> hash256;
+ std::vector<std::vector<unsigned char>> hash160;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> sha256_preimages;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> ripemd160_preimages;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> hash256_preimages;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> hash160_preimages;
//! Set the precomputed data.
void Init() {
@@ -35,6 +46,28 @@ struct TestData {
dummy_keys.push_back(pubkey);
dummy_key_idx_map.emplace(pubkey, i);
dummy_keys_map.insert({pubkey.GetID(), pubkey});
+
+ std::vector<unsigned char> sig;
+ privkey.Sign(uint256S(""), sig);
+ sig.push_back(1); // SIGHASH_ALL
+ dummy_sigs.insert({pubkey, {sig, i & 1}});
+
+ std::vector<unsigned char> hash;
+ hash.resize(32);
+ CSHA256().Write(keydata, 32).Finalize(hash.data());
+ sha256.push_back(hash);
+ if (i & 1) sha256_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
+ CHash256().Write(keydata).Finalize(hash);
+ hash256.push_back(hash);
+ if (i & 1) hash256_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
+ hash.resize(20);
+ CRIPEMD160().Write(keydata, 32).Finalize(hash.data());
+ assert(hash.size() == 20);
+ ripemd160.push_back(hash);
+ if (i & 1) ripemd160_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
+ CHash160().Write(keydata).Finalize(hash);
+ hash160.push_back(hash);
+ if (i & 1) hash160_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
}
}
} TEST_DATA;
@@ -59,6 +92,17 @@ struct ParserContext {
return HexStr(Span{&idx, 1});
}
+ std::vector<unsigned char> ToPKBytes(const Key& key) const
+ {
+ return {key.begin(), key.end()};
+ }
+
+ std::vector<unsigned char> ToPKHBytes(const Key& key) const
+ {
+ const auto h = Hash160(key);
+ return {h.begin(), h.end()};
+ }
+
template<typename I>
std::optional<Key> FromString(I first, I last) const {
if (last - first != 2) return {};
@@ -69,7 +113,7 @@ struct ParserContext {
template<typename I>
std::optional<Key> FromPKBytes(I first, I last) const {
- Key key;
+ CPubKey key;
key.Set(first, last);
if (!key.IsValid()) return {};
return key;
@@ -130,6 +174,732 @@ struct ScriptParserContext {
}
} SCRIPT_PARSER_CONTEXT;
+//! Context to produce a satisfaction for a Miniscript node using the pre-computed data.
+struct SatisfierContext: ParserContext {
+ // Timelock challenges satisfaction. Make the value (deterministically) vary to explore different
+ // paths.
+ bool CheckAfter(uint32_t value) const { return value % 2; }
+ bool CheckOlder(uint32_t value) const { return value % 2; }
+
+ // Signature challenges fulfilled with a dummy signature, if it was one of our dummy keys.
+ miniscript::Availability Sign(const CPubKey& key, std::vector<unsigned char>& sig) const {
+ const auto it = TEST_DATA.dummy_sigs.find(key);
+ if (it == TEST_DATA.dummy_sigs.end()) return miniscript::Availability::NO;
+ if (it->second.second) {
+ // Key is "available"
+ sig = it->second.first;
+ return miniscript::Availability::YES;
+ } else {
+ return miniscript::Availability::NO;
+ }
+ }
+
+ //! Lookup generalization for all the hash satisfactions below
+ miniscript::Availability LookupHash(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage,
+ const std::map<std::vector<unsigned char>, std::vector<unsigned char>>& map) const
+ {
+ const auto it = map.find(hash);
+ if (it == map.end()) return miniscript::Availability::NO;
+ preimage = it->second;
+ return miniscript::Availability::YES;
+ }
+ miniscript::Availability SatSHA256(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return LookupHash(hash, preimage, TEST_DATA.sha256_preimages);
+ }
+ miniscript::Availability SatRIPEMD160(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return LookupHash(hash, preimage, TEST_DATA.ripemd160_preimages);
+ }
+ miniscript::Availability SatHASH256(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return LookupHash(hash, preimage, TEST_DATA.hash256_preimages);
+ }
+ miniscript::Availability SatHASH160(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const {
+ return LookupHash(hash, preimage, TEST_DATA.hash160_preimages);
+ }
+} SATISFIER_CTX;
+
+//! Context to check a satisfaction against the pre-computed data.
+struct CheckerContext: BaseSignatureChecker {
+ TestData *test_data;
+
+ // Signature checker methods. Checks the right dummy signature is used.
+ bool CheckECDSASignature(const std::vector<unsigned char>& sig, const std::vector<unsigned char>& vchPubKey,
+ const CScript& scriptCode, SigVersion sigversion) const override
+ {
+ const CPubKey key{vchPubKey};
+ const auto it = TEST_DATA.dummy_sigs.find(key);
+ if (it == TEST_DATA.dummy_sigs.end()) return false;
+ return it->second.first == sig;
+ }
+ bool CheckLockTime(const CScriptNum& nLockTime) const override { return nLockTime.GetInt64() & 1; }
+ bool CheckSequence(const CScriptNum& nSequence) const override { return nSequence.GetInt64() & 1; }
+} CHECKER_CTX;
+
+//! Context to check for duplicates when instancing a Node.
+struct KeyComparator {
+ bool KeyCompare(const CPubKey& a, const CPubKey& b) const {
+ return a < b;
+ }
+} KEY_COMP;
+
+// A dummy scriptsig to pass to VerifyScript (we always use Segwit v0).
+const CScript DUMMY_SCRIPTSIG;
+
+using Fragment = miniscript::Fragment;
+using NodeRef = miniscript::NodeRef<CPubKey>;
+using Node = miniscript::Node<CPubKey>;
+using Type = miniscript::Type;
+// https://github.com/llvm/llvm-project/issues/53444
+// NOLINTNEXTLINE(misc-unused-using-decls)
+using miniscript::operator"" _mst;
+
+//! Construct a miniscript node as a shared_ptr.
+template<typename... Args> NodeRef MakeNodeRef(Args&&... args) { return miniscript::MakeNodeRef<CPubKey>(KEY_COMP, std::forward<Args>(args)...); }
+
+/** Information about a yet to be constructed Miniscript node. */
+struct NodeInfo {
+ //! The type of this node
+ Fragment fragment;
+ //! Number of subs of this node
+ uint8_t n_subs;
+ //! The timelock value for older() and after(), the threshold value for multi() and thresh()
+ uint32_t k;
+ //! Keys for this node, if it has some
+ std::vector<CPubKey> keys;
+ //! The hash value for this node, if it has one
+ std::vector<unsigned char> hash;
+ //! The type requirements for the children of this node.
+ std::vector<Type> subtypes;
+
+ NodeInfo(Fragment frag): fragment(frag), n_subs(0), k(0) {}
+ NodeInfo(Fragment frag, CPubKey key): fragment(frag), n_subs(0), k(0), keys({key}) {}
+ NodeInfo(Fragment frag, uint32_t _k): fragment(frag), n_subs(0), k(_k) {}
+ NodeInfo(Fragment frag, std::vector<unsigned char> h): fragment(frag), n_subs(0), k(0), hash(std::move(h)) {}
+ NodeInfo(uint8_t subs, Fragment frag): fragment(frag), n_subs(subs), k(0), subtypes(subs, ""_mst) {}
+ NodeInfo(uint8_t subs, Fragment frag, uint32_t _k): fragment(frag), n_subs(subs), k(_k), subtypes(subs, ""_mst) {}
+ NodeInfo(std::vector<Type> subt, Fragment frag): fragment(frag), n_subs(subt.size()), k(0), subtypes(std::move(subt)) {}
+ NodeInfo(std::vector<Type> subt, Fragment frag, uint32_t _k): fragment(frag), n_subs(subt.size()), k(_k), subtypes(std::move(subt)) {}
+ NodeInfo(Fragment frag, uint32_t _k, std::vector<CPubKey> _keys): fragment(frag), n_subs(0), k(_k), keys(std::move(_keys)) {}
+};
+
+/** Pick an index in a collection from a single byte in the fuzzer's output. */
+template<typename T, typename A>
+T ConsumeIndex(FuzzedDataProvider& provider, A& col) {
+ const uint8_t i = provider.ConsumeIntegral<uint8_t>();
+ return col[i];
+}
+
+CPubKey ConsumePubKey(FuzzedDataProvider& provider) {
+ return ConsumeIndex<CPubKey>(provider, TEST_DATA.dummy_keys);
+}
+
+std::vector<unsigned char> ConsumeSha256(FuzzedDataProvider& provider) {
+ return ConsumeIndex<std::vector<unsigned char>>(provider, TEST_DATA.sha256);
+}
+
+std::vector<unsigned char> ConsumeHash256(FuzzedDataProvider& provider) {
+ return ConsumeIndex<std::vector<unsigned char>>(provider, TEST_DATA.hash256);
+}
+
+std::vector<unsigned char> ConsumeRipemd160(FuzzedDataProvider& provider) {
+ return ConsumeIndex<std::vector<unsigned char>>(provider, TEST_DATA.ripemd160);
+}
+
+std::vector<unsigned char> ConsumeHash160(FuzzedDataProvider& provider) {
+ return ConsumeIndex<std::vector<unsigned char>>(provider, TEST_DATA.hash160);
+}
+
+std::optional<uint32_t> ConsumeTimeLock(FuzzedDataProvider& provider) {
+ const uint32_t k = provider.ConsumeIntegral<uint32_t>();
+ if (k == 0 || k >= 0x80000000) return {};
+ return k;
+}
+
+/**
+ * Consume a Miniscript node from the fuzzer's output.
+ *
+ * This version is intended to have a fixed, stable, encoding for Miniscript nodes:
+ * - The first byte sets the type of the fragment. 0, 1 and all non-leaf fragments but thresh() are a
+ * single byte.
+ * - For the other leaf fragments, the following bytes depend on their type.
+ * - For older() and after(), the next 4 bytes define the timelock value.
+ * - For pk_k(), pk_h(), and all hashes, the next byte defines the index of the value in the test data.
+ * - For multi(), the next 2 bytes define respectively the threshold and the number of keys. Then as many
+ * bytes as the number of keys define the index of each key in the test data.
+ * - For thresh(), the next byte defines the threshold value and the following one the number of subs.
+ */
+std::optional<NodeInfo> ConsumeNodeStable(FuzzedDataProvider& provider) {
+ switch (provider.ConsumeIntegral<uint8_t>()) {
+ case 0: return {{Fragment::JUST_0}};
+ case 1: return {{Fragment::JUST_1}};
+ case 2: return {{Fragment::PK_K, ConsumePubKey(provider)}};
+ case 3: return {{Fragment::PK_H, ConsumePubKey(provider)}};
+ case 4: {
+ const auto k = ConsumeTimeLock(provider);
+ if (!k) return {};
+ return {{Fragment::OLDER, *k}};
+ }
+ case 5: {
+ const auto k = ConsumeTimeLock(provider);
+ if (!k) return {};
+ return {{Fragment::AFTER, *k}};
+ }
+ case 6: return {{Fragment::SHA256, ConsumeSha256(provider)}};
+ case 7: return {{Fragment::HASH256, ConsumeHash256(provider)}};
+ case 8: return {{Fragment::RIPEMD160, ConsumeRipemd160(provider)}};
+ case 9: return {{Fragment::HASH160, ConsumeHash160(provider)}};
+ case 10: {
+ const auto k = provider.ConsumeIntegral<uint8_t>();
+ const auto n_keys = provider.ConsumeIntegral<uint8_t>();
+ if (n_keys > 20 || k == 0 || k > n_keys) return {};
+ std::vector<CPubKey> keys{n_keys};
+ for (auto& key: keys) key = ConsumePubKey(provider);
+ return {{Fragment::MULTI, k, std::move(keys)}};
+ }
+ case 11: return {{3, Fragment::ANDOR}};
+ case 12: return {{2, Fragment::AND_V}};
+ case 13: return {{2, Fragment::AND_B}};
+ case 15: return {{2, Fragment::OR_B}};
+ case 16: return {{2, Fragment::OR_C}};
+ case 17: return {{2, Fragment::OR_D}};
+ case 18: return {{2, Fragment::OR_I}};
+ case 19: {
+ auto k = provider.ConsumeIntegral<uint8_t>();
+ auto n_subs = provider.ConsumeIntegral<uint8_t>();
+ if (k == 0 || k > n_subs) return {};
+ return {{n_subs, Fragment::THRESH, k}};
+ }
+ case 20: return {{1, Fragment::WRAP_A}};
+ case 21: return {{1, Fragment::WRAP_S}};
+ case 22: return {{1, Fragment::WRAP_C}};
+ case 23: return {{1, Fragment::WRAP_D}};
+ case 24: return {{1, Fragment::WRAP_V}};
+ case 25: return {{1, Fragment::WRAP_J}};
+ case 26: return {{1, Fragment::WRAP_N}};
+ default:
+ break;
+ }
+ return {};
+}
+
+/* This structure contains a table which, for each "target" Type, gives a list of
+ * recipes to construct it, automatically inferred from the behavior of ComputeType.
+ * Note that the Types here are not the final types of the constructed Nodes, but
+ * just the subset that are required. For example, a recipe for the "Bo" type
+ * might construct a "Bondu" sha256() NodeInfo, but cannot construct a "Bz" older().
+ * Each recipe is a Fragment together with a list of required types for its subnodes.
+ */
+struct SmartInfo
+{
+ using recipe = std::pair<Fragment, std::vector<Type>>;
+ std::map<Type, std::vector<recipe>> table;
+
+ void Init()
+ {
+ /* Construct a set of interesting type requirements to reason with (sections of BKVWzondu). */
+ std::vector<Type> types;
+ for (int base = 0; base < 4; ++base) { /* select from B,K,V,W */
+ Type type_base = base == 0 ? "B"_mst : base == 1 ? "K"_mst : base == 2 ? "V"_mst : "W"_mst;
+ for (int zo = 0; zo < 3; ++zo) { /* select from z,o,(none) */
+ Type type_zo = zo == 0 ? "z"_mst : zo == 1 ? "o"_mst : ""_mst;
+ for (int n = 0; n < 2; ++n) { /* select from (none),n */
+ if (zo == 0 && n == 1) continue; /* z conflicts with n */
+ if (base == 3 && n == 1) continue; /* W conflicts with n */
+ Type type_n = n == 0 ? ""_mst : "n"_mst;
+ for (int d = 0; d < 2; ++d) { /* select from (none),d */
+ if (base == 2 && d == 1) continue; /* V conflicts with d */
+ Type type_d = d == 0 ? ""_mst : "d"_mst;
+ for (int u = 0; u < 2; ++u) { /* select from (none),u */
+ if (base == 2 && u == 1) continue; /* V conflicts with u */
+ Type type_u = u == 0 ? ""_mst : "u"_mst;
+ Type type = type_base | type_zo | type_n | type_d | type_u;
+ types.push_back(type);
+ }
+ }
+ }
+ }
+ }
+
+ /* We define a recipe a to be a super-recipe of recipe b if they use the same
+ * fragment, the same number of subexpressions, and each of a's subexpression
+ * types is a supertype of the corresponding subexpression type of b.
+ * Within the set of recipes for the construction of a given type requirement,
+ * no recipe should be a super-recipe of another (as the super-recipe is
+ * applicable in every place the sub-recipe is, the sub-recipe is redundant). */
+ auto is_super_of = [](const recipe& a, const recipe& b) {
+ if (a.first != b.first) return false;
+ if (a.second.size() != b.second.size()) return false;
+ for (size_t i = 0; i < a.second.size(); ++i) {
+ if (!(b.second[i] << a.second[i])) return false;
+ }
+ return true;
+ };
+
+ /* Sort the type requirements. Subtypes will always sort later (e.g. Bondu will
+ * sort after Bo or Bu). As we'll be constructing recipes using these types, in
+ * order, in what follows, we'll construct super-recipes before sub-recipes.
+ * That means we never need to go back and delete a sub-recipe because a
+ * super-recipe got added. */
+ std::sort(types.begin(), types.end());
+
+ // Iterate over all possible fragments.
+ for (int fragidx = 0; fragidx <= int(Fragment::MULTI); ++fragidx) {
+ int sub_count = 0; //!< The minimum number of child nodes this recipe has.
+ int sub_range = 1; //!< The maximum number of child nodes for this recipe is sub_count+sub_range-1.
+ size_t data_size = 0;
+ size_t n_keys = 0;
+ uint32_t k = 0;
+ Fragment frag{fragidx};
+
+ // Based on the fragment, determine #subs/data/k/keys to pass to ComputeType. */
+ switch (frag) {
+ case Fragment::PK_K:
+ case Fragment::PK_H:
+ n_keys = 1;
+ break;
+ case Fragment::MULTI:
+ n_keys = 1;
+ k = 1;
+ break;
+ case Fragment::OLDER:
+ case Fragment::AFTER:
+ k = 1;
+ break;
+ case Fragment::SHA256:
+ case Fragment::HASH256:
+ data_size = 32;
+ break;
+ case Fragment::RIPEMD160:
+ case Fragment::HASH160:
+ data_size = 20;
+ break;
+ case Fragment::JUST_0:
+ case Fragment::JUST_1:
+ break;
+ case Fragment::WRAP_A:
+ case Fragment::WRAP_S:
+ case Fragment::WRAP_C:
+ case Fragment::WRAP_D:
+ case Fragment::WRAP_V:
+ case Fragment::WRAP_J:
+ case Fragment::WRAP_N:
+ sub_count = 1;
+ break;
+ case Fragment::AND_V:
+ case Fragment::AND_B:
+ case Fragment::OR_B:
+ case Fragment::OR_C:
+ case Fragment::OR_D:
+ case Fragment::OR_I:
+ sub_count = 2;
+ break;
+ case Fragment::ANDOR:
+ sub_count = 3;
+ break;
+ case Fragment::THRESH:
+ // Thresh logic is executed for 1 and 2 arguments. Larger numbers use ad-hoc code to extend.
+ sub_count = 1;
+ sub_range = 2;
+ k = 1;
+ break;
+ }
+
+ // Iterate over the number of subnodes (sub_count...sub_count+sub_range-1).
+ std::vector<Type> subt;
+ for (int subs = sub_count; subs < sub_count + sub_range; ++subs) {
+ // Iterate over the possible subnode types (at most 3).
+ for (Type x : types) {
+ for (Type y : types) {
+ for (Type z : types) {
+ // Compute the resulting type of a node with the selected fragment / subnode types.
+ subt.clear();
+ if (subs > 0) subt.push_back(x);
+ if (subs > 1) subt.push_back(y);
+ if (subs > 2) subt.push_back(z);
+ Type res = miniscript::internal::ComputeType(frag, x, y, z, subt, k, data_size, subs, n_keys);
+ // Continue if the result is not a valid node.
+ if ((res << "K"_mst) + (res << "V"_mst) + (res << "B"_mst) + (res << "W"_mst) != 1) continue;
+
+ recipe entry{frag, subt};
+ auto super_of_entry = [&](const recipe& rec) { return is_super_of(rec, entry); };
+ // Iterate over all supertypes of res (because if e.g. our selected fragment/subnodes result
+ // in a Bondu, they can form a recipe that is also applicable for constructing a B, Bou, Bdu, ...).
+ for (Type s : types) {
+ if ((res & "BKVWzondu"_mst) << s) {
+ auto& recipes = table[s];
+ // If we don't already have a super-recipe to the new one, add it.
+ if (!std::any_of(recipes.begin(), recipes.end(), super_of_entry)) {
+ recipes.push_back(entry);
+ }
+ }
+ }
+
+ if (subs <= 2) break;
+ }
+ if (subs <= 1) break;
+ }
+ if (subs <= 0) break;
+ }
+ }
+ }
+
+ /* Find which types are useful. The fuzzer logic only cares about constructing
+ * B,V,K,W nodes, so any type that isn't needed in any recipe (directly or
+ * indirectly) for the construction of those is uninteresting. */
+ std::set<Type> useful_types{"B"_mst, "V"_mst, "K"_mst, "W"_mst};
+ // Find the transitive closure by adding types until the set of types does not change.
+ while (true) {
+ size_t set_size = useful_types.size();
+ for (const auto& [type, recipes] : table) {
+ if (useful_types.count(type) != 0) {
+ for (const auto& [_, subtypes] : recipes) {
+ for (auto subtype : subtypes) useful_types.insert(subtype);
+ }
+ }
+ }
+ if (useful_types.size() == set_size) break;
+ }
+ // Remove all rules that construct uninteresting types.
+ for (auto type_it = table.begin(); type_it != table.end();) {
+ if (useful_types.count(type_it->first) == 0) {
+ type_it = table.erase(type_it);
+ } else {
+ ++type_it;
+ }
+ }
+
+ /* Find which types are constructible. A type is constructible if there is a leaf
+ * node recipe for constructing it, or a recipe whose subnodes are all constructible.
+ * Types can be non-constructible because they have no recipes to begin with,
+ * because they can only be constructed using recipes that involve otherwise
+ * non-constructible types, or because they require infinite recursion. */
+ std::set<Type> constructible_types{};
+ auto known_constructible = [&](Type type) { return constructible_types.count(type) != 0; };
+ // Find the transitive closure by adding types until the set of types does not change.
+ while (true) {
+ size_t set_size = constructible_types.size();
+ // Iterate over all types we have recipes for.
+ for (const auto& [type, recipes] : table) {
+ if (!known_constructible(type)) {
+ // For not (yet known to be) constructible types, iterate over their recipes.
+ for (const auto& [_, subt] : recipes) {
+ // If any recipe involves only (already known to be) constructible types,
+ // add the recipe's type to the set.
+ if (std::all_of(subt.begin(), subt.end(), known_constructible)) {
+ constructible_types.insert(type);
+ break;
+ }
+ }
+ }
+ }
+ if (constructible_types.size() == set_size) break;
+ }
+ for (auto type_it = table.begin(); type_it != table.end();) {
+ // Remove all recipes which involve non-constructible types.
+ type_it->second.erase(std::remove_if(type_it->second.begin(), type_it->second.end(),
+ [&](const recipe& rec) {
+ return !std::all_of(rec.second.begin(), rec.second.end(), known_constructible);
+ }), type_it->second.end());
+ // Delete types entirely which have no recipes left.
+ if (type_it->second.empty()) {
+ type_it = table.erase(type_it);
+ } else {
+ ++type_it;
+ }
+ }
+
+ for (auto& [type, recipes] : table) {
+ // Sort recipes for determinism, and place those using fewer subnodes first.
+ // This avoids runaway expansion (when reaching the end of the fuzz input,
+ // all zeroes are read, resulting in the first available recipe being picked).
+ std::sort(recipes.begin(), recipes.end(),
+ [](const recipe& a, const recipe& b) {
+ if (a.second.size() < b.second.size()) return true;
+ if (a.second.size() > b.second.size()) return false;
+ return a < b;
+ }
+ );
+ }
+ }
+} SMARTINFO;
+
+/**
+ * Consume a Miniscript node from the fuzzer's output.
+ *
+ * This is similar to ConsumeNodeStable, but uses a precomputed table with permitted
+ * fragments/subnode type for each required type. It is intended to more quickly explore
+ * interesting miniscripts, at the cost of higher implementation complexity (which could
+ * cause it to miss things if incorrect), and with less regard for stability of the seeds
+ * (as improvements to the tables or changes to the typing rules could invalidate
+ * everything).
+ */
+std::optional<NodeInfo> ConsumeNodeSmart(FuzzedDataProvider& provider, Type type_needed) {
+ /** Table entry for the requested type. */
+ auto recipes_it = SMARTINFO.table.find(type_needed);
+ assert(recipes_it != SMARTINFO.table.end());
+ /** Pick one recipe from the available ones for that type. */
+ const auto& [frag, subt] = PickValue(provider, recipes_it->second);
+
+ // Based on the fragment the recipe uses, fill in other data (k, keys, data).
+ switch (frag) {
+ case Fragment::PK_K:
+ case Fragment::PK_H:
+ return {{frag, ConsumePubKey(provider)}};
+ case Fragment::MULTI: {
+ const auto n_keys = provider.ConsumeIntegralInRange<uint8_t>(1, 20);
+ const auto k = provider.ConsumeIntegralInRange<uint8_t>(1, n_keys);
+ std::vector<CPubKey> keys{n_keys};
+ for (auto& key: keys) key = ConsumePubKey(provider);
+ return {{frag, k, std::move(keys)}};
+ }
+ case Fragment::OLDER:
+ case Fragment::AFTER:
+ return {{frag, provider.ConsumeIntegralInRange<uint32_t>(1, 0x7FFFFFF)}};
+ case Fragment::SHA256:
+ return {{frag, PickValue(provider, TEST_DATA.sha256)}};
+ case Fragment::HASH256:
+ return {{frag, PickValue(provider, TEST_DATA.hash256)}};
+ case Fragment::RIPEMD160:
+ return {{frag, PickValue(provider, TEST_DATA.ripemd160)}};
+ case Fragment::HASH160:
+ return {{frag, PickValue(provider, TEST_DATA.hash160)}};
+ case Fragment::JUST_0:
+ case Fragment::JUST_1:
+ case Fragment::WRAP_A:
+ case Fragment::WRAP_S:
+ case Fragment::WRAP_C:
+ case Fragment::WRAP_D:
+ case Fragment::WRAP_V:
+ case Fragment::WRAP_J:
+ case Fragment::WRAP_N:
+ case Fragment::AND_V:
+ case Fragment::AND_B:
+ case Fragment::OR_B:
+ case Fragment::OR_C:
+ case Fragment::OR_D:
+ case Fragment::OR_I:
+ case Fragment::ANDOR:
+ return {{subt, frag}};
+ case Fragment::THRESH: {
+ uint32_t children;
+ if (subt.size() < 2) {
+ children = subt.size();
+ } else {
+ // If we hit a thresh with 2 subnodes, artificially extend it to any number
+ // (2 or larger) by replicating the type of the last subnode.
+ children = provider.ConsumeIntegralInRange<uint32_t>(2, MAX_OPS_PER_SCRIPT / 2);
+ }
+ auto k = provider.ConsumeIntegralInRange<uint32_t>(1, children);
+ std::vector<Type> subs = subt;
+ while (subs.size() < children) subs.push_back(subs.back());
+ return {{std::move(subs), frag, k}};
+ }
+ }
+
+ assert(false);
+}
+
+/**
+ * Generate a Miniscript node based on the fuzzer's input.
+ *
+ * - ConsumeNode is a function object taking a Type, and returning an std::optional<NodeInfo>.
+ * - root_type is the required type properties of the constructed NodeRef.
+ * - strict_valid sets whether ConsumeNode is expected to guarantee a NodeInfo that results in
+ * a NodeRef whose Type() matches the type fed to ConsumeNode.
+ */
+template<typename F>
+NodeRef GenNode(F ConsumeNode, Type root_type = ""_mst, bool strict_valid = false) {
+ /** A stack of miniscript Nodes being built up. */
+ std::vector<NodeRef> stack;
+ /** The queue of instructions. */
+ std::vector<std::pair<Type, std::optional<NodeInfo>>> todo{{root_type, {}}};
+
+ while (!todo.empty()) {
+ // The expected type we have to construct.
+ auto type_needed = todo.back().first;
+ if (!todo.back().second) {
+ // Fragment/children have not been decided yet. Decide them.
+ auto node_info = ConsumeNode(type_needed);
+ if (!node_info) return {};
+ auto subtypes = node_info->subtypes;
+ todo.back().second = std::move(node_info);
+ todo.reserve(todo.size() + subtypes.size());
+ // As elements on the todo stack are processed back to front, construct
+ // them in reverse order (so that the first subnode is generated first).
+ for (size_t i = 0; i < subtypes.size(); ++i) {
+ todo.emplace_back(*(subtypes.rbegin() + i), std::nullopt);
+ }
+ } else {
+ // The back of todo has fragment and number of children decided, and
+ // those children have been constructed at the back of stack. Pop
+ // that entry off todo, and use it to construct a new NodeRef on
+ // stack.
+ NodeInfo& info = *todo.back().second;
+ // Gather children from the back of stack.
+ std::vector<NodeRef> sub;
+ sub.reserve(info.n_subs);
+ for (size_t i = 0; i < info.n_subs; ++i) {
+ sub.push_back(std::move(*(stack.end() - info.n_subs + i)));
+ }
+ stack.erase(stack.end() - info.n_subs, stack.end());
+ // Construct new NodeRef.
+ NodeRef node;
+ if (info.keys.empty()) {
+ node = MakeNodeRef(info.fragment, std::move(sub), std::move(info.hash), info.k);
+ } else {
+ assert(sub.empty());
+ assert(info.hash.empty());
+ node = MakeNodeRef(info.fragment, std::move(info.keys), info.k);
+ }
+ // Verify acceptability.
+ if (!node || !(node->GetType() << type_needed)) {
+ assert(!strict_valid);
+ return {};
+ }
+ if (!node->IsValid()) return {};
+ // Move it to the stack.
+ stack.push_back(std::move(node));
+ todo.pop_back();
+ }
+ }
+ assert(stack.size() == 1);
+ return std::move(stack[0]);
+}
+
+/** Perform various applicable tests on a miniscript Node. */
+void TestNode(const NodeRef& node, FuzzedDataProvider& provider)
+{
+ if (!node) return;
+
+ // Check that it roundtrips to text representation
+ std::optional<std::string> str{node->ToString(PARSER_CTX)};
+ assert(str);
+ auto parsed = miniscript::FromString(*str, PARSER_CTX);
+ assert(parsed);
+ assert(*parsed == *node);
+
+ // Check consistency between script size estimation and real size.
+ auto script = node->ToScript(PARSER_CTX);
+ assert(node->ScriptSize() == script.size());
+
+ // Check consistency of "x" property with the script (type K is excluded, because it can end
+ // with a push of a key, which could match these opcodes).
+ if (!(node->GetType() << "K"_mst)) {
+ bool ends_in_verify = !(node->GetType() << "x"_mst);
+ assert(ends_in_verify == (script.back() == OP_CHECKSIG || script.back() == OP_CHECKMULTISIG || script.back() == OP_EQUAL));
+ }
+
+ // The rest of the checks only apply when testing a valid top-level script.
+ if (!node->IsValidTopLevel()) return;
+
+ // Check roundtrip to script
+ auto decoded = miniscript::FromScript(script, PARSER_CTX);
+ assert(decoded);
+ // Note we can't use *decoded == *node because the miniscript representation may differ, so we check that:
+ // - The script corresponding to that decoded form matches exactly
+ // - The type matches exactly
+ assert(decoded->ToScript(PARSER_CTX) == script);
+ assert(decoded->GetType() == node->GetType());
+
+ if (provider.ConsumeBool() && node->GetOps() < MAX_OPS_PER_SCRIPT && node->ScriptSize() < MAX_STANDARD_P2WSH_SCRIPT_SIZE) {
+ // Optionally pad the script with OP_NOPs to max out the ops limit of the constructed script.
+ // This makes the script obviously not actually miniscript-compatible anymore, but the
+ // signatures constructed in this test don't commit to the script anyway, so the same
+ // miniscript satisfier will work. This increases the sensitivity of the test to the ops
+ // counting logic being too low, especially for simple scripts.
+ // Do this optionally because we're not solely interested in cases where the number of ops is
+ // maximal.
+ // Do not pad more than what would cause MAX_STANDARD_P2WSH_SCRIPT_SIZE to be reached, however,
+ // as that also invalidates scripts.
+ int add = std::min<int>(
+ MAX_OPS_PER_SCRIPT - node->GetOps(),
+ MAX_STANDARD_P2WSH_SCRIPT_SIZE - node->ScriptSize());
+ for (int i = 0; i < add; ++i) script.push_back(OP_NOP);
+ }
+
+ // Run malleable satisfaction algorithm.
+ const CScript script_pubkey = CScript() << OP_0 << WitnessV0ScriptHash(script);
+ CScriptWitness witness_mal;
+ const bool mal_success = node->Satisfy(SATISFIER_CTX, witness_mal.stack, false) == miniscript::Availability::YES;
+ witness_mal.stack.push_back(std::vector<unsigned char>(script.begin(), script.end()));
+
+ // Run non-malleable satisfaction algorithm.
+ CScriptWitness witness_nonmal;
+ const bool nonmal_success = node->Satisfy(SATISFIER_CTX, witness_nonmal.stack, true) == miniscript::Availability::YES;
+ witness_nonmal.stack.push_back(std::vector<unsigned char>(script.begin(), script.end()));
+
+ if (nonmal_success) {
+ // Non-malleable satisfactions are bounded by GetStackSize().
+ assert(witness_nonmal.stack.size() <= node->GetStackSize());
+ // If a non-malleable satisfaction exists, the malleable one must also exist, and be identical to it.
+ assert(mal_success);
+ assert(witness_nonmal.stack == witness_mal.stack);
+
+ // Test non-malleable satisfaction.
+ ScriptError serror;
+ bool res = VerifyScript(DUMMY_SCRIPTSIG, script_pubkey, &witness_nonmal, STANDARD_SCRIPT_VERIFY_FLAGS, CHECKER_CTX, &serror);
+ // Non-malleable satisfactions are guaranteed to be valid if ValidSatisfactions().
+ if (node->ValidSatisfactions()) assert(res);
+ // More detailed: non-malleable satisfactions must be valid, or could fail with ops count error (if CheckOpsLimit failed),
+ // or with a stack size error (if CheckStackSize check failed).
+ assert(res ||
+ (!node->CheckOpsLimit() && serror == ScriptError::SCRIPT_ERR_OP_COUNT) ||
+ (!node->CheckStackSize() && serror == ScriptError::SCRIPT_ERR_STACK_SIZE));
+ }
+
+ if (mal_success && (!nonmal_success || witness_mal.stack != witness_nonmal.stack)) {
+ // Test malleable satisfaction only if it's different from the non-malleable one.
+ ScriptError serror;
+ bool res = VerifyScript(DUMMY_SCRIPTSIG, script_pubkey, &witness_mal, STANDARD_SCRIPT_VERIFY_FLAGS, CHECKER_CTX, &serror);
+ // Malleable satisfactions are not guaranteed to be valid under any conditions, but they can only
+ // fail due to stack or ops limits.
+ assert(res || serror == ScriptError::SCRIPT_ERR_OP_COUNT || serror == ScriptError::SCRIPT_ERR_STACK_SIZE);
+ }
+
+ if (node->IsSane()) {
+ // For sane nodes, the two algorithms behave identically.
+ assert(mal_success == nonmal_success);
+ }
+
+ // Verify that if a node is policy-satisfiable, the malleable satisfaction
+ // algorithm succeeds. Given that under IsSane() both satisfactions
+ // are identical, this implies that for such nodes, the non-malleable
+ // satisfaction will also match the expected policy.
+ bool satisfiable = node->IsSatisfiable([](const Node& node) -> bool {
+ switch (node.fragment) {
+ case Fragment::PK_K:
+ case Fragment::PK_H: {
+ auto it = TEST_DATA.dummy_sigs.find(node.keys[0]);
+ assert(it != TEST_DATA.dummy_sigs.end());
+ return it->second.second;
+ }
+ case Fragment::MULTI: {
+ size_t sats = 0;
+ for (const auto& key : node.keys) {
+ auto it = TEST_DATA.dummy_sigs.find(key);
+ assert(it != TEST_DATA.dummy_sigs.end());
+ sats += it->second.second;
+ }
+ return sats >= node.k;
+ }
+ case Fragment::OLDER:
+ case Fragment::AFTER:
+ return node.k & 1;
+ case Fragment::SHA256:
+ return TEST_DATA.sha256_preimages.count(node.data);
+ case Fragment::HASH256:
+ return TEST_DATA.hash256_preimages.count(node.data);
+ case Fragment::RIPEMD160:
+ return TEST_DATA.ripemd160_preimages.count(node.data);
+ case Fragment::HASH160:
+ return TEST_DATA.hash160_preimages.count(node.data);
+ default:
+ assert(false);
+ }
+ return false;
+ });
+ assert(mal_success == satisfiable);
+}
+
} // namespace
void FuzzInit()
@@ -138,6 +908,33 @@ void FuzzInit()
TEST_DATA.Init();
}
+void FuzzInitSmart()
+{
+ FuzzInit();
+ SMARTINFO.Init();
+}
+
+/** Fuzz target that runs TestNode on nodes generated using ConsumeNodeStable. */
+FUZZ_TARGET_INIT(miniscript_stable, FuzzInit)
+{
+ FuzzedDataProvider provider(buffer.data(), buffer.size());
+ TestNode(GenNode([&](Type) {
+ return ConsumeNodeStable(provider);
+ }), provider);
+}
+
+/** Fuzz target that runs TestNode on nodes generated using ConsumeNodeSmart. */
+FUZZ_TARGET_INIT(miniscript_smart, FuzzInitSmart)
+{
+ /** The set of types we aim to construct nodes for. Together they cover all. */
+ static constexpr std::array<Type, 4> BASE_TYPES{"B"_mst, "V"_mst, "K"_mst, "W"_mst};
+
+ FuzzedDataProvider provider(buffer.data(), buffer.size());
+ TestNode(GenNode([&](Type needed_type) {
+ return ConsumeNodeSmart(provider, needed_type);
+ }, PickValue(provider, BASE_TYPES), true), provider);
+}
+
/* Fuzz tests that test parsing from a string, and roundtripping via string. */
FUZZ_TARGET_INIT(miniscript_string, FuzzInit)
{
diff --git a/src/test/fuzz/script_sign.cpp b/src/test/fuzz/script_sign.cpp
index 3cef81c251..c78c22e6cc 100644
--- a/src/test/fuzz/script_sign.cpp
+++ b/src/test/fuzz/script_sign.cpp
@@ -108,10 +108,12 @@ FUZZ_TARGET_INIT(script_sign, initialize_script_sign)
CMutableTransaction script_tx_to = tx_to;
CMutableTransaction sign_transaction_tx_to = tx_to;
if (n_in < tx_to.vin.size() && tx_to.vin[n_in].prevout.n < tx_from.vout.size()) {
- (void)SignSignature(provider, tx_from, tx_to, n_in, fuzzed_data_provider.ConsumeIntegral<int>());
+ SignatureData empty;
+ (void)SignSignature(provider, tx_from, tx_to, n_in, fuzzed_data_provider.ConsumeIntegral<int>(), empty);
}
if (n_in < script_tx_to.vin.size()) {
- (void)SignSignature(provider, ConsumeScript(fuzzed_data_provider), script_tx_to, n_in, ConsumeMoney(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral<int>());
+ SignatureData empty;
+ (void)SignSignature(provider, ConsumeScript(fuzzed_data_provider), script_tx_to, n_in, ConsumeMoney(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral<int>(), empty);
MutableTransactionSignatureCreator signature_creator{tx_to, n_in, ConsumeMoney(fuzzed_data_provider), fuzzed_data_provider.ConsumeIntegral<int>()};
std::vector<unsigned char> vch_sig;
CKeyID address;
diff --git a/src/test/logging_tests.cpp b/src/test/logging_tests.cpp
index 022e33f99d..beb9398c74 100644
--- a/src/test/logging_tests.cpp
+++ b/src/test/logging_tests.cpp
@@ -75,20 +75,9 @@ struct LogSetup : public BasicTestingSetup {
BOOST_AUTO_TEST_CASE(logging_timer)
{
- SetMockTime(1);
auto micro_timer = BCLog::Timer<std::chrono::microseconds>("tests", "end_msg");
- SetMockTime(2);
- BOOST_CHECK_EQUAL(micro_timer.LogMsg("test micros"), "tests: test micros (1000000μs)");
-
- SetMockTime(1);
- auto ms_timer = BCLog::Timer<std::chrono::milliseconds>("tests", "end_msg");
- SetMockTime(2);
- BOOST_CHECK_EQUAL(ms_timer.LogMsg("test ms"), "tests: test ms (1000.00ms)");
-
- SetMockTime(1);
- auto sec_timer = BCLog::Timer<std::chrono::seconds>("tests", "end_msg");
- SetMockTime(2);
- BOOST_CHECK_EQUAL(sec_timer.LogMsg("test secs"), "tests: test secs (1.00s)");
+ const std::string_view result_prefix{"tests: msg ("};
+ BOOST_CHECK_EQUAL(micro_timer.LogMsg("msg").substr(0, result_prefix.size()), result_prefix);
}
BOOST_FIXTURE_TEST_CASE(logging_LogPrintf_, LogSetup)
diff --git a/src/test/miniscript_tests.cpp b/src/test/miniscript_tests.cpp
index 3181c9cf28..655d6d7828 100644
--- a/src/test/miniscript_tests.cpp
+++ b/src/test/miniscript_tests.cpp
@@ -2,18 +2,23 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
+#include <stdint.h>
#include <string>
+#include <vector>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
+#include <core_io.h>
#include <hash.h>
#include <pubkey.h>
#include <uint256.h>
#include <crypto/ripemd160.h>
#include <crypto/sha256.h>
+#include <script/interpreter.h>
#include <script/miniscript.h>
+#include <script/standard.h>
+#include <script/script_error.h>
namespace {
@@ -24,15 +29,22 @@ struct TestData {
//! A map from the public keys to their CKeyIDs (faster than hashing every time).
std::map<CPubKey, CKeyID> pkhashes;
std::map<CKeyID, CPubKey> pkmap;
+ std::map<CPubKey, std::vector<unsigned char>> signatures;
// Various precomputed hashes
std::vector<std::vector<unsigned char>> sha256;
std::vector<std::vector<unsigned char>> ripemd160;
std::vector<std::vector<unsigned char>> hash256;
std::vector<std::vector<unsigned char>> hash160;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> sha256_preimages;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> ripemd160_preimages;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> hash256_preimages;
+ std::map<std::vector<unsigned char>, std::vector<unsigned char>> hash160_preimages;
TestData()
{
+ // All our signatures sign (and are required to sign) this constant message.
+ auto const MESSAGE_HASH = uint256S("f5cd94e18b6fe77dd7aca9e35c2b0c9cbd86356c80a71065");
// We generate 255 public keys and 255 hashes of each type.
for (int i = 1; i <= 255; ++i) {
// This 32-byte array functions as both private key data and hash preimage (31 zero bytes plus any nonzero byte).
@@ -48,18 +60,28 @@ struct TestData {
pkhashes.emplace(pubkey, keyid);
pkmap.emplace(keyid, pubkey);
+ // Compute ECDSA signatures on MESSAGE_HASH with the private keys.
+ std::vector<unsigned char> sig;
+ BOOST_CHECK(key.Sign(MESSAGE_HASH, sig));
+ sig.push_back(1); // sighash byte
+ signatures.emplace(pubkey, sig);
+
// Compute various hashes
std::vector<unsigned char> hash;
hash.resize(32);
CSHA256().Write(keydata, 32).Finalize(hash.data());
sha256.push_back(hash);
+ sha256_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
CHash256().Write(keydata).Finalize(hash);
hash256.push_back(hash);
+ hash256_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
hash.resize(20);
CRIPEMD160().Write(keydata, 32).Finalize(hash.data());
ripemd160.push_back(hash);
+ ripemd160_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
CHash160().Write(keydata).Finalize(hash);
hash160.push_back(hash);
+ hash160_preimages[hash] = std::vector<unsigned char>(keydata, keydata + 32);
}
}
};
@@ -67,7 +89,27 @@ struct TestData {
//! Global TestData object
std::unique_ptr<const TestData> g_testdata;
-/** A class encapsulating conversion routing for CPubKey. */
+//! A classification of leaf conditions in miniscripts (excluding true/false).
+enum class ChallengeType {
+ SHA256,
+ RIPEMD160,
+ HASH256,
+ HASH160,
+ OLDER,
+ AFTER,
+ PK
+};
+
+/* With each leaf condition we associate a challenge number.
+ * For hashes it's just the first 4 bytes of the hash. For pubkeys, it's the last 4 bytes.
+ */
+uint32_t ChallengeNumber(const CPubKey& pubkey) { return ReadLE32(pubkey.data() + 29); }
+uint32_t ChallengeNumber(const std::vector<unsigned char>& hash) { return ReadLE32(hash.data()); }
+
+//! A Challenge is a combination of type of leaf condition and its challenge number.
+typedef std::pair<ChallengeType, uint32_t> Challenge;
+
+/** A class encapsulating conversion routing for CPubKey. */
struct KeyConverter {
typedef CPubKey Key;
@@ -117,12 +159,197 @@ struct KeyConverter {
}
};
+/** A class that encapsulates all signing/hash revealing operations. */
+struct Satisfier : public KeyConverter {
+ //! Which keys/timelocks/hash preimages are available.
+ std::set<Challenge> supported;
+
+ //! Implement simplified CLTV logic: stack value must exactly match an entry in `supported`.
+ bool CheckAfter(uint32_t value) const {
+ return supported.count(Challenge(ChallengeType::AFTER, value));
+ }
+
+ //! Implement simplified CSV logic: stack value must exactly match an entry in `supported`.
+ bool CheckOlder(uint32_t value) const {
+ return supported.count(Challenge(ChallengeType::OLDER, value));
+ }
+
+ //! Produce a signature for the given key.
+ miniscript::Availability Sign(const CPubKey& key, std::vector<unsigned char>& sig) const {
+ if (supported.count(Challenge(ChallengeType::PK, ChallengeNumber(key)))) {
+ auto it = g_testdata->signatures.find(key);
+ if (it == g_testdata->signatures.end()) return miniscript::Availability::NO;
+ sig = it->second;
+ return miniscript::Availability::YES;
+ }
+ return miniscript::Availability::NO;
+ }
+
+ //! Helper function for the various hash based satisfactions.
+ miniscript::Availability SatHash(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage, ChallengeType chtype) const {
+ if (!supported.count(Challenge(chtype, ChallengeNumber(hash)))) return miniscript::Availability::NO;
+ const auto& m =
+ chtype == ChallengeType::SHA256 ? g_testdata->sha256_preimages :
+ chtype == ChallengeType::HASH256 ? g_testdata->hash256_preimages :
+ chtype == ChallengeType::RIPEMD160 ? g_testdata->ripemd160_preimages :
+ g_testdata->hash160_preimages;
+ auto it = m.find(hash);
+ if (it == m.end()) return miniscript::Availability::NO;
+ preimage = it->second;
+ return miniscript::Availability::YES;
+ }
+
+ // Functions that produce the preimage for hashes of various types.
+ miniscript::Availability SatSHA256(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const { return SatHash(hash, preimage, ChallengeType::SHA256); }
+ miniscript::Availability SatRIPEMD160(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const { return SatHash(hash, preimage, ChallengeType::RIPEMD160); }
+ miniscript::Availability SatHASH256(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const { return SatHash(hash, preimage, ChallengeType::HASH256); }
+ miniscript::Availability SatHASH160(const std::vector<unsigned char>& hash, std::vector<unsigned char>& preimage) const { return SatHash(hash, preimage, ChallengeType::HASH160); }
+};
+
+/** Mocking signature/timelock checker.
+ *
+ * It holds a pointer to a Satisfier object, to determine which timelocks are supposed to be available.
+ */
+class TestSignatureChecker : public BaseSignatureChecker {
+ const Satisfier& ctx;
+
+public:
+ TestSignatureChecker(const Satisfier& in_ctx LIFETIMEBOUND) : ctx(in_ctx) {}
+
+ bool CheckECDSASignature(const std::vector<unsigned char>& sig, const std::vector<unsigned char>& pubkey, const CScript& scriptcode, SigVersion sigversion) const override {
+ CPubKey pk(pubkey);
+ if (!pk.IsValid()) return false;
+ // Instead of actually running signature validation, check if the signature matches the precomputed one for this key.
+ auto it = g_testdata->signatures.find(pk);
+ if (it == g_testdata->signatures.end()) return false;
+ return sig == it->second;
+ }
+
+ bool CheckLockTime(const CScriptNum& locktime) const override {
+ // Delegate to Satisfier.
+ return ctx.CheckAfter(locktime.GetInt64());
+ }
+
+ bool CheckSequence(const CScriptNum& sequence) const override {
+ // Delegate to Satisfier.
+ return ctx.CheckOlder(sequence.GetInt64());
+ }
+};
+
//! Singleton instance of KeyConverter.
const KeyConverter CONVERTER{};
+using Fragment = miniscript::Fragment;
+using NodeRef = miniscript::NodeRef<CPubKey>;
// https://github.com/llvm/llvm-project/issues/53444
// NOLINTNEXTLINE(misc-unused-using-decls)
using miniscript::operator"" _mst;
+using Node = miniscript::Node<CPubKey>;
+
+/** Compute all challenges (pubkeys, hashes, timelocks) that occur in a given Miniscript. */
+std::set<Challenge> FindChallenges(const NodeRef& ref) {
+ std::set<Challenge> chal;
+ for (const auto& key : ref->keys) {
+ chal.emplace(ChallengeType::PK, ChallengeNumber(key));
+ }
+ if (ref->fragment == miniscript::Fragment::OLDER) {
+ chal.emplace(ChallengeType::OLDER, ref->k);
+ } else if (ref->fragment == miniscript::Fragment::AFTER) {
+ chal.emplace(ChallengeType::AFTER, ref->k);
+ } else if (ref->fragment == miniscript::Fragment::SHA256) {
+ chal.emplace(ChallengeType::SHA256, ChallengeNumber(ref->data));
+ } else if (ref->fragment == miniscript::Fragment::RIPEMD160) {
+ chal.emplace(ChallengeType::RIPEMD160, ChallengeNumber(ref->data));
+ } else if (ref->fragment == miniscript::Fragment::HASH256) {
+ chal.emplace(ChallengeType::HASH256, ChallengeNumber(ref->data));
+ } else if (ref->fragment == miniscript::Fragment::HASH160) {
+ chal.emplace(ChallengeType::HASH160, ChallengeNumber(ref->data));
+ }
+ for (const auto& sub : ref->subs) {
+ auto sub_chal = FindChallenges(sub);
+ chal.insert(sub_chal.begin(), sub_chal.end());
+ }
+ return chal;
+}
+
+/** Run random satisfaction tests. */
+void TestSatisfy(const std::string& testcase, const NodeRef& node) {
+ auto script = node->ToScript(CONVERTER);
+ auto challenges = FindChallenges(node); // Find all challenges in the generated miniscript.
+ std::vector<Challenge> challist(challenges.begin(), challenges.end());
+ for (int iter = 0; iter < 3; ++iter) {
+ Shuffle(challist.begin(), challist.end(), g_insecure_rand_ctx);
+ Satisfier satisfier;
+ TestSignatureChecker checker(satisfier);
+ bool prev_mal_success = false, prev_nonmal_success = false;
+ // Go over all challenges involved in this miniscript in random order.
+ for (int add = -1; add < (int)challist.size(); ++add) {
+ if (add >= 0) satisfier.supported.insert(challist[add]); // The first iteration does not add anything
+
+ // Run malleable satisfaction algorithm.
+ const CScript script_pubkey = CScript() << OP_0 << WitnessV0ScriptHash(script);
+ CScriptWitness witness_mal;
+ const bool mal_success = node->Satisfy(satisfier, witness_mal.stack, false) == miniscript::Availability::YES;
+ witness_mal.stack.push_back(std::vector<unsigned char>(script.begin(), script.end()));
+
+ // Run non-malleable satisfaction algorithm.
+ CScriptWitness witness_nonmal;
+ const bool nonmal_success = node->Satisfy(satisfier, witness_nonmal.stack, true) == miniscript::Availability::YES;
+ witness_nonmal.stack.push_back(std::vector<unsigned char>(script.begin(), script.end()));
+
+ if (nonmal_success) {
+ // Non-malleable satisfactions are bounded by GetStackSize().
+ BOOST_CHECK(witness_nonmal.stack.size() <= node->GetStackSize());
+ // If a non-malleable satisfaction exists, the malleable one must also exist, and be identical to it.
+ BOOST_CHECK(mal_success);
+ BOOST_CHECK(witness_nonmal.stack == witness_mal.stack);
+
+ // Test non-malleable satisfaction.
+ ScriptError serror;
+ bool res = VerifyScript(CScript(), script_pubkey, &witness_nonmal, STANDARD_SCRIPT_VERIFY_FLAGS, checker, &serror);
+ // Non-malleable satisfactions are guaranteed to be valid if ValidSatisfactions().
+ if (node->ValidSatisfactions()) BOOST_CHECK(res);
+ // More detailed: non-malleable satisfactions must be valid, or could fail with ops count error (if CheckOpsLimit failed),
+ // or with a stack size error (if CheckStackSize check fails).
+ BOOST_CHECK(res ||
+ (!node->CheckOpsLimit() && serror == ScriptError::SCRIPT_ERR_OP_COUNT) ||
+ (!node->CheckStackSize() && serror == ScriptError::SCRIPT_ERR_STACK_SIZE));
+ }
+
+ if (mal_success && (!nonmal_success || witness_mal.stack != witness_nonmal.stack)) {
+ // Test malleable satisfaction only if it's different from the non-malleable one.
+ ScriptError serror;
+ bool res = VerifyScript(CScript(), script_pubkey, &witness_mal, STANDARD_SCRIPT_VERIFY_FLAGS, checker, &serror);
+ // Malleable satisfactions are not guaranteed to be valid under any conditions, but they can only
+ // fail due to stack or ops limits.
+ BOOST_CHECK(res || serror == ScriptError::SCRIPT_ERR_OP_COUNT || serror == ScriptError::SCRIPT_ERR_STACK_SIZE);
+ }
+
+ if (node->IsSane()) {
+ // For sane nodes, the two algorithms behave identically.
+ BOOST_CHECK_EQUAL(mal_success, nonmal_success);
+ }
+
+ // Adding more satisfied conditions can never remove our ability to produce a satisfaction.
+ BOOST_CHECK(mal_success >= prev_mal_success);
+ // For nonmalleable solutions this is only true if the added condition is PK;
+ // for other conditions, adding one may make a valid satisfaction become malleable. If the script
+ // is sane, this cannot happen however.
+ if (node->IsSane() || add < 0 || challist[add].first == ChallengeType::PK) {
+ BOOST_CHECK(nonmal_success >= prev_nonmal_success);
+ }
+ // Remember results for the next added challenge.
+ prev_mal_success = mal_success;
+ prev_nonmal_success = nonmal_success;
+ }
+
+ bool satisfiable = node->IsSatisfiable([](const Node&) { return true; });
+ // If the miniscript was satisfiable at all, a satisfaction must be found after all conditions are added.
+ BOOST_CHECK_EQUAL(prev_mal_success, satisfiable);
+ // If the miniscript is sane and satisfiable, a nonmalleable satisfaction must eventually be found.
+ if (node->IsSane()) BOOST_CHECK_EQUAL(prev_nonmal_success, satisfiable);
+ }
+}
enum TestMode : int {
TESTMODE_INVALID = 0,
@@ -152,6 +379,7 @@ void Test(const std::string& ms, const std::string& hexscript, int mode, int ops
BOOST_CHECK_MESSAGE(inferred_miniscript->ToScript(CONVERTER) == computed_script, "Roundtrip failure: miniscript->script != miniscript->script->miniscript->script: " + ms);
if (opslimit != -1) BOOST_CHECK_MESSAGE((int)node->GetOps() == opslimit, "Ops limit mismatch: " << ms << " (" << node->GetOps() << " vs " << opslimit << ")");
if (stacklimit != -1) BOOST_CHECK_MESSAGE((int)node->GetStackSize() == stacklimit, "Stack limit mismatch: " << ms << " (" << node->GetStackSize() << " vs " << stacklimit << ")");
+ TestSatisfy(ms, node);
}
}
} // namespace
diff --git a/src/test/multisig_tests.cpp b/src/test/multisig_tests.cpp
index 1e1a9932ad..7a3e8e3a47 100644
--- a/src/test/multisig_tests.cpp
+++ b/src/test/multisig_tests.cpp
@@ -217,7 +217,8 @@ BOOST_AUTO_TEST_CASE(multisig_Sign)
for (int i = 0; i < 3; i++)
{
- BOOST_CHECK_MESSAGE(SignSignature(keystore, CTransaction(txFrom), txTo[i], 0, SIGHASH_ALL), strprintf("SignSignature %d", i));
+ SignatureData empty;
+ BOOST_CHECK_MESSAGE(SignSignature(keystore, CTransaction(txFrom), txTo[i], 0, SIGHASH_ALL, empty), strprintf("SignSignature %d", i));
}
}
diff --git a/src/test/orphanage_tests.cpp b/src/test/orphanage_tests.cpp
index a55b0bbcd0..d95b9711d0 100644
--- a/src/test/orphanage_tests.cpp
+++ b/src/test/orphanage_tests.cpp
@@ -88,7 +88,8 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
tx.vout.resize(1);
tx.vout[0].nValue = 1*CENT;
tx.vout[0].scriptPubKey = GetScriptForDestination(PKHash(key.GetPubKey()));
- BOOST_CHECK(SignSignature(keystore, *txPrev, tx, 0, SIGHASH_ALL));
+ SignatureData empty;
+ BOOST_CHECK(SignSignature(keystore, *txPrev, tx, 0, SIGHASH_ALL, empty));
orphanage.AddTx(MakeTransactionRef(tx), i);
}
@@ -108,7 +109,8 @@ BOOST_AUTO_TEST_CASE(DoS_mapOrphans)
tx.vin[j].prevout.n = j;
tx.vin[j].prevout.hash = txPrev->GetHash();
}
- BOOST_CHECK(SignSignature(keystore, *txPrev, tx, 0, SIGHASH_ALL));
+ SignatureData empty;
+ BOOST_CHECK(SignSignature(keystore, *txPrev, tx, 0, SIGHASH_ALL, empty));
// Re-use same signature for other inputs
// (they don't have to be valid for this test)
for (unsigned int j = 1; j < tx.vin.size(); j++)
diff --git a/src/test/script_p2sh_tests.cpp b/src/test/script_p2sh_tests.cpp
index e439ff3519..c9f002b324 100644
--- a/src/test/script_p2sh_tests.cpp
+++ b/src/test/script_p2sh_tests.cpp
@@ -102,7 +102,8 @@ BOOST_AUTO_TEST_CASE(sign)
}
for (int i = 0; i < 8; i++)
{
- BOOST_CHECK_MESSAGE(SignSignature(keystore, CTransaction(txFrom), txTo[i], 0, SIGHASH_ALL), strprintf("SignSignature %d", i));
+ SignatureData empty;
+ BOOST_CHECK_MESSAGE(SignSignature(keystore, CTransaction(txFrom), txTo[i], 0, SIGHASH_ALL, empty), strprintf("SignSignature %d", i));
}
// All of the above should be OK, and the txTos have valid signatures
// Check to make sure signature verification fails if we use the wrong ScriptSig:
@@ -197,7 +198,8 @@ BOOST_AUTO_TEST_CASE(set)
}
for (int i = 0; i < 4; i++)
{
- BOOST_CHECK_MESSAGE(SignSignature(keystore, CTransaction(txFrom), txTo[i], 0, SIGHASH_ALL), strprintf("SignSignature %d", i));
+ SignatureData empty;
+ BOOST_CHECK_MESSAGE(SignSignature(keystore, CTransaction(txFrom), txTo[i], 0, SIGHASH_ALL, empty), strprintf("SignSignature %d", i));
BOOST_CHECK_MESSAGE(IsStandardTx(CTransaction(txTo[i]), reason), strprintf("txTo[%d].IsStandard", i));
}
}
@@ -334,9 +336,12 @@ BOOST_AUTO_TEST_CASE(AreInputsStandard)
txTo.vin[i].prevout.n = i;
txTo.vin[i].prevout.hash = txFrom.GetHash();
}
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL));
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 1, SIGHASH_ALL));
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 2, SIGHASH_ALL));
+ SignatureData empty;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL, empty));
+ SignatureData empty_b;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 1, SIGHASH_ALL, empty_b));
+ SignatureData empty_c;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 2, SIGHASH_ALL, empty_c));
// SignSignature doesn't know how to sign these. We're
// not testing validating signatures, so just create
// dummy signatures that DO include the correct P2SH scripts:
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index 22f6cfd164..b16f63d685 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -1180,7 +1180,8 @@ BOOST_AUTO_TEST_CASE(script_combineSigs)
BOOST_CHECK(combined.scriptSig.empty());
// Single signature case:
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL)); // changes scriptSig
+ SignatureData dummy;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL, dummy)); // changes scriptSig
scriptSig = DataFromTransaction(txTo, 0, txFrom.vout[0]);
combined = CombineSignatures(txFrom.vout[0], txTo, scriptSig, empty);
BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
@@ -1188,7 +1189,8 @@ BOOST_AUTO_TEST_CASE(script_combineSigs)
BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
SignatureData scriptSigCopy = scriptSig;
// Signing again will give a different, valid signature:
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL));
+ SignatureData dummy_b;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL, dummy_b));
scriptSig = DataFromTransaction(txTo, 0, txFrom.vout[0]);
combined = CombineSignatures(txFrom.vout[0], txTo, scriptSigCopy, scriptSig);
BOOST_CHECK(combined.scriptSig == scriptSigCopy.scriptSig || combined.scriptSig == scriptSig.scriptSig);
@@ -1197,14 +1199,16 @@ BOOST_AUTO_TEST_CASE(script_combineSigs)
CScript pkSingle; pkSingle << ToByteVector(keys[0].GetPubKey()) << OP_CHECKSIG;
BOOST_CHECK(keystore.AddCScript(pkSingle));
scriptPubKey = GetScriptForDestination(ScriptHash(pkSingle));
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL));
+ SignatureData dummy_c;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL, dummy_c));
scriptSig = DataFromTransaction(txTo, 0, txFrom.vout[0]);
combined = CombineSignatures(txFrom.vout[0], txTo, scriptSig, empty);
BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
combined = CombineSignatures(txFrom.vout[0], txTo, empty, scriptSig);
BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
scriptSigCopy = scriptSig;
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL));
+ SignatureData dummy_d;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL, dummy_d));
scriptSig = DataFromTransaction(txTo, 0, txFrom.vout[0]);
combined = CombineSignatures(txFrom.vout[0], txTo, scriptSigCopy, scriptSig);
BOOST_CHECK(combined.scriptSig == scriptSigCopy.scriptSig || combined.scriptSig == scriptSig.scriptSig);
@@ -1212,7 +1216,8 @@ BOOST_AUTO_TEST_CASE(script_combineSigs)
// Hardest case: Multisig 2-of-3
scriptPubKey = GetScriptForMultisig(2, pubkeys);
BOOST_CHECK(keystore.AddCScript(scriptPubKey));
- BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL));
+ SignatureData dummy_e;
+ BOOST_CHECK(SignSignature(keystore, CTransaction(txFrom), txTo, 0, SIGHASH_ALL, dummy_e));
scriptSig = DataFromTransaction(txTo, 0, txFrom.vout[0]);
combined = CombineSignatures(txFrom.vout[0], txTo, scriptSig, empty);
BOOST_CHECK(combined.scriptSig == scriptSig.scriptSig);
diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp
index f583109e16..09f77d2b61 100644
--- a/src/test/serialize_tests.cpp
+++ b/src/test/serialize_tests.cpp
@@ -237,7 +237,8 @@ BOOST_AUTO_TEST_CASE(class_methods)
BOOST_CHECK(methodtest2 == methodtest3);
BOOST_CHECK(methodtest3 == methodtest4);
- CDataStream ss2(SER_DISK, PROTOCOL_VERSION, intval, boolval, stringval, charstrval, txval);
+ CDataStream ss2{SER_DISK, PROTOCOL_VERSION};
+ ss2 << intval << boolval << stringval << charstrval << txval;
ss2 >> methodtest3;
BOOST_CHECK(methodtest3 == methodtest4);
}
diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp
index d00de9df9b..507284a566 100644
--- a/src/test/transaction_tests.cpp
+++ b/src/test/transaction_tests.cpp
@@ -433,7 +433,8 @@ static void CreateCreditAndSpend(const FillableSigningProvider& keystore, const
inputm.vout.resize(1);
inputm.vout[0].nValue = 1;
inputm.vout[0].scriptPubKey = CScript();
- bool ret = SignSignature(keystore, *output, inputm, 0, SIGHASH_ALL);
+ SignatureData empty;
+ bool ret = SignSignature(keystore, *output, inputm, 0, SIGHASH_ALL, empty);
assert(ret == success);
CDataStream ssin(SER_NETWORK, PROTOCOL_VERSION);
ssin << inputm;
@@ -517,7 +518,8 @@ BOOST_AUTO_TEST_CASE(test_big_witness_transaction)
// sign all inputs
for(uint32_t i = 0; i < mtx.vin.size(); i++) {
- bool hashSigned = SignSignature(keystore, scriptPubKey, mtx, i, 1000, sigHashes.at(i % sigHashes.size()));
+ SignatureData empty;
+ bool hashSigned = SignSignature(keystore, scriptPubKey, mtx, i, 1000, sigHashes.at(i % sigHashes.size()), empty);
assert(hashSigned);
}
@@ -935,23 +937,58 @@ BOOST_AUTO_TEST_CASE(test_IsStandard)
CheckIsNotStandard(t, "bare-multisig");
g_bare_multi = DEFAULT_PERMIT_BAREMULTISIG;
+ // Check compressed P2PK outputs dust threshold (must have leading 02 or 03)
+ t.vout[0].scriptPubKey = CScript() << std::vector<unsigned char>(33, 0x02) << OP_CHECKSIG;
+ t.vout[0].nValue = 576;
+ CheckIsStandard(t);
+ t.vout[0].nValue = 575;
+ CheckIsNotStandard(t, "dust");
+
+ // Check uncompressed P2PK outputs dust threshold (must have leading 04/06/07)
+ t.vout[0].scriptPubKey = CScript() << std::vector<unsigned char>(65, 0x04) << OP_CHECKSIG;
+ t.vout[0].nValue = 672;
+ CheckIsStandard(t);
+ t.vout[0].nValue = 671;
+ CheckIsNotStandard(t, "dust");
+
+ // Check P2PKH outputs dust threshold
+ t.vout[0].scriptPubKey = CScript() << OP_DUP << OP_HASH160 << std::vector<unsigned char>(20, 0) << OP_EQUALVERIFY << OP_CHECKSIG;
+ t.vout[0].nValue = 546;
+ CheckIsStandard(t);
+ t.vout[0].nValue = 545;
+ CheckIsNotStandard(t, "dust");
+
+ // Check P2SH outputs dust threshold
+ t.vout[0].scriptPubKey = CScript() << OP_HASH160 << std::vector<unsigned char>(20, 0) << OP_EQUAL;
+ t.vout[0].nValue = 540;
+ CheckIsStandard(t);
+ t.vout[0].nValue = 539;
+ CheckIsNotStandard(t, "dust");
+
// Check P2WPKH outputs dust threshold
- t.vout[0].scriptPubKey = CScript() << OP_0 << ParseHex("ffffffffffffffffffffffffffffffffffffffff");
+ t.vout[0].scriptPubKey = CScript() << OP_0 << std::vector<unsigned char>(20, 0);
t.vout[0].nValue = 294;
CheckIsStandard(t);
t.vout[0].nValue = 293;
CheckIsNotStandard(t, "dust");
// Check P2WSH outputs dust threshold
- t.vout[0].scriptPubKey = CScript() << OP_0 << ParseHex("ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffff");
+ t.vout[0].scriptPubKey = CScript() << OP_0 << std::vector<unsigned char>(32, 0);
+ t.vout[0].nValue = 330;
+ CheckIsStandard(t);
+ t.vout[0].nValue = 329;
+ CheckIsNotStandard(t, "dust");
+
+ // Check P2TR outputs dust threshold (Invalid xonly key ok!)
+ t.vout[0].scriptPubKey = CScript() << OP_1 << std::vector<unsigned char>(32, 0);
t.vout[0].nValue = 330;
CheckIsStandard(t);
t.vout[0].nValue = 329;
CheckIsNotStandard(t, "dust");
- // Check future Witness Program versions dust threshold
- for (int op = OP_2; op <= OP_16; op += 1) {
- t.vout[0].scriptPubKey = CScript() << (opcodetype)op << ParseHex("ffff");
+ // Check future Witness Program versions dust threshold (non-32-byte pushes are undefined for version 1)
+ for (int op = OP_1; op <= OP_16; op += 1) {
+ t.vout[0].scriptPubKey = CScript() << (opcodetype)op << std::vector<unsigned char>(2, 0);
t.vout[0].nValue = 240;
CheckIsStandard(t);
diff --git a/src/test/util/xoroshiro128plusplus.h b/src/test/util/xoroshiro128plusplus.h
new file mode 100644
index 0000000000..ac9f59b3f5
--- /dev/null
+++ b/src/test/util/xoroshiro128plusplus.h
@@ -0,0 +1,71 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_TEST_UTIL_XOROSHIRO128PLUSPLUS_H
+#define BITCOIN_TEST_UTIL_XOROSHIRO128PLUSPLUS_H
+
+#include <cstdint>
+#include <limits>
+
+/** xoroshiro128++ PRNG. Extremely fast, not appropriate for cryptographic purposes.
+ *
+ * Memory footprint is 128bit, period is 2^128 - 1.
+ * This class is not thread-safe.
+ *
+ * Reference implementation available at https://prng.di.unimi.it/xoroshiro128plusplus.c
+ * See https://prng.di.unimi.it/
+ */
+class XoRoShiRo128PlusPlus
+{
+ uint64_t m_s0;
+ uint64_t m_s1;
+
+ [[nodiscard]] constexpr static uint64_t rotl(uint64_t x, int n)
+ {
+ return (x << n) | (x >> (64 - n));
+ }
+
+ [[nodiscard]] constexpr static uint64_t SplitMix64(uint64_t& seedval) noexcept
+ {
+ uint64_t z = (seedval += UINT64_C(0x9e3779b97f4a7c15));
+ z = (z ^ (z >> 30U)) * UINT64_C(0xbf58476d1ce4e5b9);
+ z = (z ^ (z >> 27U)) * UINT64_C(0x94d049bb133111eb);
+ return z ^ (z >> 31U);
+ }
+
+public:
+ using result_type = uint64_t;
+
+ constexpr explicit XoRoShiRo128PlusPlus(uint64_t seedval) noexcept
+ : m_s0(SplitMix64(seedval)), m_s1(SplitMix64(seedval))
+ {
+ }
+
+ // no copy - that is dangerous, we don't want to accidentally copy the RNG and then have two streams
+ // with exactly the same results. If you need a copy, call copy().
+ XoRoShiRo128PlusPlus(const XoRoShiRo128PlusPlus&) = delete;
+ XoRoShiRo128PlusPlus& operator=(const XoRoShiRo128PlusPlus&) = delete;
+
+ // allow moves
+ XoRoShiRo128PlusPlus(XoRoShiRo128PlusPlus&&) = default;
+ XoRoShiRo128PlusPlus& operator=(XoRoShiRo128PlusPlus&&) = default;
+
+ ~XoRoShiRo128PlusPlus() = default;
+
+ constexpr result_type operator()() noexcept
+ {
+ uint64_t s0 = m_s0, s1 = m_s1;
+ const uint64_t result = rotl(s0 + s1, 17) + s0;
+ s1 ^= s0;
+ m_s0 = rotl(s0, 49) ^ s1 ^ (s1 << 21);
+ m_s1 = rotl(s1, 28);
+ return result;
+ }
+
+ static constexpr result_type min() noexcept { return std::numeric_limits<result_type>::min(); }
+ static constexpr result_type max() noexcept { return std::numeric_limits<result_type>::max(); }
+ static constexpr double entropy() noexcept { return 0.0; }
+};
+
+#endif // BITCOIN_TEST_UTIL_XOROSHIRO128PLUSPLUS_H
diff --git a/src/test/xoroshiro128plusplus_tests.cpp b/src/test/xoroshiro128plusplus_tests.cpp
new file mode 100644
index 0000000000..ea1b3e355f
--- /dev/null
+++ b/src/test/xoroshiro128plusplus_tests.cpp
@@ -0,0 +1,29 @@
+// Copyright (c) 2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <test/util/setup_common.h>
+#include <test/util/xoroshiro128plusplus.h>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(xoroshiro128plusplus_tests, BasicTestingSetup)
+
+BOOST_AUTO_TEST_CASE(reference_values)
+{
+ // numbers generated from reference implementation
+ XoRoShiRo128PlusPlus rng(0);
+ BOOST_TEST(0x6f68e1e7e2646ee1 == rng());
+ BOOST_TEST(0xbf971b7f454094ad == rng());
+ BOOST_TEST(0x48f2de556f30de38 == rng());
+ BOOST_TEST(0x6ea7c59f89bbfc75 == rng());
+
+ // seed with a random number
+ rng = XoRoShiRo128PlusPlus(0x1a26f3fa8546b47a);
+ BOOST_TEST(0xc8dc5e08d844ac7d == rng());
+ BOOST_TEST(0x5b5f1f6d499dad1b == rng());
+ BOOST_TEST(0xbeb0031f93313d6f == rng());
+ BOOST_TEST(0xbfbcf4f43a264497 == rng());
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/uint256.cpp b/src/uint256.cpp
index cd9cbb566a..7f81c3c448 100644
--- a/src/uint256.cpp
+++ b/src/uint256.cpp
@@ -7,15 +7,6 @@
#include <util/strencodings.h>
-#include <string.h>
-
-template <unsigned int BITS>
-base_blob<BITS>::base_blob(const std::vector<unsigned char>& vch)
-{
- assert(vch.size() == sizeof(m_data));
- memcpy(m_data, vch.data(), sizeof(m_data));
-}
-
template <unsigned int BITS>
std::string base_blob<BITS>::GetHex() const
{
@@ -29,7 +20,7 @@ std::string base_blob<BITS>::GetHex() const
template <unsigned int BITS>
void base_blob<BITS>::SetHex(const char* psz)
{
- memset(m_data, 0, sizeof(m_data));
+ std::fill(m_data.begin(), m_data.end(), 0);
// skip leading spaces
while (IsSpace(*psz))
@@ -43,7 +34,7 @@ void base_blob<BITS>::SetHex(const char* psz)
size_t digits = 0;
while (::HexDigit(psz[digits]) != -1)
digits++;
- unsigned char* p1 = (unsigned char*)m_data;
+ unsigned char* p1 = m_data.data();
unsigned char* pend = p1 + WIDTH;
while (digits > 0 && p1 < pend) {
*p1 = ::HexDigit(psz[--digits]);
@@ -67,14 +58,12 @@ std::string base_blob<BITS>::ToString() const
}
// Explicit instantiations for base_blob<160>
-template base_blob<160>::base_blob(const std::vector<unsigned char>&);
template std::string base_blob<160>::GetHex() const;
template std::string base_blob<160>::ToString() const;
template void base_blob<160>::SetHex(const char*);
template void base_blob<160>::SetHex(const std::string&);
// Explicit instantiations for base_blob<256>
-template base_blob<256>::base_blob(const std::vector<unsigned char>&);
template std::string base_blob<256>::GetHex() const;
template std::string base_blob<256>::ToString() const;
template void base_blob<256>::SetHex(const char*);
diff --git a/src/uint256.h b/src/uint256.h
index 58e595c4ca..1cc3721487 100644
--- a/src/uint256.h
+++ b/src/uint256.h
@@ -9,11 +9,12 @@
#include <crypto/common.h>
#include <span.h>
-#include <assert.h>
+#include <algorithm>
+#include <array>
+#include <cassert>
#include <cstring>
#include <stdint.h>
#include <string>
-#include <vector>
/** Template base class for fixed-sized opaque blobs. */
template<unsigned int BITS>
@@ -21,7 +22,9 @@ class base_blob
{
protected:
static constexpr int WIDTH = BITS / 8;
- uint8_t m_data[WIDTH];
+ std::array<uint8_t, WIDTH> m_data;
+ static_assert(WIDTH == sizeof(m_data), "Sanity check");
+
public:
/* construct 0 value by default */
constexpr base_blob() : m_data() {}
@@ -29,64 +32,47 @@ public:
/* constructor for constants between 1 and 255 */
constexpr explicit base_blob(uint8_t v) : m_data{v} {}
- explicit base_blob(const std::vector<unsigned char>& vch);
+ constexpr explicit base_blob(Span<const unsigned char> vch)
+ {
+ assert(vch.size() == WIDTH);
+ std::copy(vch.begin(), vch.end(), m_data.begin());
+ }
- bool IsNull() const
+ constexpr bool IsNull() const
{
- for (int i = 0; i < WIDTH; i++)
- if (m_data[i] != 0)
- return false;
- return true;
+ return std::all_of(m_data.begin(), m_data.end(), [](uint8_t val) {
+ return val == 0;
+ });
}
- void SetNull()
+ constexpr void SetNull()
{
- memset(m_data, 0, sizeof(m_data));
+ std::fill(m_data.begin(), m_data.end(), 0);
}
- inline int Compare(const base_blob& other) const { return memcmp(m_data, other.m_data, sizeof(m_data)); }
+ constexpr int Compare(const base_blob& other) const { return std::memcmp(m_data.data(), other.m_data.data(), WIDTH); }
- friend inline bool operator==(const base_blob& a, const base_blob& b) { return a.Compare(b) == 0; }
- friend inline bool operator!=(const base_blob& a, const base_blob& b) { return a.Compare(b) != 0; }
- friend inline bool operator<(const base_blob& a, const base_blob& b) { return a.Compare(b) < 0; }
+ friend constexpr bool operator==(const base_blob& a, const base_blob& b) { return a.Compare(b) == 0; }
+ friend constexpr bool operator!=(const base_blob& a, const base_blob& b) { return a.Compare(b) != 0; }
+ friend constexpr bool operator<(const base_blob& a, const base_blob& b) { return a.Compare(b) < 0; }
std::string GetHex() const;
void SetHex(const char* psz);
void SetHex(const std::string& str);
std::string ToString() const;
- const unsigned char* data() const { return m_data; }
- unsigned char* data() { return m_data; }
+ constexpr const unsigned char* data() const { return m_data.data(); }
+ constexpr unsigned char* data() { return m_data.data(); }
- unsigned char* begin()
- {
- return &m_data[0];
- }
+ constexpr unsigned char* begin() { return m_data.data(); }
+ constexpr unsigned char* end() { return m_data.data() + WIDTH; }
- unsigned char* end()
- {
- return &m_data[WIDTH];
- }
+ constexpr const unsigned char* begin() const { return m_data.data(); }
+ constexpr const unsigned char* end() const { return m_data.data() + WIDTH; }
- const unsigned char* begin() const
- {
- return &m_data[0];
- }
+ static constexpr unsigned int size() { return WIDTH; }
- const unsigned char* end() const
- {
- return &m_data[WIDTH];
- }
-
- static constexpr unsigned int size()
- {
- return sizeof(m_data);
- }
-
- uint64_t GetUint64(int pos) const
- {
- return ReadLE64(m_data + pos * 8);
- }
+ constexpr uint64_t GetUint64(int pos) const { return ReadLE64(m_data.data() + pos * 8); }
template<typename Stream>
void Serialize(Stream& s) const
@@ -107,8 +93,8 @@ public:
*/
class uint160 : public base_blob<160> {
public:
- constexpr uint160() {}
- explicit uint160(const std::vector<unsigned char>& vch) : base_blob<160>(vch) {}
+ constexpr uint160() = default;
+ constexpr explicit uint160(Span<const unsigned char> vch) : base_blob<160>(vch) {}
};
/** 256-bit opaque blob.
@@ -118,9 +104,9 @@ public:
*/
class uint256 : public base_blob<256> {
public:
- constexpr uint256() {}
+ constexpr uint256() = default;
constexpr explicit uint256(uint8_t v) : base_blob<256>(v) {}
- explicit uint256(const std::vector<unsigned char>& vch) : base_blob<256>(vch) {}
+ constexpr explicit uint256(Span<const unsigned char> vch) : base_blob<256>(vch) {}
static const uint256 ZERO;
static const uint256 ONE;
};
diff --git a/src/util/hasher.cpp b/src/util/hasher.cpp
index a3a3f7a429..81e9b990e1 100644
--- a/src/util/hasher.cpp
+++ b/src/util/hasher.cpp
@@ -9,7 +9,10 @@
SaltedTxidHasher::SaltedTxidHasher() : k0(GetRand<uint64_t>()), k1(GetRand<uint64_t>()) {}
-SaltedOutpointHasher::SaltedOutpointHasher() : k0(GetRand<uint64_t>()), k1(GetRand<uint64_t>()) {}
+SaltedOutpointHasher::SaltedOutpointHasher(bool deterministic) :
+ k0(deterministic ? 0x8e819f2607a18de6 : GetRand<uint64_t>()),
+ k1(deterministic ? 0xf4020d2e3983b0eb : GetRand<uint64_t>())
+{}
SaltedSipHasher::SaltedSipHasher() : m_k0(GetRand<uint64_t>()), m_k1(GetRand<uint64_t>()) {}
diff --git a/src/util/hasher.h b/src/util/hasher.h
index 82d278b086..506ae9415d 100644
--- a/src/util/hasher.h
+++ b/src/util/hasher.h
@@ -36,7 +36,7 @@ private:
const uint64_t k0, k1;
public:
- SaltedOutpointHasher();
+ SaltedOutpointHasher(bool deterministic = false);
/**
* Having the hash noexcept allows libstdc++'s unordered_map to recalculate
diff --git a/src/util/system.cpp b/src/util/system.cpp
index 0cea283ece..e72c970157 100644
--- a/src/util/system.cpp
+++ b/src/util/system.cpp
@@ -1360,6 +1360,11 @@ void SetupEnvironment()
SetConsoleCP(CP_UTF8);
SetConsoleOutputCP(CP_UTF8);
#endif
+
+#ifndef WIN32
+ constexpr mode_t private_umask = 0077;
+ umask(private_umask);
+#endif
}
bool SetupNetworking()
diff --git a/src/validation.cpp b/src/validation.cpp
index 62ce1f1162..f0ffb748dd 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -4081,7 +4081,8 @@ bool CVerifyDB::VerifyDB(
int nGoodTransactions = 0;
BlockValidationState state;
int reportDone = 0;
- LogPrintf("[0%%]..."); /* Continued */
+ bool skipped_l3_checks{false};
+ LogPrintf("Verification progress: 0%%\n");
const bool is_snapshot_cs{!chainstate.m_from_snapshot_blockhash};
@@ -4089,7 +4090,7 @@ bool CVerifyDB::VerifyDB(
const int percentageDone = std::max(1, std::min(99, (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * (nCheckLevel >= 4 ? 50 : 100))));
if (reportDone < percentageDone / 10) {
// report every 10% step
- LogPrintf("[%d%%]...", percentageDone); /* Continued */
+ LogPrintf("Verification progress: %d%%\n", percentageDone);
reportDone = percentageDone / 10;
}
uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
@@ -4124,17 +4125,21 @@ bool CVerifyDB::VerifyDB(
// check level 3: check for inconsistencies during memory-only disconnect of tip blocks
size_t curr_coins_usage = coins.DynamicMemoryUsage() + chainstate.CoinsTip().DynamicMemoryUsage();
- if (nCheckLevel >= 3 && curr_coins_usage <= chainstate.m_coinstip_cache_size_bytes) {
- assert(coins.GetBestBlock() == pindex->GetBlockHash());
- DisconnectResult res = chainstate.DisconnectBlock(block, pindex, coins);
- if (res == DISCONNECT_FAILED) {
- return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
- }
- if (res == DISCONNECT_UNCLEAN) {
- nGoodTransactions = 0;
- pindexFailure = pindex;
+ if (nCheckLevel >= 3) {
+ if (curr_coins_usage <= chainstate.m_coinstip_cache_size_bytes) {
+ assert(coins.GetBestBlock() == pindex->GetBlockHash());
+ DisconnectResult res = chainstate.DisconnectBlock(block, pindex, coins);
+ if (res == DISCONNECT_FAILED) {
+ return error("VerifyDB(): *** irrecoverable inconsistency in block data at %d, hash=%s", pindex->nHeight, pindex->GetBlockHash().ToString());
+ }
+ if (res == DISCONNECT_UNCLEAN) {
+ nGoodTransactions = 0;
+ pindexFailure = pindex;
+ } else {
+ nGoodTransactions += block.vtx.size();
+ }
} else {
- nGoodTransactions += block.vtx.size();
+ skipped_l3_checks = true;
}
}
if (ShutdownRequested()) return true;
@@ -4142,17 +4147,19 @@ bool CVerifyDB::VerifyDB(
if (pindexFailure) {
return error("VerifyDB(): *** coin database inconsistencies found (last %i blocks, %i good transactions before that)\n", chainstate.m_chain.Height() - pindexFailure->nHeight + 1, nGoodTransactions);
}
-
+ if (skipped_l3_checks) {
+ LogPrintf("Skipped verification of level >=3 (insufficient database cache size). Consider increasing -dbcache.\n");
+ }
// store block count as we move pindex at check level >= 4
int block_count = chainstate.m_chain.Height() - pindex->nHeight;
// check level 4: try reconnecting blocks
- if (nCheckLevel >= 4) {
+ if (nCheckLevel >= 4 && !skipped_l3_checks) {
while (pindex != chainstate.m_chain.Tip()) {
const int percentageDone = std::max(1, std::min(99, 100 - (int)(((double)(chainstate.m_chain.Height() - pindex->nHeight)) / (double)nCheckDepth * 50)));
if (reportDone < percentageDone / 10) {
// report every 10% step
- LogPrintf("[%d%%]...", percentageDone); /* Continued */
+ LogPrintf("Verification progress: %d%%\n", percentageDone);
reportDone = percentageDone / 10;
}
uiInterface.ShowProgress(_("Verifying blocks…").translated, percentageDone, false);
@@ -4167,8 +4174,7 @@ bool CVerifyDB::VerifyDB(
}
}
- LogPrintf("[DONE].\n");
- LogPrintf("No coin database inconsistencies in last %i blocks (%i transactions)\n", block_count, nGoodTransactions);
+ LogPrintf("Verification: No coin database inconsistencies in last %i blocks (%i transactions)\n", block_count, nGoodTransactions);
return true;
}
diff --git a/src/wallet/feebumper.cpp b/src/wallet/feebumper.cpp
index bd158b5985..37a704bfa4 100644
--- a/src/wallet/feebumper.cpp
+++ b/src/wallet/feebumper.cpp
@@ -155,7 +155,7 @@ bool TransactionCanBeBumped(const CWallet& wallet, const uint256& txid)
}
Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCoinControl& coin_control, std::vector<bilingual_str>& errors,
- CAmount& old_fee, CAmount& new_fee, CMutableTransaction& mtx, bool require_mine)
+ CAmount& old_fee, CAmount& new_fee, CMutableTransaction& mtx, bool require_mine, const std::vector<CTxOut>& outputs)
{
// We are going to modify coin control later, copy to re-use
CCoinControl new_coin_control(coin_control);
@@ -222,11 +222,19 @@ Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCo
return result;
}
- // Fill in recipients(and preserve a single change key if there is one)
- // While we're here, calculate the output amount
- std::vector<CRecipient> recipients;
+ // Calculate the old output amount.
CAmount output_value = 0;
- for (const auto& output : wtx.tx->vout) {
+ for (const auto& old_output : wtx.tx->vout) {
+ output_value += old_output.nValue;
+ }
+
+ old_fee = input_value - output_value;
+
+ // Fill in recipients (and preserve a single change key if there
+ // is one). If outputs vector is non-empty, replace original
+ // outputs with its contents, otherwise use original outputs.
+ std::vector<CRecipient> recipients;
+ for (const auto& output : outputs.empty() ? wtx.tx->vout : outputs) {
if (!OutputIsChange(wallet, output)) {
CRecipient recipient = {output.scriptPubKey, output.nValue, false};
recipients.push_back(recipient);
@@ -235,11 +243,8 @@ Result CreateRateBumpTransaction(CWallet& wallet, const uint256& txid, const CCo
ExtractDestination(output.scriptPubKey, change_dest);
new_coin_control.destChange = change_dest;
}
- output_value += output.nValue;
}
- old_fee = input_value - output_value;
-
if (coin_control.m_feerate) {
// The user provided a feeRate argument.
// We calculate this here to avoid compiler warning on the cs_wallet lock
diff --git a/src/wallet/feebumper.h b/src/wallet/feebumper.h
index a96871b26f..53cf16e0f1 100644
--- a/src/wallet/feebumper.h
+++ b/src/wallet/feebumper.h
@@ -51,7 +51,8 @@ Result CreateRateBumpTransaction(CWallet& wallet,
CAmount& old_fee,
CAmount& new_fee,
CMutableTransaction& mtx,
- bool require_mine);
+ bool require_mine,
+ const std::vector<CTxOut>& outputs);
//! Sign the new transaction,
//! @return false if the tx couldn't be found or if it was
diff --git a/src/wallet/init.cpp b/src/wallet/init.cpp
index 773f094274..5403e38950 100644
--- a/src/wallet/init.cpp
+++ b/src/wallet/init.cpp
@@ -122,9 +122,6 @@ bool WalletInit::ParameterInteraction() const
return InitError(Untranslated("-zapwallettxes has been removed. If you are attempting to remove a stuck transaction from your wallet, please use abandontransaction instead."));
}
- if (gArgs.GetBoolArg("-sysperms", false))
- return InitError(Untranslated("-sysperms is not allowed in combination with enabled wallet functionality"));
-
return true;
}
diff --git a/src/wallet/interfaces.cpp b/src/wallet/interfaces.cpp
index 68dd3da9b5..1a76e46c54 100644
--- a/src/wallet/interfaces.cpp
+++ b/src/wallet/interfaces.cpp
@@ -291,7 +291,8 @@ public:
CAmount& new_fee,
CMutableTransaction& mtx) override
{
- return feebumper::CreateRateBumpTransaction(*m_wallet.get(), txid, coin_control, errors, old_fee, new_fee, mtx, /* require_mine= */ true) == feebumper::Result::OK;
+ std::vector<CTxOut> outputs; // just an empty list of new recipients for now
+ return feebumper::CreateRateBumpTransaction(*m_wallet.get(), txid, coin_control, errors, old_fee, new_fee, mtx, /* require_mine= */ true, outputs) == feebumper::Result::OK;
}
bool signBumpTransaction(CMutableTransaction& mtx) override { return feebumper::SignTransaction(*m_wallet.get(), mtx); }
bool commitBumpTransaction(const uint256& txid,
diff --git a/src/wallet/rpc/spend.cpp b/src/wallet/rpc/spend.cpp
index cab797bbce..88ee6e96b0 100644
--- a/src/wallet/rpc/spend.cpp
+++ b/src/wallet/rpc/spend.cpp
@@ -956,6 +956,26 @@ RPCHelpMan signrawtransactionwithwallet()
};
}
+// Definition of allowed formats of specifying transaction outputs in
+// `bumpfee`, `psbtbumpfee`, `send` and `walletcreatefundedpsbt` RPCs.
+static std::vector<RPCArg> OutputsDoc()
+{
+ return
+ {
+ {"", RPCArg::Type::OBJ_USER_KEYS, RPCArg::Optional::OMITTED, "",
+ {
+ {"address", RPCArg::Type::AMOUNT, RPCArg::Optional::NO, "A key-value pair. The key (string) is the bitcoin address,\n"
+ "the value (float or string) is the amount in " + CURRENCY_UNIT + ""},
+ },
+ },
+ {"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
+ {
+ {"data", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "A key-value pair. The key must be \"data\", the value is hex-encoded data"},
+ },
+ },
+ };
+}
+
static RPCHelpMan bumpfee_helper(std::string method_name)
{
const bool want_psbt = method_name == "psbtbumpfee";
@@ -992,7 +1012,12 @@ static RPCHelpMan bumpfee_helper(std::string method_name)
"still be replaceable in practice, for example if it has unconfirmed ancestors which\n"
"are replaceable).\n"},
{"estimate_mode", RPCArg::Type::STR, RPCArg::Default{"unset"}, "The fee estimate mode, must be one of (case insensitive):\n"
- "\"" + FeeModes("\"\n\"") + "\""},
+ "\"" + FeeModes("\"\n\"") + "\""},
+ {"outputs", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "New outputs (key-value pairs) which will replace\n"
+ "the original ones, if provided. Each address can only appear once and there can\n"
+ "only be one \"data\" object.\n",
+ OutputsDoc(),
+ RPCArgOptions{.skip_type_check = true}},
},
RPCArgOptions{.oneline_description="options"}},
},
@@ -1029,6 +1054,7 @@ static RPCHelpMan bumpfee_helper(std::string method_name)
coin_control.fAllowWatchOnly = pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS);
// optional parameters
coin_control.m_signal_bip125_rbf = true;
+ std::vector<CTxOut> outputs;
if (!request.params[1].isNull()) {
UniValue options = request.params[1];
@@ -1039,6 +1065,7 @@ static RPCHelpMan bumpfee_helper(std::string method_name)
{"fee_rate", UniValueType()}, // will be checked by AmountFromValue() in SetFeeEstimateMode()
{"replaceable", UniValueType(UniValue::VBOOL)},
{"estimate_mode", UniValueType(UniValue::VSTR)},
+ {"outputs", UniValueType()}, // will be checked by AddOutputs()
},
true, true);
@@ -1052,6 +1079,16 @@ static RPCHelpMan bumpfee_helper(std::string method_name)
coin_control.m_signal_bip125_rbf = options["replaceable"].get_bool();
}
SetFeeEstimateMode(*pwallet, coin_control, conf_target, options["estimate_mode"], options["fee_rate"], /*override_min_fee=*/false);
+
+ // Prepare new outputs by creating a temporary tx and calling AddOutputs().
+ if (!options["outputs"].isNull()) {
+ if (options["outputs"].isArray() && options["outputs"].empty()) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid parameter, output argument cannot be an empty array");
+ }
+ CMutableTransaction tempTx;
+ AddOutputs(tempTx, options["outputs"]);
+ outputs = tempTx.vout;
+ }
}
// Make sure the results are valid at least up to the most recent block
@@ -1069,7 +1106,7 @@ static RPCHelpMan bumpfee_helper(std::string method_name)
CMutableTransaction mtx;
feebumper::Result res;
// Targeting feerate bump.
- res = feebumper::CreateRateBumpTransaction(*pwallet, hash, coin_control, errors, old_fee, new_fee, mtx, /*require_mine=*/ !want_psbt);
+ res = feebumper::CreateRateBumpTransaction(*pwallet, hash, coin_control, errors, old_fee, new_fee, mtx, /*require_mine=*/ !want_psbt, outputs);
if (res != feebumper::Result::OK) {
switch(res) {
case feebumper::Result::INVALID_ADDRESS_OR_KEY:
@@ -1144,18 +1181,7 @@ RPCHelpMan send()
{"outputs", RPCArg::Type::ARR, RPCArg::Optional::NO, "The outputs (key-value pairs), where none of the keys are duplicated.\n"
"That is, each address can only appear once and there can only be one 'data' object.\n"
"For convenience, a dictionary, which holds the key-value pairs directly, is also accepted.",
- {
- {"", RPCArg::Type::OBJ_USER_KEYS, RPCArg::Optional::OMITTED, "",
- {
- {"address", RPCArg::Type::AMOUNT, RPCArg::Optional::NO, "A key-value pair. The key (string) is the bitcoin address, the value (float or string) is the amount in " + CURRENCY_UNIT + ""},
- },
- },
- {"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
- {
- {"data", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "A key-value pair. The key must be \"data\", the value is hex-encoded data"},
- },
- },
- },
+ OutputsDoc(),
RPCArgOptions{.skip_type_check = true}},
{"conf_target", RPCArg::Type::NUM, RPCArg::DefaultHint{"wallet -txconfirmtarget"}, "Confirmation target in blocks"},
{"estimate_mode", RPCArg::Type::STR, RPCArg::Default{"unset"}, "The fee estimate mode, must be one of (case insensitive):\n"
@@ -1606,19 +1632,8 @@ RPCHelpMan walletcreatefundedpsbt()
"That is, each address can only appear once and there can only be one 'data' object.\n"
"For compatibility reasons, a dictionary, which holds the key-value pairs directly, is also\n"
"accepted as second parameter.",
- {
- {"", RPCArg::Type::OBJ_USER_KEYS, RPCArg::Optional::OMITTED, "",
- {
- {"address", RPCArg::Type::AMOUNT, RPCArg::Optional::NO, "A key-value pair. The key (string) is the bitcoin address, the value (float or string) is the amount in " + CURRENCY_UNIT + ""},
- },
- },
- {"", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
- {
- {"data", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "A key-value pair. The key must be \"data\", the value is hex-encoded data"},
- },
- },
- },
- RPCArgOptions{.skip_type_check = true}},
+ OutputsDoc(),
+ RPCArgOptions{.skip_type_check = true}},
{"locktime", RPCArg::Type::NUM, RPCArg::Default{0}, "Raw locktime. Non-0 value also locktime-activates inputs"},
{"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED, "",
Cat<std::vector<RPCArg>>(
diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp
index e66ff8c97c..0ef56ab8ed 100644
--- a/src/wallet/spend.cpp
+++ b/src/wallet/spend.cpp
@@ -306,9 +306,7 @@ CoinsResult AvailableCoins(const CWallet& wallet,
std::unique_ptr<SigningProvider> provider = wallet.GetSolvingProvider(output.scriptPubKey);
int input_bytes = CalculateMaximumSignedInputSize(output, COutPoint(), provider.get(), coinControl);
- // Because CalculateMaximumSignedInputSize just uses ProduceSignature and makes a dummy signature,
- // it is safe to assume that this input is solvable if input_bytes is greater -1.
- bool solvable = input_bytes > -1;
+ bool solvable = provider ? InferDescriptor(output.scriptPubKey, *provider)->IsSolvable() : false;
bool spendable = ((mine & ISMINE_SPENDABLE) != ISMINE_NO) || (((mine & ISMINE_WATCH_ONLY) != ISMINE_NO) && (coinControl && coinControl->fAllowWatchOnly && solvable));
// Filter by spendable outputs only
diff --git a/src/wallet/test/coinselector_tests.cpp b/src/wallet/test/coinselector_tests.cpp
index 79c3d235ef..1c731b95e5 100644
--- a/src/wallet/test/coinselector_tests.cpp
+++ b/src/wallet/test/coinselector_tests.cpp
@@ -232,17 +232,6 @@ BOOST_AUTO_TEST_CASE(bnb_search_test)
BOOST_CHECK_EQUAL(result5->GetSelectedValue(), 10 * CENT);
expected_result.Clear();
- // Negative effective value
- // Select 10 Cent but have 1 Cent not be possible because too small
- add_coin(5 * CENT, 5, expected_result);
- add_coin(3 * CENT, 3, expected_result);
- add_coin(2 * CENT, 2, expected_result);
- const auto result6 = SelectCoinsBnB(GroupCoins(utxo_pool), 10 * CENT, 5000);
- BOOST_CHECK(result6);
- BOOST_CHECK_EQUAL(result6->GetSelectedValue(), 10 * CENT);
- // FIXME: this test is redundant with the above, because 1 Cent is selected, not "too small"
- // BOOST_CHECK(EquivalentResult(expected_result, *result));
-
// Select 0.25 Cent, not possible
BOOST_CHECK(!SelectCoinsBnB(GroupCoins(utxo_pool), 0.25 * CENT, 0.5 * CENT));
expected_result.Clear();
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index b6d4f0ac37..eed40f9462 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -26,6 +26,7 @@
#include <script/descriptor.h>
#include <script/script.h>
#include <script/signingprovider.h>
+#include <support/cleanse.h>
#include <txmempool.h>
#include <util/bip32.h>
#include <util/check.h>
@@ -3412,7 +3413,10 @@ bool CWallet::Lock()
{
LOCK(cs_wallet);
- vMasterKey.clear();
+ if (!vMasterKey.empty()) {
+ memory_cleanse(vMasterKey.data(), vMasterKey.size() * sizeof(decltype(vMasterKey)::value_type));
+ vMasterKey.clear();
+ }
}
NotifyStatusChanged(this);
@@ -3876,10 +3880,7 @@ std::optional<MigrationData> CWallet::GetDescriptorsForLegacy(bilingual_str& err
AssertLockHeld(cs_wallet);
LegacyScriptPubKeyMan* legacy_spkm = GetLegacyScriptPubKeyMan();
- if (!legacy_spkm) {
- error = _("Error: This wallet is already a descriptor wallet");
- return std::nullopt;
- }
+ assert(legacy_spkm);
std::optional<MigrationData> res = legacy_spkm->MigrateToDescriptor();
if (res == std::nullopt) {
@@ -4175,6 +4176,11 @@ bool DoMigration(CWallet& wallet, WalletContext& context, bilingual_str& error,
util::Result<MigrationResult> MigrateLegacyToDescriptor(std::shared_ptr<CWallet>&& wallet, WalletContext& context)
{
+ // Before anything else, check if there is something to migrate.
+ if (!wallet->GetLegacyScriptPubKeyMan()) {
+ return util::Error{_("Error: This wallet is already a descriptor wallet")};
+ }
+
MigrationResult res;
bilingual_str error;
std::vector<bilingual_str> warnings;
diff --git a/test/functional/data/rpc_decodescript.json b/test/functional/data/rpc_decodescript.json
index 4a15ae8792..5f3e725d4c 100644
--- a/test/functional/data/rpc_decodescript.json
+++ b/test/functional/data/rpc_decodescript.json
@@ -69,7 +69,7 @@
"p2sh": "2N34iiGoUUkVSPiaaTFpJjB1FR9TXQu3PGM",
"segwit": {
"asm": "0 96c2368fc30514a438a8bd909f93c49a1549d77198ccbdb792043b666cb24f42",
- "desc": "addr(bcrt1qjmprdr7rq522gw9ghkgfly7yng25n4m3nrxtmdujqsakvm9jfapqk795l5)#5akkdska",
+ "desc": "wsh(raw(02eeee))#gtay4y0z",
"hex": "002096c2368fc30514a438a8bd909f93c49a1549d77198ccbdb792043b666cb24f42",
"address": "bcrt1qjmprdr7rq522gw9ghkgfly7yng25n4m3nrxtmdujqsakvm9jfapqk795l5",
"type": "witness_v0_scripthash",
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
index 22b1918b85..1080e77c40 100755
--- a/test/functional/feature_block.py
+++ b/test/functional/feature_block.py
@@ -161,7 +161,7 @@ class FullBlockTest(BitcoinTestFramework):
self.log.info(f"Reject block with invalid tx: {TxTemplate.__name__}")
blockname = f"for_invalid.{TxTemplate.__name__}"
- badblock = self.next_block(blockname)
+ self.next_block(blockname)
badtx = template.get_tx()
if TxTemplate != invalid_txs.InputMissing:
self.sign_tx(badtx, attempt_spend_tx)
@@ -473,7 +473,7 @@ class FullBlockTest(BitcoinTestFramework):
#
self.log.info("Check P2SH SIGOPS are correctly counted")
self.move_tip(35)
- b39 = self.next_block(39)
+ self.next_block(39)
b39_outputs = 0
b39_sigops_per_output = 6
@@ -672,7 +672,7 @@ class FullBlockTest(BitcoinTestFramework):
self.log.info("Reject a block with two coinbase transactions")
self.move_tip(44)
- b51 = self.next_block(51)
+ self.next_block(51)
cb2 = create_coinbase(51, self.coinbase_pubkey)
b51 = self.update_block(51, [cb2])
self.send_blocks([b51], success=False, reject_reason='bad-cb-multiple', reconnect=True)
@@ -752,7 +752,7 @@ class FullBlockTest(BitcoinTestFramework):
# b57 - a good block with 2 txs, don't submit until end
self.move_tip(55)
- b57 = self.next_block(57)
+ self.next_block(57)
tx = self.create_and_sign_transaction(out[16], 1)
tx1 = self.create_tx(tx, 0, 1)
b57 = self.update_block(57, [tx, tx1])
@@ -769,7 +769,7 @@ class FullBlockTest(BitcoinTestFramework):
# b57p2 - a good block with 6 tx'es, don't submit until end
self.move_tip(55)
- b57p2 = self.next_block("57p2")
+ self.next_block("57p2")
tx = self.create_and_sign_transaction(out[16], 1)
tx1 = self.create_tx(tx, 0, 1)
tx2 = self.create_tx(tx1, 0, 1)
@@ -803,7 +803,7 @@ class FullBlockTest(BitcoinTestFramework):
# tx with prevout.n out of range
self.log.info("Reject a block with a transaction with prevout.n out of range")
self.move_tip(57)
- b58 = self.next_block(58, spend=out[17])
+ self.next_block(58, spend=out[17])
tx = CTransaction()
assert len(out[17].vout) < 42
tx.vin.append(CTxIn(COutPoint(out[17].sha256, 42), CScript([OP_TRUE]), SEQUENCE_FINAL))
@@ -815,7 +815,7 @@ class FullBlockTest(BitcoinTestFramework):
# tx with output value > input value
self.log.info("Reject a block with a transaction with outputs > inputs")
self.move_tip(57)
- b59 = self.next_block(59)
+ self.next_block(59)
tx = self.create_and_sign_transaction(out[17], 51 * COIN)
b59 = self.update_block(59, [tx])
self.send_blocks([b59], success=False, reject_reason='bad-txns-in-belowout', reconnect=True)
@@ -851,7 +851,7 @@ class FullBlockTest(BitcoinTestFramework):
# \-> b_spend_dup_cb (b_dup_cb) -> b_dup_2 ()
#
self.move_tip(57)
- b_spend_dup_cb = self.next_block('spend_dup_cb')
+ self.next_block('spend_dup_cb')
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(duplicate_tx.sha256, 0)))
tx.vout.append(CTxOut(0, CScript([OP_TRUE])))
@@ -876,7 +876,7 @@ class FullBlockTest(BitcoinTestFramework):
#
self.log.info("Reject a block with a transaction with a nonfinal locktime")
self.move_tip('dup_2')
- b62 = self.next_block(62)
+ self.next_block(62)
tx = CTransaction()
tx.nLockTime = 0xffffffff # this locktime is non-final
tx.vin.append(CTxIn(COutPoint(out[18].sha256, 0))) # don't set nSequence
@@ -957,7 +957,7 @@ class FullBlockTest(BitcoinTestFramework):
#
self.log.info("Accept a block with a transaction spending an output created in the same block")
self.move_tip(64)
- b65 = self.next_block(65)
+ self.next_block(65)
tx1 = self.create_and_sign_transaction(out[19], out[19].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 0)
b65 = self.update_block(65, [tx1, tx2])
@@ -970,7 +970,7 @@ class FullBlockTest(BitcoinTestFramework):
# \-> b66 (20)
self.log.info("Reject a block with a transaction spending an output created later in the same block")
self.move_tip(65)
- b66 = self.next_block(66)
+ self.next_block(66)
tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 1)
b66 = self.update_block(66, [tx2, tx1])
@@ -984,7 +984,7 @@ class FullBlockTest(BitcoinTestFramework):
#
self.log.info("Reject a block with a transaction double spending a transaction created in the same block")
self.move_tip(65)
- b67 = self.next_block(67)
+ self.next_block(67)
tx1 = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue)
tx2 = self.create_and_sign_transaction(tx1, 1)
tx3 = self.create_and_sign_transaction(tx1, 2)
@@ -1005,7 +1005,7 @@ class FullBlockTest(BitcoinTestFramework):
#
self.log.info("Reject a block trying to claim too much subsidy in the coinbase transaction")
self.move_tip(65)
- b68 = self.next_block(68, additional_coinbase_value=10)
+ self.next_block(68, additional_coinbase_value=10)
tx = self.create_and_sign_transaction(out[20], out[20].vout[0].nValue - 9)
b68 = self.update_block(68, [tx])
self.send_blocks([b68], success=False, reject_reason='bad-cb-amount', reconnect=True)
@@ -1025,7 +1025,7 @@ class FullBlockTest(BitcoinTestFramework):
#
self.log.info("Reject a block containing a transaction spending from a non-existent input")
self.move_tip(69)
- b70 = self.next_block(70, spend=out[21])
+ self.next_block(70, spend=out[21])
bogus_tx = CTransaction()
bogus_tx.sha256 = uint256_from_str(b"23c70ed7c0506e9178fc1a987f40a33946d4ad4c962b5ae3a52546da53af0c5c")
tx = CTransaction()
@@ -1043,7 +1043,7 @@ class FullBlockTest(BitcoinTestFramework):
# b71 is a copy of 72, but re-adds one of its transactions. However, it has the same hash as b72.
self.log.info("Reject a block containing a duplicate transaction but with the same Merkle root (Merkle tree malleability")
self.move_tip(69)
- b72 = self.next_block(72)
+ self.next_block(72)
tx1 = self.create_and_sign_transaction(out[21], 2)
tx2 = self.create_and_sign_transaction(tx1, 1)
b72 = self.update_block(72, [tx1, tx2]) # now tip is 72
@@ -1081,7 +1081,7 @@ class FullBlockTest(BitcoinTestFramework):
# bytearray[20,526] : OP_CHECKSIG (this puts us over the limit)
self.log.info("Reject a block containing too many sigops after a large script element")
self.move_tip(72)
- b73 = self.next_block(73)
+ self.next_block(73)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5 + 1
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = int("4e", 16) # OP_PUSHDATA4
@@ -1109,7 +1109,7 @@ class FullBlockTest(BitcoinTestFramework):
# b75 succeeds because we put MAX_BLOCK_SIGOPS before the element
self.log.info("Check sigops are counted correctly after an invalid script element")
self.move_tip(72)
- b74 = self.next_block(74)
+ self.next_block(74)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42 # total = 20,561
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS] = 0x4e
@@ -1122,7 +1122,7 @@ class FullBlockTest(BitcoinTestFramework):
self.send_blocks([b74], success=False, reject_reason='bad-blk-sigops', reconnect=True)
self.move_tip(72)
- b75 = self.next_block(75)
+ self.next_block(75)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 42
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e
@@ -1137,7 +1137,7 @@ class FullBlockTest(BitcoinTestFramework):
# Check that if we push an element filled with CHECKSIGs, they are not counted
self.move_tip(75)
- b76 = self.next_block(76)
+ self.next_block(76)
size = MAX_BLOCK_SIGOPS - 1 + MAX_SCRIPT_ELEMENT_SIZE + 1 + 5
a = bytearray([OP_CHECKSIG] * size)
a[MAX_BLOCK_SIGOPS - 1] = 0x4e # PUSHDATA4, but leave the following bytes as just checksigs
@@ -1165,18 +1165,18 @@ class FullBlockTest(BitcoinTestFramework):
# updated. (Perhaps to spend to a P2SH OP_TRUE script)
self.log.info("Test transaction resurrection during a re-org")
self.move_tip(76)
- b77 = self.next_block(77)
+ self.next_block(77)
tx77 = self.create_and_sign_transaction(out[24], 10 * COIN)
b77 = self.update_block(77, [tx77])
self.send_blocks([b77], True)
self.save_spendable_output()
- b78 = self.next_block(78)
+ self.next_block(78)
tx78 = self.create_tx(tx77, 0, 9 * COIN)
b78 = self.update_block(78, [tx78])
self.send_blocks([b78], True)
- b79 = self.next_block(79)
+ self.next_block(79)
tx79 = self.create_tx(tx78, 0, 8 * COIN)
b79 = self.update_block(79, [tx79])
self.send_blocks([b79], True)
@@ -1208,7 +1208,7 @@ class FullBlockTest(BitcoinTestFramework):
# -> b81 (26) -> b82 (27) -> b83 (28)
#
self.log.info("Accept a block with invalid opcodes in dead execution paths")
- b83 = self.next_block(83)
+ self.next_block(83)
op_codes = [OP_IF, OP_INVALIDOPCODE, OP_ELSE, OP_TRUE, OP_ENDIF]
script = CScript(op_codes)
tx1 = self.create_and_sign_transaction(out[28], out[28].vout[0].nValue, script)
@@ -1227,7 +1227,7 @@ class FullBlockTest(BitcoinTestFramework):
# \-> b85 (29) -> b86 (30) \-> b89a (32)
#
self.log.info("Test re-orging blocks with OP_RETURN in them")
- b84 = self.next_block(84)
+ self.next_block(84)
tx1 = self.create_tx(out[29], 0, 0, CScript([OP_RETURN]))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
tx1.vout.append(CTxOut(0, CScript([OP_TRUE])))
@@ -1265,7 +1265,7 @@ class FullBlockTest(BitcoinTestFramework):
self.save_spendable_output()
# trying to spend the OP_RETURN output is rejected
- b89a = self.next_block("89a", spend=out[32])
+ self.next_block("89a", spend=out[32])
tx = self.create_tx(tx1, 0, 0, CScript([OP_TRUE]))
b89a = self.update_block("89a", [tx])
self.send_blocks([b89a], success=False, reject_reason='bad-txns-inputs-missingorspent', reconnect=True)
diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py
index 28a8959e93..c551c0b449 100755
--- a/test/functional/feature_maxuploadtarget.py
+++ b/test/functional/feature_maxuploadtarget.py
@@ -164,6 +164,9 @@ class MaxUploadTest(BitcoinTestFramework):
assert_equal(len(peer_info), 1) # node is still connected
assert_equal(peer_info[0]['permissions'], ['download'])
+ self.log.info("Test passing an unparsable value to -maxuploadtarget throws an error")
+ self.stop_node(0)
+ self.nodes[0].assert_start_raises_init_error(extra_args=["-maxuploadtarget=abc"], expected_msg="Error: Unable to parse -maxuploadtarget: 'abc'")
if __name__ == '__main__':
MaxUploadTest().main()
diff --git a/test/functional/feature_posix_fs_permissions.py b/test/functional/feature_posix_fs_permissions.py
new file mode 100755
index 0000000000..c5a543e97a
--- /dev/null
+++ b/test/functional/feature_posix_fs_permissions.py
@@ -0,0 +1,43 @@
+#!/usr/bin/env python3
+# Copyright (c) 2022 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test file system permissions for POSIX platforms.
+"""
+
+import os
+import stat
+
+from test_framework.test_framework import BitcoinTestFramework
+
+
+class PosixFsPermissionsTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.setup_clean_chain = True
+ self.num_nodes = 1
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_platform_not_posix()
+
+ def check_directory_permissions(self, dir):
+ mode = os.lstat(dir).st_mode
+ self.log.info(f"{stat.filemode(mode)} {dir}")
+ assert mode == (stat.S_IFDIR | stat.S_IRUSR | stat.S_IWUSR | stat.S_IXUSR)
+
+ def check_file_permissions(self, file):
+ mode = os.lstat(file).st_mode
+ self.log.info(f"{stat.filemode(mode)} {file}")
+ assert mode == (stat.S_IFREG | stat.S_IRUSR | stat.S_IWUSR)
+
+ def run_test(self):
+ self.stop_node(0)
+ datadir = os.path.join(self.nodes[0].datadir, self.chain)
+ self.check_directory_permissions(datadir)
+ walletsdir = os.path.join(datadir, "wallets")
+ self.check_directory_permissions(walletsdir)
+ debuglog = os.path.join(datadir, "debug.log")
+ self.check_file_permissions(debuglog)
+
+
+if __name__ == '__main__':
+ PosixFsPermissionsTest().main()
diff --git a/test/functional/mempool_updatefromblock.py b/test/functional/mempool_updatefromblock.py
index 68cbb5dbed..8350e9c91e 100755
--- a/test/functional/mempool_updatefromblock.py
+++ b/test/functional/mempool_updatefromblock.py
@@ -7,14 +7,12 @@
Test mempool update of transaction descendants/ancestors information (count, size)
when transactions have been re-added from a disconnected block to the mempool.
"""
+from math import ceil
import time
-from decimal import Decimal
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
-from test_framework.address import key_to_p2pkh
-from test_framework.wallet_util import bytes_to_wif
-from test_framework.key import ECKey
+from test_framework.wallet import MiniWallet
class MempoolUpdateFromBlockTest(BitcoinTestFramework):
@@ -22,15 +20,7 @@ class MempoolUpdateFromBlockTest(BitcoinTestFramework):
self.num_nodes = 1
self.extra_args = [['-limitdescendantsize=1000', '-limitancestorsize=1000', '-limitancestorcount=100']]
- def get_new_address(self):
- key = ECKey()
- key.generate()
- pubkey = key.get_pubkey().get_bytes()
- address = key_to_p2pkh(pubkey)
- self.priv_keys.append(bytes_to_wif(key.get_bytes()))
- return address
-
- def transaction_graph_test(self, size, n_tx_to_mine=None, start_input_txid='', end_address='', fee=Decimal(0.00100000)):
+ def transaction_graph_test(self, size, n_tx_to_mine=None, fee=100_000):
"""Create an acyclic tournament (a type of directed graph) of transactions and use it for testing.
Keyword arguments:
@@ -45,14 +35,7 @@ class MempoolUpdateFromBlockTest(BitcoinTestFramework):
More details: https://en.wikipedia.org/wiki/Tournament_(graph_theory)
"""
-
- self.priv_keys = [self.nodes[0].get_deterministic_priv_key().key]
- if not start_input_txid:
- start_input_txid = self.nodes[0].getblock(self.nodes[0].getblockhash(1))['tx'][0]
-
- if not end_address:
- end_address = self.get_new_address()
-
+ wallet = MiniWallet(self.nodes[0])
first_block_hash = ''
tx_id = []
tx_size = []
@@ -61,41 +44,31 @@ class MempoolUpdateFromBlockTest(BitcoinTestFramework):
self.log.debug('Preparing transaction #{}...'.format(i))
# Prepare inputs.
if i == 0:
- inputs = [{'txid': start_input_txid, 'vout': 0}]
- inputs_value = self.nodes[0].gettxout(start_input_txid, 0)['value']
+ inputs = [wallet.get_utxo()] # let MiniWallet provide a start UTXO
else:
inputs = []
- inputs_value = 0
for j, tx in enumerate(tx_id[0:i]):
# Transaction tx[K] is a child of each of previous transactions tx[0]..tx[K-1] at their output K-1.
vout = i - j - 1
- inputs.append({'txid': tx_id[j], 'vout': vout})
- inputs_value += self.nodes[0].gettxout(tx, vout)['value']
-
- self.log.debug('inputs={}'.format(inputs))
- self.log.debug('inputs_value={}'.format(inputs_value))
+ inputs.append(wallet.get_utxo(txid=tx_id[j], vout=vout))
# Prepare outputs.
tx_count = i + 1
if tx_count < size:
# Transaction tx[K] is an ancestor of each of subsequent transactions tx[K+1]..tx[N-1].
n_outputs = size - tx_count
- output_value = ((inputs_value - fee) / Decimal(n_outputs)).quantize(Decimal('0.00000001'))
- outputs = {}
- for _ in range(n_outputs):
- outputs[self.get_new_address()] = output_value
else:
- output_value = (inputs_value - fee).quantize(Decimal('0.00000001'))
- outputs = {end_address: output_value}
-
- self.log.debug('output_value={}'.format(output_value))
- self.log.debug('outputs={}'.format(outputs))
+ n_outputs = 1
# Create a new transaction.
- unsigned_raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
- signed_raw_tx = self.nodes[0].signrawtransactionwithkey(unsigned_raw_tx, self.priv_keys)
- tx_id.append(self.nodes[0].sendrawtransaction(signed_raw_tx['hex']))
- tx_size.append(self.nodes[0].getmempoolentry(tx_id[-1])['vsize'])
+ new_tx = wallet.send_self_transfer_multi(
+ from_node=self.nodes[0],
+ utxos_to_spend=inputs,
+ num_outputs=n_outputs,
+ fee_per_output=ceil(fee / n_outputs)
+ )
+ tx_id.append(new_tx['txid'])
+ tx_size.append(new_tx['tx'].get_vsize())
if tx_count in n_tx_to_mine:
# The created transactions are mined into blocks by batches.
diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py
index 3109ad2b56..ea4999a965 100755
--- a/test/functional/p2p_invalid_messages.py
+++ b/test/functional/p2p_invalid_messages.py
@@ -13,11 +13,12 @@ from test_framework.messages import (
MAX_HEADERS_RESULTS,
MAX_INV_SIZE,
MAX_PROTOCOL_MESSAGE_LENGTH,
+ MSG_TX,
+ from_hex,
msg_getdata,
msg_headers,
msg_inv,
msg_ping,
- MSG_TX,
msg_version,
ser_string,
)
@@ -73,6 +74,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
self.test_oversized_inv_msg()
self.test_oversized_getdata_msg()
self.test_oversized_headers_msg()
+ self.test_invalid_pow_headers_msg()
self.test_resource_exhaustion()
def test_buffer(self):
@@ -248,6 +250,36 @@ class InvalidMessagesTest(BitcoinTestFramework):
size = MAX_HEADERS_RESULTS + 1
self.test_oversized_msg(msg_headers([CBlockHeader()] * size), size)
+ def test_invalid_pow_headers_msg(self):
+ self.log.info("Test headers message with invalid proof-of-work is logged as misbehaving and disconnects peer")
+ blockheader_tip_hash = self.nodes[0].getbestblockhash()
+ blockheader_tip = from_hex(CBlockHeader(), self.nodes[0].getblockheader(blockheader_tip_hash, False))
+
+ # send valid headers message first
+ assert_equal(self.nodes[0].getblockchaininfo()['headers'], 0)
+ blockheader = CBlockHeader()
+ blockheader.hashPrevBlock = int(blockheader_tip_hash, 16)
+ blockheader.nTime = int(time.time())
+ blockheader.nBits = blockheader_tip.nBits
+ blockheader.rehash()
+ while not blockheader.hash.startswith('0'):
+ blockheader.nNonce += 1
+ blockheader.rehash()
+ peer = self.nodes[0].add_p2p_connection(P2PInterface())
+ peer.send_and_ping(msg_headers([blockheader]))
+ assert_equal(self.nodes[0].getblockchaininfo()['headers'], 1)
+ chaintips = self.nodes[0].getchaintips()
+ assert_equal(chaintips[0]['status'], 'headers-only')
+ assert_equal(chaintips[0]['hash'], blockheader.hash)
+
+ # invalidate PoW
+ while not blockheader.hash.startswith('f'):
+ blockheader.nNonce += 1
+ blockheader.rehash()
+ with self.nodes[0].assert_debug_log(['Misbehaving', 'header with invalid proof of work']):
+ peer.send_message(msg_headers([blockheader]))
+ peer.wait_for_disconnect()
+
def test_resource_exhaustion(self):
self.log.info("Test node stays up despite many large junk messages")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
diff --git a/test/functional/rpc_decodescript.py b/test/functional/rpc_decodescript.py
index a61710b739..8df3b68785 100755
--- a/test/functional/rpc_decodescript.py
+++ b/test/functional/rpc_decodescript.py
@@ -263,6 +263,16 @@ class DecodeScriptTest(BitcoinTestFramework):
rpc_result = self.nodes[0].decodescript(script)
assert_equal(result, rpc_result)
+ def decodescript_miniscript(self):
+ """Check that a Miniscript is decoded when possible under P2WSH context."""
+ # Sourced from https://github.com/bitcoin/bitcoin/pull/27037#issuecomment-1416151907.
+ # Miniscript-compatible offered HTLC
+ res = self.nodes[0].decodescript("82012088a914ffffffffffffffffffffffffffffffffffffffff88210250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0ad51b2")
+ assert res["segwit"]["desc"] == "wsh(and_v(and_v(v:hash160(ffffffffffffffffffffffffffffffffffffffff),v:pk(0250929b74c1a04954b78b4b6035e97a5e078a5a0f28ec96d547bfee9ace803ac0)),older(1)))#gm8xz4fl"
+ # Miniscript-incompatible offered HTLC
+ res = self.nodes[0].decodescript("82012088a914ffffffffffffffffffffffffffffffffffffffff882102ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffacb2")
+ assert res["segwit"]["desc"] == "wsh(raw(82012088a914ffffffffffffffffffffffffffffffffffffffff882102ffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffacb2))#ra6w2xa7"
+
def run_test(self):
self.log.info("Test decoding of standard input scripts [scriptSig]")
self.decodescript_script_sig()
@@ -272,6 +282,8 @@ class DecodeScriptTest(BitcoinTestFramework):
self.decoderawtransaction_asm_sighashtype()
self.log.info("Data-driven tests")
self.decodescript_datadriven_tests()
+ self.log.info("Miniscript descriptor decoding")
+ self.decodescript_miniscript()
if __name__ == '__main__':
DecodeScriptTest().main()
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index 6ff4e4ee54..513b795478 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -880,6 +880,11 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
if platform.system() != "Linux":
raise SkipTest("not on a Linux system")
+ def skip_if_platform_not_posix(self):
+ """Skip the running test if we are not on a POSIX platform"""
+ if os.name != 'posix':
+ raise SkipTest("not on a POSIX system")
+
def skip_if_no_bitcoind_zmq(self):
"""Skip the running test if bitcoind has not been compiled with zmq support."""
if not self.is_zmq_compiled():
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index a301cf9184..0fe28b3855 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -211,6 +211,7 @@ BASE_SCRIPTS = [
'p2p_addrv2_relay.py',
'p2p_compactblocks_hb.py',
'p2p_disconnect_ban.py',
+ 'feature_posix_fs_permissions.py',
'rpc_decodescript.py',
'rpc_blockchain.py',
'rpc_deprecated.py',
diff --git a/test/functional/wallet_backwards_compatibility.py b/test/functional/wallet_backwards_compatibility.py
index f55a3758ce..76aac3e486 100755
--- a/test/functional/wallet_backwards_compatibility.py
+++ b/test/functional/wallet_backwards_compatibility.py
@@ -33,11 +33,12 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
- self.num_nodes = 10
+ self.num_nodes = 11
# Add new version after each release:
self.extra_args = [
["-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # Pre-release: use to mine blocks. noban for immediate tx relay
["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # Pre-release: use to receive coins, swap wallets, etc
+ ["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # v24.0.1
["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # v23.0
["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # v22.0
["-nowallet", "-walletrbf=1", "-addresstype=bech32", "-whitelist=noban@127.0.0.1"], # v0.21.0
@@ -57,6 +58,7 @@ class BackwardsCompatibilityTest(BitcoinTestFramework):
self.add_nodes(self.num_nodes, extra_args=self.extra_args, versions=[
None,
None,
+ 240001,
230000,
220000,
210000,
diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py
index a2ae997ecb..ad79e0288c 100755
--- a/test/functional/wallet_bumpfee.py
+++ b/test/functional/wallet_bumpfee.py
@@ -81,7 +81,7 @@ class BumpFeeTest(BitcoinTestFramework):
self.log.info("Running tests")
dest_address = peer_node.getnewaddress()
- for mode in ["default", "fee_rate"]:
+ for mode in ["default", "fee_rate", "new_outputs"]:
test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address)
self.test_invalid_parameters(rbf_node, peer_node, dest_address)
test_segwit_bumpfee_succeeds(self, rbf_node, dest_address)
@@ -157,6 +157,14 @@ class BumpFeeTest(BitcoinTestFramework):
assert_raises_rpc_error(-8, 'Invalid estimate_mode parameter, must be one of: "unset", "economical", "conservative"',
rbf_node.bumpfee, rbfid, {"estimate_mode": mode})
+ self.log.info("Test invalid outputs values")
+ assert_raises_rpc_error(-8, "Invalid parameter, output argument cannot be an empty array",
+ rbf_node.bumpfee, rbfid, {"outputs": []})
+ assert_raises_rpc_error(-8, "Invalid parameter, duplicated address: " + dest_address,
+ rbf_node.bumpfee, rbfid, {"outputs": [{dest_address: 0.1}, {dest_address: 0.2}]})
+ assert_raises_rpc_error(-8, "Invalid parameter, duplicate key: data",
+ rbf_node.bumpfee, rbfid, {"outputs": [{"data": "deadbeef"}, {"data": "deadbeef"}]})
+
self.clear_mempool()
@@ -169,6 +177,10 @@ def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
if mode == "fee_rate":
bumped_psbt = rbf_node.psbtbumpfee(rbfid, {"fee_rate": str(NORMAL)})
bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate": NORMAL})
+ elif mode == "new_outputs":
+ new_address = peer_node.getnewaddress()
+ bumped_psbt = rbf_node.psbtbumpfee(rbfid, {"outputs": {new_address: 0.0003}})
+ bumped_tx = rbf_node.bumpfee(rbfid, {"outputs": {new_address: 0.0003}})
else:
bumped_psbt = rbf_node.psbtbumpfee(rbfid)
bumped_tx = rbf_node.bumpfee(rbfid)
@@ -192,6 +204,10 @@ def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
bumpedwtx = rbf_node.gettransaction(bumped_tx["txid"])
assert_equal(oldwtx["replaced_by_txid"], bumped_tx["txid"])
assert_equal(bumpedwtx["replaces_txid"], rbfid)
+ # if this is a new_outputs test, check that outputs were indeed replaced
+ if mode == "new_outputs":
+ assert len(bumpedwtx["details"]) == 1
+ assert bumpedwtx["details"][0]["address"] == new_address
self.clear_mempool()
@@ -628,12 +644,14 @@ def test_change_script_match(self, rbf_node, dest_address):
self.clear_mempool()
-def spend_one_input(node, dest_address, change_size=Decimal("0.00049000")):
+def spend_one_input(node, dest_address, change_size=Decimal("0.00049000"), data=None):
tx_input = dict(
sequence=MAX_BIP125_RBF_SEQUENCE, **next(u for u in node.listunspent() if u["amount"] == Decimal("0.00100000")))
destinations = {dest_address: Decimal("0.00050000")}
if change_size > 0:
destinations[node.getrawchangeaddress()] = change_size
+ if data:
+ destinations['data'] = data
rawtx = node.createrawtransaction([tx_input], destinations)
signedtx = node.signrawtransactionwithwallet(rawtx)
txid = node.sendrawtransaction(signedtx["hex"])
diff --git a/test/functional/wallet_groups.py b/test/functional/wallet_groups.py
index 83c1826a41..bdb9081261 100755
--- a/test/functional/wallet_groups.py
+++ b/test/functional/wallet_groups.py
@@ -41,6 +41,11 @@ class WalletGroupTest(BitcoinTestFramework):
def run_test(self):
self.log.info("Setting up")
+ # To take full use of immediate tx relay, all nodes need to be reachable
+ # via inbound peers, i.e. connect first to last to close the circle
+ # (the default test network topology looks like this:
+ # node0 <-- node1 <-- node2 <-- node3 <-- node4 <-- node5)
+ self.connect_nodes(0, self.num_nodes - 1)
# Mine some coins
self.generate(self.nodes[0], COINBASE_MATURITY + 1)
diff --git a/test/functional/wallet_migration.py b/test/functional/wallet_migration.py
index 688ac98617..72c5fe7b84 100755
--- a/test/functional/wallet_migration.py
+++ b/test/functional/wallet_migration.py
@@ -163,6 +163,10 @@ class WalletMigrationTest(BitcoinTestFramework):
assert_equal(basic2.getbalance(), basic2_balance)
self.assert_list_txs_equal(basic2.listtransactions(), basic2_txs)
+ # Now test migration on a descriptor wallet
+ self.log.info("Test \"nothing to migrate\" when the user tries to migrate a wallet with no legacy data")
+ assert_raises_rpc_error(-4, "Error: This wallet is already a descriptor wallet", basic2.migratewallet)
+
def test_multisig(self):
default = self.nodes[0].get_wallet_rpc(self.default_wallet_name)
diff --git a/test/functional/wallet_miniscript.py b/test/functional/wallet_miniscript.py
index cefcaf4dc7..7bc3424bf4 100755
--- a/test/functional/wallet_miniscript.py
+++ b/test/functional/wallet_miniscript.py
@@ -5,19 +5,137 @@
"""Test Miniscript descriptors integration in the wallet."""
from test_framework.descriptors import descsum_create
+from test_framework.psbt import PSBT, PSBT_IN_SHA256
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
+TPRVS = [
+ "tprv8ZgxMBicQKsPerQj6m35no46amfKQdjY7AhLnmatHYXs8S4MTgeZYkWAn4edSGwwL3vkSiiGqSZQrmy5D3P5gBoqgvYP2fCUpBwbKTMTAkL",
+ "tprv8ZgxMBicQKsPd3cbrKjE5GKKJLDEidhtzSSmPVtSPyoHQGL2LZw49yt9foZsN9BeiC5VqRaESUSDV2PS9w7zAVBSK6EQH3CZW9sMKxSKDwD",
+ "tprv8iF7W37EHnVEtDr9EFeyFjQJFL6SfGby2AnZ2vQARxTQHQXy9tdzZvBBVp8a19e5vXhskczLkJ1AZjqgScqWL4FpmXVp8LLjiorcrFK63Sr",
+]
+TPUBS = [
+ "tpubD6NzVbkrYhZ4YPAbyf6urxqqnmJF79PzQtyERAmvkSVS9fweCTjxjDh22Z5St9fGb1a5DUCv8G27nYupKP1Ctr1pkamJossoetzws1moNRn",
+ "tpubD6NzVbkrYhZ4YMQC15JS7QcrsAyfGrGiykweqMmPxTkEVScu7vCZLNpPXW1XphHwzsgmqdHWDQAfucbM72EEB1ZEyfgZxYvkZjYVXx1xS9p",
+ "tpubD6NzVbkrYhZ4YU9vM1s53UhD75UyJatx8EMzMZ3VUjR2FciNfLLkAw6a4pWACChzobTseNqdWk4G7ZdBqRDLtLSACKykTScmqibb1ZrCvJu",
+ "tpubD6NzVbkrYhZ4XRMcMFMMFvzVt6jaDAtjZhD7JLwdPdMm9xa76DnxYYP7w9TZGJDVFkek3ArwVsuacheqqPog8TH5iBCX1wuig8PLXim4n9a",
+ "tpubD6NzVbkrYhZ4WsqRzDmkL82SWcu42JzUvKWzrJHQ8EC2vEHRHkXj1De93sD3biLrKd8XGnamXURGjMbYavbszVDXpjXV2cGUERucLJkE6cy",
+ "tpubDEFLeBkKTm8aiYkySz8hXAXPVnPSfxMi7Fxhg9sejUrkwJuRWvPdLEiXjTDbhGbjLKCZUDUUibLxTnK5UP1q7qYrSnPqnNe7M8mvAW1STcc",
+]
+PUBKEYS = [
+ "02aebf2d10b040eb936a6f02f44ee82f8b34f5c1ccb20ff3949c2b28206b7c1068",
+ "030f64b922aee2fd597f104bc6cb3b670f1ca2c6c49b1071a1a6c010575d94fe5a",
+ "02abe475b199ec3d62fa576faee16a334fdb86ffb26dce75becebaaedf328ac3fe",
+ "0314f3dc33595b0d016bb522f6fe3a67680723d842c1b9b8ae6b59fdd8ab5cccb4",
+ "025eba3305bd3c829e4e1551aac7358e4178832c739e4fc4729effe428de0398ab",
+ "029ffbe722b147f3035c87cb1c60b9a5947dd49c774cc31e94773478711a929ac0",
+ "0211c7b2e18b6fd330f322de087da62da92ae2ae3d0b7cec7e616479cce175f183",
+]
+
MINISCRIPTS = [
# One of two keys
- "or_b(pk(tpubD6NzVbkrYhZ4XRMcMFMMFvzVt6jaDAtjZhD7JLwdPdMm9xa76DnxYYP7w9TZGJDVFkek3ArwVsuacheqqPog8TH5iBCX1wuig8PLXim4n9a/*),s:pk(tpubD6NzVbkrYhZ4WsqRzDmkL82SWcu42JzUvKWzrJHQ8EC2vEHRHkXj1De93sD3biLrKd8XGnamXURGjMbYavbszVDXpjXV2cGUERucLJkE6cy/*))",
+ f"or_b(pk({TPUBS[0]}/*),s:pk({TPUBS[1]}/*))",
# A script similar (same spending policy) to BOLT3's offered HTLC (with anchor outputs)
- "or_d(pk(tpubD6NzVbkrYhZ4XRMcMFMMFvzVt6jaDAtjZhD7JLwdPdMm9xa76DnxYYP7w9TZGJDVFkek3ArwVsuacheqqPog8TH5iBCX1wuig8PLXim4n9a/*),and_v(and_v(v:pk(tpubD6NzVbkrYhZ4WsqRzDmkL82SWcu42JzUvKWzrJHQ8EC2vEHRHkXj1De93sD3biLrKd8XGnamXURGjMbYavbszVDXpjXV2cGUERucLJkE6cy/*),or_c(pk(tpubD6NzVbkrYhZ4YNwtTWrKRJQzQX3PjPKeUQg1gYh1hiLMkk1cw8SRLgB1yb7JzE8bHKNt6EcZXkJ6AqpCZL1aaRSjnG36mLgbQvJZBNsjWnG/*),v:hash160(7f999c905d5e35cefd0a37673f746eb13fba3640))),older(1)))",
+ f"or_d(pk({TPUBS[0]}/*),and_v(and_v(v:pk({TPUBS[1]}/*),or_c(pk({TPUBS[2]}/*),v:hash160(7f999c905d5e35cefd0a37673f746eb13fba3640))),older(1)))",
# A Revault Unvault policy with the older() replaced by an after()
- "andor(multi(2,tpubD6NzVbkrYhZ4YMQC15JS7QcrsAyfGrGiykweqMmPxTkEVScu7vCZLNpPXW1XphHwzsgmqdHWDQAfucbM72EEB1ZEyfgZxYvkZjYVXx1xS9p/*,tpubD6NzVbkrYhZ4WkCyc7E3z6g6NkypHMiecnwc4DpWHTPqFdteRGkEKukdrSSyJGNnGrHNMfy4BCw2UXo5soYRCtCDDfy4q8pc8oyB7RgTFv8/*),and_v(v:multi(4,030f64b922aee2fd597f104bc6cb3b670f1ca2c6c49b1071a1a6c010575d94fe5a,02abe475b199ec3d62fa576faee16a334fdb86ffb26dce75becebaaedf328ac3fe,0314f3dc33595b0d016bb522f6fe3a67680723d842c1b9b8ae6b59fdd8ab5cccb4,025eba3305bd3c829e4e1551aac7358e4178832c739e4fc4729effe428de0398ab),after(424242)),thresh(4,pkh(tpubD6NzVbkrYhZ4YVrNggiT2ptVHwnFbLBqDkCtV5HkxR4WtcRLAQReKTkqZGNcV6GE7cQsmpBzzSzhk16DUwB1gn1L7ZPnJF2dnNePP1uMBCY/*),a:pkh(tpubD6NzVbkrYhZ4YU9vM1s53UhD75UyJatx8EMzMZ3VUjR2FciNfLLkAw6a4pWACChzobTseNqdWk4G7ZdBqRDLtLSACKykTScmqibb1ZrCvJu/*),a:pkh(tpubD6NzVbkrYhZ4YUHcFfuH9iEBLiH8CBRJTpS7X3qjHmh82m1KCNbzs6w9gyK8oWHSZmKHWcakAXCGfbKg6xoCvKzQCWAHyxaC7QcWfmzyBf4/*),a:pkh(tpubD6NzVbkrYhZ4XXEmQtS3sgxpJbMyMg4McqRR1Af6ULzyrTRnhwjyr1etPD7svap9oFtJf4MM72brUb5o7uvF2Jyszc5c1t836fJW7SX2e8D/*)))",
+ f"andor(multi(2,{TPUBS[0]}/*,{TPUBS[1]}/*),and_v(v:multi(4,{PUBKEYS[0]},{PUBKEYS[1]},{PUBKEYS[2]},{PUBKEYS[3]}),after(424242)),thresh(4,pkh({TPUBS[2]}/*),a:pkh({TPUBS[3]}/*),a:pkh({TPUBS[4]}/*),a:pkh({TPUBS[5]}/*)))",
# Liquid-like federated pegin with emergency recovery keys
- "or_i(and_b(pk(029ffbe722b147f3035c87cb1c60b9a5947dd49c774cc31e94773478711a929ac0),a:and_b(pk(025f05815e3a1a8a83bfbb03ce016c9a2ee31066b98f567f6227df1d76ec4bd143),a:and_b(pk(025625f41e4a065efc06d5019cbbd56fe8c07595af1231e7cbc03fafb87ebb71ec),a:and_b(pk(02a27c8b850a00f67da3499b60562673dcf5fdfb82b7e17652a7ac54416812aefd),s:pk(03e618ec5f384d6e19ca9ebdb8e2119e5bef978285076828ce054e55c4daf473e2))))),and_v(v:thresh(2,pkh(tpubD6NzVbkrYhZ4YK67cd5fDe4fBVmGB2waTDrAt1q4ey9HPq9veHjWkw3VpbaCHCcWozjkhgAkWpFrxuPMUrmXVrLHMfEJ9auoZA6AS1g3grC/*),a:pkh(033841045a531e1adf9910a6ec279589a90b3b8a904ee64ffd692bd08a8996c1aa),a:pkh(02aebf2d10b040eb936a6f02f44ee82f8b34f5c1ccb20ff3949c2b28206b7c1068)),older(4209713)))",
+ f"or_i(and_b(pk({PUBKEYS[0]}),a:and_b(pk({PUBKEYS[1]}),a:and_b(pk({PUBKEYS[2]}),a:and_b(pk({PUBKEYS[3]}),s:pk({PUBKEYS[4]}))))),and_v(v:thresh(2,pkh({TPUBS[0]}/*),a:pkh({PUBKEYS[5]}),a:pkh({PUBKEYS[6]})),older(4209713)))",
+]
+
+MINISCRIPTS_PRIV = [
+ # One of two keys, of which one private key is known
+ {
+ "ms": f"or_i(pk({TPRVS[0]}/*),pk({TPUBS[0]}/*))",
+ "sequence": None,
+ "locktime": None,
+ "sigs_count": 1,
+ "stack_size": 3,
+ },
+ # A more complex policy, that can't be satisfied through the first branch (need for a preimage)
+ {
+ "ms": f"andor(ndv:older(2),and_v(v:pk({TPRVS[0]}),sha256(2a8ce30189b2ec3200b47aeb4feaac8fcad7c0ba170389729f4898b0b7933bcb)),and_v(v:pkh({TPRVS[1]}),pk({TPRVS[2]}/*)))",
+ "sequence": 2,
+ "locktime": None,
+ "sigs_count": 3,
+ "stack_size": 5,
+ },
+ # The same policy but we provide the preimage. This path will be chosen as it's a smaller witness.
+ {
+ "ms": f"andor(ndv:older(2),and_v(v:pk({TPRVS[0]}),sha256(61e33e9dbfefc45f6a194187684d278f789fd4d5e207a357e79971b6519a8b12)),and_v(v:pkh({TPRVS[1]}),pk({TPRVS[2]}/*)))",
+ "sequence": 2,
+ "locktime": None,
+ "sigs_count": 3,
+ "stack_size": 4,
+ "sha256_preimages": {
+ "61e33e9dbfefc45f6a194187684d278f789fd4d5e207a357e79971b6519a8b12": "e8774f330f5f330c23e8bbefc5595cb87009ddb7ac3b8deaaa8e9e41702d919c"
+ },
+ },
+ # Signature with a relative timelock
+ {
+ "ms": f"and_v(v:older(2),pk({TPRVS[0]}/*))",
+ "sequence": 2,
+ "locktime": None,
+ "sigs_count": 1,
+ "stack_size": 2,
+ },
+ # Signature with an absolute timelock
+ {
+ "ms": f"and_v(v:after(20),pk({TPRVS[0]}/*))",
+ "sequence": None,
+ "locktime": 20,
+ "sigs_count": 1,
+ "stack_size": 2,
+ },
+ # Signature with both
+ {
+ "ms": f"and_v(v:older(4),and_v(v:after(30),pk({TPRVS[0]}/*)))",
+ "sequence": 4,
+ "locktime": 30,
+ "sigs_count": 1,
+ "stack_size": 2,
+ },
+ # We have one key on each branch; Core signs both (can't finalize)
+ {
+ "ms": f"c:andor(pk({TPRVS[0]}/*),pk_k({TPUBS[0]}),and_v(v:pk({TPRVS[1]}),pk_k({TPUBS[1]})))",
+ "sequence": None,
+ "locktime": None,
+ "sigs_count": 2,
+ "stack_size": None,
+ },
+ # We have all the keys, wallet selects the timeout path to sign since it's smaller and sequence is set
+ {
+ "ms": f"andor(pk({TPRVS[0]}/*),pk({TPRVS[2]}),and_v(v:pk({TPRVS[1]}),older(10)))",
+ "sequence": 10,
+ "locktime": None,
+ "sigs_count": 3,
+ "stack_size": 3,
+ },
+ # We have all the keys, wallet selects the primary path to sign unconditionally since nsequence wasn't set to be valid for timeout path
+ {
+ "ms": f"andor(pk({TPRVS[0]}/*),pk({TPRVS[2]}),and_v(v:pkh({TPRVS[1]}),older(10)))",
+ "sequence": None,
+ "locktime": None,
+ "sigs_count": 3,
+ "stack_size": 3,
+ },
+ # Finalizes to the smallest valid witness, regardless of sequence
+ {
+ "ms": f"or_d(pk({TPRVS[0]}/*),and_v(v:pk({TPRVS[1]}),and_v(v:pk({TPRVS[2]}),older(10))))",
+ "sequence": 12,
+ "locktime": None,
+ "sigs_count": 3,
+ "stack_size": 2,
+ },
+ # Liquid-like federated pegin with emergency recovery privkeys
+ {
+ "ms": f"or_i(and_b(pk({TPUBS[0]}/*),a:and_b(pk({TPUBS[1]}),a:and_b(pk({TPUBS[2]}),a:and_b(pk({TPUBS[3]}),s:pk({PUBKEYS[0]}))))),and_v(v:thresh(2,pkh({TPRVS[0]}),a:pkh({TPRVS[1]}),a:pkh({TPUBS[4]})),older(42)))",
+ "sequence": 42,
+ "locktime": None,
+ "sigs_count": 2,
+ "stack_size": 8,
+ },
]
@@ -62,7 +180,77 @@ class WalletMiniscriptTest(BitcoinTestFramework):
lambda: len(self.ms_wo_wallet.listunspent(minconf=0, addresses=[addr])) == 1
)
utxo = self.ms_wo_wallet.listunspent(minconf=0, addresses=[addr])[0]
- assert utxo["txid"] == txid and not utxo["solvable"] # No satisfaction logic (yet)
+ assert utxo["txid"] == txid and utxo["solvable"]
+
+ def signing_test(
+ self, ms, sequence, locktime, sigs_count, stack_size, sha256_preimages
+ ):
+ self.log.info(f"Importing private Miniscript '{ms}'")
+ desc = descsum_create(f"wsh({ms})")
+ res = self.ms_sig_wallet.importdescriptors(
+ [
+ {
+ "desc": desc,
+ "active": True,
+ "range": 0,
+ "next_index": 0,
+ "timestamp": "now",
+ }
+ ]
+ )
+ assert res[0]["success"], res
+
+ self.log.info("Generating an address for it and testing it detects funds")
+ addr = self.ms_sig_wallet.getnewaddress()
+ txid = self.funder.sendtoaddress(addr, 0.01)
+ self.wait_until(lambda: txid in self.funder.getrawmempool())
+ self.funder.generatetoaddress(1, self.funder.getnewaddress())
+ utxo = self.ms_sig_wallet.listunspent(addresses=[addr])[0]
+ assert txid == utxo["txid"] and utxo["solvable"]
+
+ self.log.info("Creating a transaction spending these funds")
+ dest_addr = self.funder.getnewaddress()
+ seq = sequence if sequence is not None else 0xFFFFFFFF - 2
+ lt = locktime if locktime is not None else 0
+ psbt = self.ms_sig_wallet.createpsbt(
+ [
+ {
+ "txid": txid,
+ "vout": utxo["vout"],
+ "sequence": seq,
+ }
+ ],
+ [{dest_addr: 0.009}],
+ lt,
+ )
+
+ self.log.info("Signing it and checking the satisfaction.")
+ if sha256_preimages is not None:
+ psbt = PSBT.from_base64(psbt)
+ for (h, preimage) in sha256_preimages.items():
+ k = PSBT_IN_SHA256.to_bytes(1, "big") + bytes.fromhex(h)
+ psbt.i[0].map[k] = bytes.fromhex(preimage)
+ psbt = psbt.to_base64()
+ res = self.ms_sig_wallet.walletprocesspsbt(psbt=psbt, finalize=False)
+ psbtin = self.nodes[0].rpc.decodepsbt(res["psbt"])["inputs"][0]
+ assert len(psbtin["partial_signatures"]) == sigs_count
+ res = self.ms_sig_wallet.finalizepsbt(res["psbt"])
+ assert res["complete"] == (stack_size is not None)
+
+ if stack_size is not None:
+ txin = self.nodes[0].rpc.decoderawtransaction(res["hex"])["vin"][0]
+ assert len(txin["txinwitness"]) == stack_size, txin["txinwitness"]
+ self.log.info("Broadcasting the transaction.")
+ # If necessary, satisfy a relative timelock
+ if sequence is not None:
+ self.funder.generatetoaddress(sequence, self.funder.getnewaddress())
+ # If necessary, satisfy an absolute timelock
+ height = self.funder.getblockcount()
+ if locktime is not None and height < locktime:
+ self.funder.generatetoaddress(
+ locktime - height, self.funder.getnewaddress()
+ )
+ self.ms_sig_wallet.sendrawtransaction(res["hex"])
def run_test(self):
self.log.info("Making a descriptor wallet")
@@ -71,6 +259,8 @@ class WalletMiniscriptTest(BitcoinTestFramework):
wallet_name="ms_wo", descriptors=True, disable_private_keys=True
)
self.ms_wo_wallet = self.nodes[0].get_wallet_rpc("ms_wo")
+ self.nodes[0].createwallet(wallet_name="ms_sig", descriptors=True)
+ self.ms_sig_wallet = self.nodes[0].get_wallet_rpc("ms_sig")
# Sanity check we wouldn't let an insane Miniscript descriptor in
res = self.ms_wo_wallet.importdescriptors(
@@ -91,6 +281,17 @@ class WalletMiniscriptTest(BitcoinTestFramework):
for ms in MINISCRIPTS:
self.watchonly_test(ms)
+ # Test we can sign for any Miniscript.
+ for ms in MINISCRIPTS_PRIV:
+ self.signing_test(
+ ms["ms"],
+ ms["sequence"],
+ ms["locktime"],
+ ms["sigs_count"],
+ ms["stack_size"],
+ ms.get("sha256_preimages"),
+ )
+
if __name__ == "__main__":
WalletMiniscriptTest().main()
diff --git a/test/functional/wallet_pruning.py b/test/functional/wallet_pruning.py
index 6d8475ce8d..1ceceaee93 100755
--- a/test/functional/wallet_pruning.py
+++ b/test/functional/wallet_pruning.py
@@ -39,11 +39,15 @@ class WalletPruningTest(BitcoinTestFramework):
def mine_large_blocks(self, node, n):
# Get the block parameters for the first block
- best_block = node.getblock(node.getbestblockhash())
+ best_block = node.getblockheader(node.getbestblockhash())
height = int(best_block["height"]) + 1
self.nTime = max(self.nTime, int(best_block["time"])) + 1
previousblockhash = int(best_block["hash"], 16)
big_script = CScript([OP_RETURN] + [OP_TRUE] * 950000)
+ # Set mocktime to accept all future blocks
+ for i in self.nodes:
+ if i.running:
+ i.setmocktime(self.nTime + 600 * n)
for _ in range(n):
block = create_block(hashprev=previousblockhash, ntime=self.nTime, coinbase=create_coinbase(height, script_pubkey=big_script))
block.solve()
@@ -57,9 +61,6 @@ class WalletPruningTest(BitcoinTestFramework):
# Simulate 10 minutes of work time per block
# Important for matching a timestamp with a block +- some window
self.nTime += 600
- for n in self.nodes:
- if n.running:
- n.setmocktime(self.nTime) # Update node's time to accept future blocks
self.sync_all()
def test_wallet_import_pruned(self, wallet_name):
@@ -122,7 +123,7 @@ class WalletPruningTest(BitcoinTestFramework):
# A blk*.dat file is 128MB
# Generate 250 light blocks
- self.generate(self.nodes[0], 250, sync_fun=self.no_op)
+ self.generate(self.nodes[0], 250)
# Generate 50MB worth of large blocks in the blk00000.dat file
self.mine_large_blocks(self.nodes[0], 50)
diff --git a/test/get_previous_releases.py b/test/get_previous_releases.py
index 7f5f15655c..60c868ca04 100755
--- a/test/get_previous_releases.py
+++ b/test/get_previous_releases.py
@@ -80,6 +80,15 @@ SHA256_SUMS = {
"078f96b1e92895009c798ab827fb3fde5f6719eee886bd0c0e93acab18ea4865": {"tag": "v23.0", "tarball": "bitcoin-23.0-riscv64-linux-gnu.tar.gz"},
"c816780583009a9dad426dc0c183c89be9da98906e1e2c7ebae91041c1aaaaf3": {"tag": "v23.0", "tarball": "bitcoin-23.0-x86_64-apple-darwin.tar.gz"},
"2cca490c1f2842884a3c5b0606f179f9f937177da4eadd628e3f7fd7e25d26d0": {"tag": "v23.0", "tarball": "bitcoin-23.0-x86_64-linux-gnu.tar.gz"},
+
+ "0b48b9e69b30037b41a1e6b78fb7cbcc48c7ad627908c99686e81f3802454609": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-aarch64-linux-gnu.tar.gz"},
+ "37d7660f0277301744e96426bbb001d2206b8d4505385dfdeedf50c09aaaef60": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-arm-linux-gnueabihf.tar.gz"},
+ "90ed59e86bfda1256f4b4cad8cc1dd77ee0efec2492bcb5af61402709288b62c": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-arm64-apple-darwin.tar.gz"},
+ "7590645e8676f8b5fda62dc20174474c4ac8fd0defc83a19ed908ebf2e94dc11": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-powerpc64-linux-gnu.tar.gz"},
+ "79e89a101f23ff87816675b98769cd1ee91059f95c5277f38f48f21a9f7f8509": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-powerpc64le-linux-gnu.tar.gz"},
+ "6b163cef7de4beb07b8cb3347095e0d76a584019b1891135cd1268a1f05b9d88": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-riscv64-linux-gnu.tar.gz"},
+ "e2f751512f3c0f00eb68ba946d9c829e6cf99422a61e8f5e0a7c109c318674d0": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-x86_64-apple-darwin.tar.gz"},
+ "49df6e444515d457ea0b885d66f521f2a26ca92ccf73d5296082e633544253bf": {"tag": "v24.0.1", "tarball": "bitcoin-24.0.1-x86_64-linux-gnu.tar.gz"},
}
diff --git a/test/lint/lint-locale-dependence.py b/test/lint/lint-locale-dependence.py
index c5cb34b20a..faea643882 100755
--- a/test/lint/lint-locale-dependence.py
+++ b/test/lint/lint-locale-dependence.py
@@ -34,8 +34,6 @@
#
# See https://doc.qt.io/qt-5/qcoreapplication.html#locale-settings and
# https://stackoverflow.com/a/34878283 for more details.
-#
-# TODO: Reduce KNOWN_VIOLATIONS by replacing uses of locale dependent snprintf with strprintf.
import re
import sys
@@ -45,7 +43,6 @@ from subprocess import check_output, CalledProcessError
KNOWN_VIOLATIONS = [
"src/dbwrapper.cpp:.*vsnprintf",
- "src/test/dbwrapper_tests.cpp:.*snprintf",
"src/test/fuzz/locale.cpp:.*setlocale",
"src/test/fuzz/string.cpp:.*strtol",
"src/test/fuzz/string.cpp:.*strtoul",
diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan
index 67ef512895..2fa4e383e2 100644
--- a/test/sanitizer_suppressions/ubsan
+++ b/test/sanitizer_suppressions/ubsan
@@ -53,6 +53,7 @@ unsigned-integer-overflow:policy/fees.cpp
unsigned-integer-overflow:prevector.h
unsigned-integer-overflow:script/interpreter.cpp
unsigned-integer-overflow:txmempool.cpp
+unsigned-integer-overflow:xoroshiro128plusplus.h
implicit-integer-sign-change:compat/stdin.cpp
implicit-integer-sign-change:compressor.h
implicit-integer-sign-change:crypto/
@@ -69,3 +70,4 @@ shift-base:crypto/
shift-base:hash.cpp
shift-base:streams.h
shift-base:util/bip32.cpp
+shift-base:xoroshiro128plusplus.h
diff --git a/test/util/rpcauth-test.py b/test/util/rpcauth-test.py
index 53058dc394..8a7ff26dcb 100755
--- a/test/util/rpcauth-test.py
+++ b/test/util/rpcauth-test.py
@@ -4,7 +4,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test share/rpcauth/rpcauth.py
"""
-import base64
+import re
import configparser
import hmac
import importlib
@@ -28,18 +28,17 @@ class TestRPCAuth(unittest.TestCase):
self.assertEqual(len(self.rpcauth.generate_salt(i)), i * 2)
def test_generate_password(self):
+ """Test that generated passwords only consist of urlsafe characters."""
+ r = re.compile(r"[0-9a-zA-Z_-]*")
password = self.rpcauth.generate_password()
- expected_password = base64.urlsafe_b64encode(
- base64.urlsafe_b64decode(password)).decode('utf-8')
- self.assertEqual(expected_password, password)
+ self.assertTrue(r.fullmatch(password))
def test_check_password_hmac(self):
salt = self.rpcauth.generate_salt(16)
password = self.rpcauth.generate_password()
password_hmac = self.rpcauth.password_to_hmac(salt, password)
- m = hmac.new(bytearray(salt, 'utf-8'),
- bytearray(password, 'utf-8'), 'SHA256')
+ m = hmac.new(salt.encode('utf-8'), password.encode('utf-8'), 'SHA256')
expected_password_hmac = m.hexdigest()
self.assertEqual(expected_password_hmac, password_hmac)