-rw-r--r--  .cirrus.yml | 56
-rw-r--r--  .github/workflows/ci.yml | 45
-rwxr-xr-x  ci/test/00_setup_env_native_asan.sh | 7
-rwxr-xr-x  ci/test/02_run_container.sh | 31
-rw-r--r--  configure.ac | 6
-rwxr-xr-x  contrib/devtools/symbol-check.py | 34
-rwxr-xr-x  contrib/devtools/test-security-check.py | 34
-rw-r--r--  contrib/guix/manifest.scm | 24
-rw-r--r--  contrib/guix/patches/glibc-2.27-fcommon.patch | 34
-rw-r--r--  contrib/guix/patches/glibc-2.27-no-librt.patch | 53
-rw-r--r--  contrib/guix/patches/glibc-2.27-powerpc-ldbrx.patch | 245
-rw-r--r--  contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch | 78
-rw-r--r--  contrib/guix/patches/glibc-guix-prefix.patch (renamed from contrib/guix/patches/glibc-2.27-guix-prefix.patch) | 8
-rw-r--r--  contrib/macdeploy/README.md | 34
-rwxr-xr-x  contrib/macdeploy/gen-sdk | 15
-rw-r--r--  contrib/verify-binaries/README.md | 10
-rwxr-xr-x  contrib/verify-binaries/test.py | 15
-rwxr-xr-x  contrib/verify-binaries/verify.py | 26
-rw-r--r--  depends/hosts/darwin.mk | 4
-rw-r--r--  doc/dependencies.md | 2
-rw-r--r--  doc/developer-notes.md | 5
-rw-r--r--  doc/init.md | 6
-rw-r--r--  doc/policy/packages.md | 25
-rw-r--r--  doc/release-28984.md | 6
-rw-r--r--  doc/release-notes-29987.md | 6
-rw-r--r--  doc/release-notes-30058.md | 7
-rw-r--r--  src/Makefile.am | 11
-rw-r--r--  src/Makefile.test.include | 1
-rw-r--r--  src/banman.h | 2
-rw-r--r--  src/bitcoin-chainstate.cpp | 10
-rw-r--r--  src/bitcoind.cpp | 3
-rw-r--r--  src/coins.cpp | 2
-rw-r--r--  src/httprpc.cpp | 19
-rw-r--r--  src/httpserver.cpp | 5
-rw-r--r--  src/httpserver.h | 9
-rw-r--r--  src/i2p.cpp | 28
-rw-r--r--  src/i2p.h | 8
-rw-r--r--  src/index/base.cpp | 3
-rw-r--r--  src/init.cpp | 78
-rw-r--r--  src/init/bitcoin-node.cpp | 1
-rw-r--r--  src/init/bitcoin-qt.cpp | 2
-rw-r--r--  src/init/bitcoind.cpp | 2
-rw-r--r--  src/interfaces/init.h | 2
-rw-r--r--  src/interfaces/mining.h | 84
-rw-r--r--  src/kernel/chainparams.cpp | 3
-rw-r--r--  src/kernel/notifications_interface.h | 5
-rw-r--r--  src/kernel/warning.h | 14
-rw-r--r--  src/mapport.cpp | 5
-rw-r--r--  src/net.cpp | 2
-rw-r--r--  src/net_processing.cpp | 189
-rw-r--r--  src/net_processing.h | 10
-rw-r--r--  src/netbase.cpp | 42
-rw-r--r--  src/netbase.h | 10
-rw-r--r--  src/node/abort.cpp | 7
-rw-r--r--  src/node/abort.h | 3
-rw-r--r--  src/node/context.cpp | 2
-rw-r--r--  src/node/context.h | 5
-rw-r--r--  src/node/interfaces.cpp | 72
-rw-r--r--  src/node/kernel_notifications.cpp | 31
-rw-r--r--  src/node/kernel_notifications.h | 13
-rw-r--r--  src/node/miner.cpp | 9
-rw-r--r--  src/node/miner.h | 1
-rw-r--r--  src/node/timeoffsets.cpp | 9
-rw-r--r--  src/node/timeoffsets.h | 10
-rw-r--r--  src/node/warnings.cpp | 68
-rw-r--r--  src/node/warnings.h | 90
-rw-r--r--  src/policy/v3_policy.cpp | 6
-rw-r--r--  src/randomenv.cpp | 14
-rw-r--r--  src/rest.cpp | 20
-rw-r--r--  src/rpc/blockchain.cpp | 4
-rw-r--r--  src/rpc/mining.cpp | 85
-rw-r--r--  src/rpc/net.cpp | 3
-rw-r--r--  src/rpc/request.cpp | 21
-rw-r--r--  src/rpc/request.h | 3
-rw-r--r--  src/rpc/server_util.cpp | 8
-rw-r--r--  src/rpc/server_util.h | 4
-rw-r--r--  src/rpc/util.cpp | 19
-rw-r--r--  src/rpc/util.h | 2
-rw-r--r--  src/secp256k1/.cirrus.yml | 4
-rw-r--r--  src/secp256k1/.github/workflows/ci.yml | 74
-rw-r--r--  src/secp256k1/CMakeLists.txt | 99
-rw-r--r--  src/secp256k1/README.md | 1
-rw-r--r--  src/secp256k1/build-aux/m4/bitcoin_secp.m4 | 16
-rw-r--r--  src/secp256k1/cmake/AllTargetsCompileOptions.cmake | 12
-rw-r--r--  src/secp256k1/cmake/CheckMemorySanitizer.cmake | 18
-rw-r--r--  src/secp256k1/configure.ac | 65
-rw-r--r--  src/secp256k1/src/modules/ecdh/tests_impl.h | 6
-rw-r--r--  src/secp256k1/src/modules/ellswift/tests_impl.h | 38
-rw-r--r--  src/secp256k1/src/modules/extrakeys/tests_impl.h | 36
-rw-r--r--  src/secp256k1/src/modules/recovery/tests_impl.h | 8
-rw-r--r--  src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h | 6
-rw-r--r--  src/secp256k1/src/modules/schnorrsig/tests_impl.h | 50
-rw-r--r--  src/secp256k1/src/secp256k1.c | 4
-rw-r--r--  src/secp256k1/src/testrand.h | 22
-rw-r--r--  src/secp256k1/src/testrand_impl.h | 42
-rw-r--r--  src/secp256k1/src/tests.c | 458
-rw-r--r--  src/secp256k1/src/tests_exhaustive.c | 10
-rw-r--r--  src/secp256k1/src/testutil.h | 121
-rw-r--r--  src/test/blockfilter_index_tests.cpp | 3
-rw-r--r--  src/test/blockmanager_tests.cpp | 4
-rw-r--r--  src/test/denialofservice_tests.cpp | 17
-rw-r--r--  src/test/fuzz/fuzz.cpp | 5
-rw-r--r--  src/test/fuzz/i2p.cpp | 2
-rw-r--r--  src/test/fuzz/package_eval.cpp | 2
-rw-r--r--  src/test/fuzz/rbf.cpp | 24
-rw-r--r--  src/test/fuzz/timeoffsets.cpp | 4
-rw-r--r--  src/test/i2p_tests.cpp | 13
-rw-r--r--  src/test/net_peer_connection_tests.cpp | 2
-rw-r--r--  src/test/node_warnings_tests.cpp | 52
-rw-r--r--  src/test/peerman_tests.cpp | 5
-rw-r--r--  src/test/timeoffsets_tests.cpp | 7
-rw-r--r--  src/test/transaction_tests.cpp | 80
-rw-r--r--  src/test/txpackage_tests.cpp | 144
-rw-r--r--  src/test/util/setup_common.cpp | 11
-rw-r--r--  src/test/util/txmempool.cpp | 28
-rw-r--r--  src/test/validation_block_tests.cpp | 6
-rw-r--r--  src/test/validation_chainstatemanager_tests.cpp | 2
-rw-r--r--  src/util/fs_helpers.cpp | 40
-rw-r--r--  src/util/fs_helpers.h | 14
-rw-r--r--  src/validation.cpp | 185
-rw-r--r--  src/wallet/feebumper.cpp | 2
-rw-r--r--  src/wallet/migrate.cpp | 2
-rw-r--r--  src/wallet/rpc/util.cpp | 2
-rw-r--r--  src/wallet/spend.cpp | 49
-rw-r--r--  src/wallet/test/fuzz/wallet_bdb_parser.cpp | 3
-rw-r--r--  src/wallet/wallet.cpp | 4
-rw-r--r--  src/warnings.cpp | 65
-rw-r--r--  src/warnings.h | 22
-rwxr-xr-x  test/functional/feature_framework_unit_tests.py | 1
-rwxr-xr-x  test/functional/feature_settings.py | 2
-rwxr-xr-x  test/functional/mempool_accept.py | 3
-rwxr-xr-x  test/functional/mempool_package_rbf.py | 587
-rwxr-xr-x  test/functional/p2p_addr_relay.py | 3
-rwxr-xr-x  test/functional/p2p_addrv2_relay.py | 12
-rwxr-xr-x  test/functional/p2p_handshake.py | 7
-rwxr-xr-x  test/functional/p2p_invalid_messages.py | 7
-rwxr-xr-x  test/functional/p2p_mutated_blocks.py | 9
-rwxr-xr-x  test/functional/p2p_sendheaders.py | 48
-rwxr-xr-x  test/functional/p2p_unrequested_blocks.py | 4
-rwxr-xr-x  test/functional/rpc_createmultisig.py | 11
-rwxr-xr-x  test/functional/rpc_generate.py | 2
-rwxr-xr-x  test/functional/rpc_users.py | 39
-rwxr-xr-x  test/functional/test_framework/script_util.py | 25
-rwxr-xr-x  test/functional/test_runner.py | 32
-rwxr-xr-x  test/functional/wallet_bumpfee.py | 25
-rwxr-xr-x  test/functional/wallet_fundrawtransaction.py | 33
-rwxr-xr-x  test/functional/wallet_listsinceblock.py | 26
-rwxr-xr-x  test/functional/wallet_send.py | 34
148 files changed, 2968 insertions, 1706 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 3c59e41a13..b37fce7002 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -8,27 +8,44 @@ env: # Global defaults
CCACHE_DIR: "/tmp/ccache_dir"
CCACHE_NOHASHDIR: "1" # Debug info might contain a stale path if the build dir changes, but this is fine
+# One or more self-hosted machines can be used via Cirrus CI. Each machine can be
+# configured with multiple users to run tasks in parallel. No sudo permission is required.
+#
# https://cirrus-ci.org/guide/persistent-workers/
#
-# It is possible to select a specific persistent worker by label. Refer to the
+# Generally, a persistent worker must run Ubuntu 23.04+ or Debian 12+.
+#
+# The following specific types should exist, with the following requirements:
+# - small: For an x86_64 machine, recommended to have 2 CPUs and 8 GB of memory.
+# - medium: For an x86_64 machine, recommended to have 4 CPUs and 16 GB of memory.
+# - arm64: For an aarch64 machine, recommended to have 2 CPUs and 8 GB of memory.
+#
+# CI jobs for the latter configuration can be run on x86_64 hardware
+# by installing qemu-user-static, which works out of the box with
+# podman or docker. Background: https://stackoverflow.com/a/72890225/313633
+#
+# The above machine types are matched to each task by their label. Refer to the
# Cirrus CI docs for more details.
#
-# Generally, a persistent worker must run Ubuntu 23.04+ or Debian 12+.
-# Specifically,
+# On machines that are persisted between CI jobs, RESTART_CI_DOCKER_BEFORE_RUN=1
+# ensures that previous containers and artifacts are cleared before each run.
+# This requires installing Podman instead of Docker.
+#
+# Furthermore:
# - apt-get is required due to PACKAGE_MANAGER_INSTALL
-# - podman-docker-4.1+ is required due to the use of `podman` when
-# RESTART_CI_DOCKER_BEFORE_RUN is set and 4.1+ due to the bugfix in 4.1
+# - podman-docker-4.1+ is required due to the bugfix in 4.1
# (https://github.com/bitcoin/bitcoin/pull/21652#issuecomment-1657098200)
-# - The ./ci/ depedencies (with cirrus-cli) should be installed:
+# - The ./ci/ dependencies (with cirrus-cli) should be installed. One-liner example
+# for a single user setup with sudo permission:
#
# ```
# apt update && apt install git screen python3 bash podman-docker curl -y && curl -L -o cirrus "https://github.com/cirruslabs/cirrus-cli/releases/latest/download/cirrus-linux-$(dpkg --print-architecture)" && mv cirrus /usr/local/bin/cirrus && chmod +x /usr/local/bin/cirrus
# ```
#
-# - There are no strict requirements on the hardware, because having less CPUs
-# runs the same CI script (maybe slower). To avoid rare and intermittent OOM
-# due to short memory usage spikes, it is recommended to add (and persist)
-# swap:
+# - There are no strict requirements on the hardware. Having fewer CPU threads
+# than recommended merely causes the CI script to run slower.
+# To avoid rare and intermittent OOM due to short memory usage spikes,
+# it is recommended to add (and persist) swap:
#
# ```
# fallocate -l 16G /swapfile_ci && chmod 600 /swapfile_ci && mkswap /swapfile_ci && swapon /swapfile_ci && ( echo '/swapfile_ci none swap sw 0 0' | tee -a /etc/fstab )
@@ -39,12 +56,6 @@ env: # Global defaults
# ```
# RESTART_CI_DOCKER_BEFORE_RUN=1 screen cirrus worker run --labels type=todo_fill_in_type --token todo_fill_in_token
# ```
-#
-# The following specific types should exist, with the following requirements:
-# - small: For an x86_64 machine, recommended to have 2 CPUs and 8 GB of memory.
-# - medium: For an x86_64 machine, recommended to have 4 CPUs and 16 GB of memory.
-# - noble: For a machine running the Linux kernel shipped with exaclty Ubuntu Noble 24.04. The machine is recommended to have 4 CPUs and 16 GB of memory.
-# - arm64: For an aarch64 machine, recommended to have 2 CPUs and 8 GB of memory.
# https://cirrus-ci.org/guide/tips-and-tricks/#sharing-configuration-between-tasks
filter_template: &FILTER_TEMPLATE
@@ -160,19 +171,6 @@ task:
FILE_ENV: "./ci/test/00_setup_env_native_msan.sh"
task:
- name: 'ASan + LSan + UBSan + integer, no depends, USDT'
- enable_bpfcc_script:
- # In the image build step, no external environment variables are available,
- # so any settings will need to be written to the settings env file:
- - sed -i "s|\${CIRRUS_CI}|true|g" ./ci/test/00_setup_env_native_asan.sh
- << : *GLOBAL_TASK_TEMPLATE
- persistent_worker:
- labels:
- type: noble # Must use this specific worker (needed for USDT functional tests)
- env:
- FILE_ENV: "./ci/test/00_setup_env_native_asan.sh"
-
-task:
name: 'fuzzer,address,undefined,integer, no depends'
<< : *GLOBAL_TASK_TEMPLATE
persistent_worker:
diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 0ea479b667..54795332e8 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -18,7 +18,6 @@ concurrency:
cancel-in-progress: true
env:
- DANGER_RUN_CI_ON_HOST: 1
CI_FAILFAST_TEST_LEAVE_DANGLING: 1 # GHA does not care about dangling processes and setting this variable avoids killing the CI script itself on error
MAKEJOBS: '-j10'
@@ -81,6 +80,7 @@ jobs:
timeout-minutes: 120
env:
+ DANGER_RUN_CI_ON_HOST: 1
FILE_ENV: './ci/test/00_setup_env_mac_native.sh'
BASE_ROOT_DIR: ${{ github.workspace }}
@@ -304,3 +304,46 @@ jobs:
BITCOINFUZZ: "${{ github.workspace}}\\src\\fuzz.exe"
shell: cmd
run: py -3 test\fuzz\test_runner.py --par %NUMBER_OF_PROCESSORS% --loglevel DEBUG %RUNNER_TEMP%\qa-assets\fuzz_seed_corpus
+
+ asan-lsan-ubsan-integer-no-depends-usdt:
+ name: 'ASan + LSan + UBSan + integer, no depends, USDT'
+ runs-on: ubuntu-24.04 # has to match container in ci/test/00_setup_env_native_asan.sh for tracing tools
+ # No need to run on the read-only mirror, unless it is a PR.
+ if: github.repository != 'bitcoin-core/gui' || github.event_name == 'pull_request'
+ timeout-minutes: 120
+ env:
+ FILE_ENV: "./ci/test/00_setup_env_native_asan.sh"
+ DANGER_CI_ON_HOST_CACHE_FOLDERS: 1
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Set Ccache directory
+ run: echo "CCACHE_DIR=${RUNNER_TEMP}/ccache_dir" >> "$GITHUB_ENV"
+
+ - name: Set base root directory
+ run: echo "BASE_ROOT_DIR=${RUNNER_TEMP}" >> "$GITHUB_ENV"
+
+ - name: Restore Ccache cache
+ id: ccache-cache
+ uses: actions/cache/restore@v4
+ with:
+ path: ${{ env.CCACHE_DIR }}
+ key: ${{ github.job }}-ccache-${{ github.run_id }}
+ restore-keys: ${{ github.job }}-ccache-
+
+ - name: Enable bpfcc script
+ # In the image build step, no external environment variables are available,
+ # so any settings will need to be written to the settings env file:
+ run: sed -i "s|\${INSTALL_BCC_TRACING_TOOLS}|true|g" ./ci/test/00_setup_env_native_asan.sh
+
+ - name: CI script
+ run: ./ci/test_run_all.sh
+
+ - name: Save Ccache cache
+ uses: actions/cache/save@v4
+ if: github.event_name != 'pull_request' && steps.ccache-cache.outputs.cache-hit != 'true'
+ with:
+ path: ${{ env.CCACHE_DIR }}
+ # https://github.com/actions/cache/blob/main/tips-and-workarounds.md#update-a-cache
+ key: ${{ github.job }}-ccache-${{ github.run_id }}
diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh
index 668e9ecc8a..23d9180f96 100755
--- a/ci/test/00_setup_env_native_asan.sh
+++ b/ci/test/00_setup_env_native_asan.sh
@@ -7,8 +7,10 @@
export LC_ALL=C.UTF-8
export CI_IMAGE_NAME_TAG="docker.io/ubuntu:24.04"
-# Only install BCC tracing packages in Cirrus CI.
-if [[ "${CIRRUS_CI}" == "true" ]]; then
+
+# Only install BCC tracing packages in CI. Container has to match the host for BCC to work.
+if [[ "${INSTALL_BCC_TRACING_TOOLS}" == "true" ]]; then
+ # Required for USDT functional tests to run
BPFCC_PACKAGE="bpfcc-tools linux-headers-$(uname --kernel-release)"
export CI_CONTAINER_CAP="--privileged -v /sys/kernel:/sys/kernel:rw"
else
@@ -24,3 +26,4 @@ export BITCOIN_CONFIG="--enable-usdt --enable-zmq --with-incompatible-bdb --with
CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' \
--with-sanitizers=address,float-divide-by-zero,integer,undefined \
CC='clang-18 -ftrivial-auto-var-init=pattern' CXX='clang++-18 -ftrivial-auto-var-init=pattern'"
+export CCACHE_MAXSIZE=300M
diff --git a/ci/test/02_run_container.sh b/ci/test/02_run_container.sh
index 86bb856d17..afd447c347 100755
--- a/ci/test/02_run_container.sh
+++ b/ci/test/02_run_container.sh
@@ -16,6 +16,7 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
# System-dependent env vars must be kept as is. So read them from the container.
docker run --rm "${CI_IMAGE_NAME_TAG}" bash -c "env | grep --extended-regexp '^(HOME|PATH|USER)='" | tee --append "/tmp/env-$USER-$CONTAINER_NAME"
echo "Creating $CI_IMAGE_NAME_TAG container to run in"
+
DOCKER_BUILDKIT=1 docker build \
--file "${BASE_READ_ONLY_DIR}/ci/test_imagefile" \
--build-arg "CI_IMAGE_NAME_TAG=${CI_IMAGE_NAME_TAG}" \
@@ -23,11 +24,32 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
--label="${CI_IMAGE_LABEL}" \
--tag="${CONTAINER_NAME}" \
"${BASE_READ_ONLY_DIR}"
+
docker volume create "${CONTAINER_NAME}_ccache" || true
docker volume create "${CONTAINER_NAME}_depends" || true
docker volume create "${CONTAINER_NAME}_depends_sources" || true
docker volume create "${CONTAINER_NAME}_previous_releases" || true
+ CI_CCACHE_MOUNT="type=volume,src=${CONTAINER_NAME}_ccache,dst=$CCACHE_DIR"
+ CI_DEPENDS_MOUNT="type=volume,src=${CONTAINER_NAME}_depends,dst=$DEPENDS_DIR/built"
+ CI_DEPENDS_SOURCES_MOUNT="type=volume,src=${CONTAINER_NAME}_depends_sources,dst=$DEPENDS_DIR/sources"
+ CI_PREVIOUS_RELEASES_MOUNT="type=volume,src=${CONTAINER_NAME}_previous_releases,dst=$PREVIOUS_RELEASES_DIR"
+
+ if [ "$DANGER_CI_ON_HOST_CACHE_FOLDERS" ]; then
+ # ensure the directories exist
+ mkdir -p "${CCACHE_DIR}"
+ mkdir -p "${DEPENDS_DIR}/built"
+ mkdir -p "${DEPENDS_DIR}/sources"
+ mkdir -p "${PREVIOUS_RELEASES_DIR}"
+
+ CI_CCACHE_MOUNT="type=bind,src=${CCACHE_DIR},dst=$CCACHE_DIR"
+ CI_DEPENDS_MOUNT="type=bind,src=${DEPENDS_DIR}/built,dst=$DEPENDS_DIR/built"
+ CI_DEPENDS_SOURCES_MOUNT="type=bind,src=${DEPENDS_DIR}/sources,dst=$DEPENDS_DIR/sources"
+ CI_PREVIOUS_RELEASES_MOUNT="type=bind,src=${PREVIOUS_RELEASES_DIR},dst=$PREVIOUS_RELEASES_DIR"
+ fi
+
+ docker network create --ipv6 --subnet 1111:1111::/112 ci-ip6net || true
+
if [ -n "${RESTART_CI_DOCKER_BEFORE_RUN}" ] ; then
echo "Restart docker before run to stop and clear all containers started with --rm"
podman container rm --force --all # Similar to "systemctl restart docker"
@@ -48,12 +70,13 @@ if [ -z "$DANGER_RUN_CI_ON_HOST" ]; then
# shellcheck disable=SC2086
CI_CONTAINER_ID=$(docker run --cap-add LINUX_IMMUTABLE $CI_CONTAINER_CAP --rm --interactive --detach --tty \
--mount "type=bind,src=$BASE_READ_ONLY_DIR,dst=$BASE_READ_ONLY_DIR,readonly" \
- --mount "type=volume,src=${CONTAINER_NAME}_ccache,dst=$CCACHE_DIR" \
- --mount "type=volume,src=${CONTAINER_NAME}_depends,dst=$DEPENDS_DIR/built" \
- --mount "type=volume,src=${CONTAINER_NAME}_depends_sources,dst=$DEPENDS_DIR/sources" \
- --mount "type=volume,src=${CONTAINER_NAME}_previous_releases,dst=$PREVIOUS_RELEASES_DIR" \
+ --mount "${CI_CCACHE_MOUNT}" \
+ --mount "${CI_DEPENDS_MOUNT}" \
+ --mount "${CI_DEPENDS_SOURCES_MOUNT}" \
+ --mount "${CI_PREVIOUS_RELEASES_MOUNT}" \
--env-file /tmp/env-$USER-$CONTAINER_NAME \
--name "$CONTAINER_NAME" \
+ --network ci-ip6net \
"$CONTAINER_NAME")
export CI_CONTAINER_ID
export CI_EXEC_CMD_PREFIX="docker exec ${CI_CONTAINER_ID}"
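As an aside, the cache-mount selection introduced above can be pictured with a minimal Python sketch (the helper name and argument list are illustrative assumptions, not part of the CI scripts): named Docker volumes are used by default, and host bind mounts are used when caches should persist on the host, as with DANGER_CI_ON_HOST_CACHE_FOLDERS.

    import os

    def ci_cache_mounts(container_name, ccache_dir, depends_dir, previous_releases_dir,
                        use_host_folders=False):
        """Build docker --mount arguments for the CI caches (illustrative only)."""
        targets = {
            "ccache": ccache_dir,
            "depends": f"{depends_dir}/built",
            "depends_sources": f"{depends_dir}/sources",
            "previous_releases": previous_releases_dir,
        }
        mounts = []
        for volume_suffix, dst in targets.items():
            if use_host_folders:
                os.makedirs(dst, exist_ok=True)  # ensure the host directories exist
                mounts.append(f"type=bind,src={dst},dst={dst}")
            else:
                mounts.append(f"type=volume,src={container_name}_{volume_suffix},dst={dst}")
        return mounts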
diff --git a/configure.ac b/configure.ac
index ab369cc98a..af0d1d1505 100644
--- a/configure.ac
+++ b/configure.ac
@@ -405,6 +405,7 @@ AX_CHECK_COMPILE_FLAG([-Wimplicit-fallthrough], [WARN_CXXFLAGS="$WARN_CXXFLAGS -
AX_CHECK_COMPILE_FLAG([-Wunreachable-code], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunreachable-code"], [], [$CXXFLAG_WERROR])
AX_CHECK_COMPILE_FLAG([-Wdocumentation], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wdocumentation"], [], [$CXXFLAG_WERROR])
AX_CHECK_COMPILE_FLAG([-Wself-assign], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wself-assign"], [], [$CXXFLAG_WERROR])
+AX_CHECK_COMPILE_FLAG([-Wundef], [WARN_CXXFLAGS="$WARN_CXXFLAGS -Wundef"], [], [$CXXFLAG_WERROR])
dnl Some compilers (gcc) ignore unknown -Wno-* options, but warn about all
dnl unknown options if any other warning is produced. Test the -Wfoo case, and
@@ -896,7 +897,7 @@ if test "$TARGET_OS" = "darwin"; then
AX_CHECK_LINK_FLAG([-Wl,-fixup_chains], [HARDENED_LDFLAGS="$HARDENED_LDFLAGS -Wl,-fixup_chains"], [], [$LDFLAG_WERROR])
fi
-AC_CHECK_HEADERS([sys/select.h sys/prctl.h sys/sysctl.h vm/vm_param.h sys/vmmeter.h sys/resources.h])
+AC_CHECK_HEADERS([sys/select.h sys/prctl.h vm/vm_param.h sys/vmmeter.h sys/resources.h])
AC_CHECK_DECLS([getifaddrs, freeifaddrs],[CHECK_SOCKET],,
[#include <sys/types.h>
@@ -1392,6 +1393,9 @@ if test "$with_libmultiprocess" = "yes" || test "$with_libmultiprocess" = "auto"
PKG_CHECK_MODULES([LIBMULTIPROCESS], [libmultiprocess], [
libmultiprocess_found=yes;
libmultiprocess_prefix=`$PKG_CONFIG --variable=prefix libmultiprocess`;
+ if test "$suppress_external_warnings" != "no" ; then
+ LIBMULTIPROCESS_CFLAGS=SUPPRESS_WARNINGS($LIBMULTIPROCESS_CFLAGS)
+ fi
], [true])
elif test "$with_libmultiprocess" != "no"; then
AC_MSG_ERROR([--with-libmultiprocess=$with_libmultiprocess value is not yes, auto, or no])
diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py
index 6613874ce3..c4e6bc81e1 100755
--- a/contrib/devtools/symbol-check.py
+++ b/contrib/devtools/symbol-check.py
@@ -14,31 +14,31 @@ import sys
import lief
-# Debian 10 (Buster) EOL: 2024. https://wiki.debian.org/LTS
+# Debian 11 (Bullseye) EOL: 2026. https://wiki.debian.org/LTS
#
-# - libgcc version 8.3.0 (https://packages.debian.org/search?suite=buster&arch=any&searchon=names&keywords=libgcc1)
-# - libc version 2.28 (https://packages.debian.org/search?suite=buster&arch=any&searchon=names&keywords=libc6)
+# - libgcc version 10.2.1 (https://packages.debian.org/bullseye/libgcc-s1)
+# - libc version 2.31 (https://packages.debian.org/source/bullseye/glibc)
#
-# Ubuntu 18.04 (Bionic) EOL: 2028. https://wiki.ubuntu.com/ReleaseTeam
+# Ubuntu 20.04 (Focal) EOL: 2030. https://wiki.ubuntu.com/ReleaseTeam
#
-# - libgcc version 8.4.0 (https://packages.ubuntu.com/bionic/libgcc1)
-# - libc version 2.27 (https://packages.ubuntu.com/bionic/libc6)
+# - libgcc version 10.5.0 (https://packages.ubuntu.com/focal/libgcc1)
+# - libc version 2.31 (https://packages.ubuntu.com/focal/libc6)
#
-# CentOS Stream 8 EOL: 2024. https://wiki.centos.org/About/Product
+# CentOS Stream 9 EOL: 2027. https://www.centos.org/cl-vs-cs/#end-of-life
#
-# - libgcc version 8.5.0 (http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/)
-# - libc version 2.28 (http://mirror.centos.org/centos/8-stream/AppStream/x86_64/os/Packages/)
+# - libgcc version 12.2.1 (https://mirror.stream.centos.org/9-stream/AppStream/x86_64/os/Packages/)
+# - libc version 2.34 (https://mirror.stream.centos.org/9-stream/AppStream/x86_64/os/Packages/)
#
# See https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html for more info.
MAX_VERSIONS = {
'GCC': (4,3,0),
'GLIBC': {
- lief.ELF.ARCH.x86_64: (2,27),
- lief.ELF.ARCH.ARM: (2,27),
- lief.ELF.ARCH.AARCH64:(2,27),
- lief.ELF.ARCH.PPC64: (2,27),
- lief.ELF.ARCH.RISCV: (2,27),
+ lief.ELF.ARCH.x86_64: (2,31),
+ lief.ELF.ARCH.ARM: (2,31),
+ lief.ELF.ARCH.AARCH64:(2,31),
+ lief.ELF.ARCH.PPC64: (2,31),
+ lief.ELF.ARCH.RISCV: (2,31),
},
'LIBATOMIC': (1,0),
'V': (0,5,0), # xkb (bitcoin-qt only)
@@ -212,6 +212,11 @@ def check_exported_symbols(binary) -> bool:
ok = False
return ok
+def check_RUNPATH(binary) -> bool:
+ assert binary.get(lief.ELF.DYNAMIC_TAGS.RUNPATH) is None
+ assert binary.get(lief.ELF.DYNAMIC_TAGS.RPATH) is None
+ return True
+
def check_ELF_libraries(binary) -> bool:
ok: bool = True
for library in binary.libraries:
@@ -277,6 +282,7 @@ lief.EXE_FORMATS.ELF: [
('LIBRARY_DEPENDENCIES', check_ELF_libraries),
('INTERPRETER_NAME', check_ELF_interpreter),
('ABI', check_ELF_ABI),
+ ('RUNPATH', check_RUNPATH),
],
lief.EXE_FORMATS.MACHO: [
('DYNAMIC_LIBRARIES', check_MACHO_libraries),
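For context, the effect of the raised glibc baseline can be sketched with a small standalone helper (hypothetical, independent of the lief-based code in symbol-check.py): a versioned symbol passes only if the glibc version it references does not exceed (2, 31).

    MAX_GLIBC = (2, 31)

    def glibc_symbol_ok(versioned_symbol, max_version=MAX_GLIBC):
        """Return True if a symbol such as 'pthread_create@GLIBC_2.31' is within the baseline."""
        _name, _, version = versioned_symbol.partition('@')
        if not version.startswith('GLIBC_'):
            return True  # not a glibc-versioned symbol
        parsed = tuple(int(part) for part in version[len('GLIBC_'):].split('.'))
        return parsed <= max_version

    assert glibc_symbol_ok('pthread_create@GLIBC_2.31')
    assert not glibc_symbol_ok('pthread_create@GLIBC_2.34')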
diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py
index dd0cf7030a..7bfd4d98da 100755
--- a/contrib/devtools/test-security-check.py
+++ b/contrib/devtools/test-security-check.py
@@ -59,32 +59,32 @@ class TestSecurityChecks(unittest.TestCase):
arch = get_arch(cc, source, executable)
if arch == lief.ARCHITECTURES.X86:
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
- (1, executable+': failed PIE NX RELRO Canary CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
- (1, executable+': failed PIE RELRO Canary CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE NX RELRO CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE RELRO CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE RELRO CONTROL_FLOW'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
(1, executable+': failed RELRO CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
(1, executable+': failed separate_code CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
(1, executable+': failed CONTROL_FLOW'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code', '-fcf-protection=full']),
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code', '-fcf-protection=full']),
(0, ''))
else:
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
- (1, executable+': failed PIE NX RELRO Canary'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fno-stack-protector','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
- (1, executable+': failed PIE RELRO Canary'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-zexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE NX RELRO'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
+ (1, executable+': failed PIE RELRO'))
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-no-pie','-fno-PIE', '-Wl,-z,separate-code']),
(1, executable+': failed PIE RELRO'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-znorelro','-pie','-fPIE', '-Wl,-z,separate-code']),
(1, executable+': failed RELRO'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,noseparate-code']),
(1, executable+': failed separate_code'))
- self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-fstack-protector-all','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
+ self.assertEqual(call_security_check(cc, source, executable, ['-Wl,-znoexecstack','-Wl,-zrelro','-Wl,-z,now','-pie','-fPIE', '-Wl,-z,separate-code']),
(0, ''))
clean_files(source, executable)
diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm
index 53569d7f7d..44fbfa1c0b 100644
--- a/contrib/guix/manifest.scm
+++ b/contrib/guix/manifest.scm
@@ -98,7 +98,7 @@ chain for " target " development."))
#:key
(base-gcc-for-libc linux-base-gcc)
(base-kernel-headers base-linux-kernel-headers)
- (base-libc glibc-2.27)
+ (base-libc glibc-2.31)
(base-gcc linux-base-gcc))
"Convenience wrapper around MAKE-CROSS-TOOLCHAIN with default values
desirable for building Bitcoin Core release binaries."
@@ -440,24 +440,21 @@ inspecting signatures in Mach-O binaries.")
(("-rpath=") "-rpath-link="))
#t))))))))
-(define-public glibc-2.27
+(define-public glibc-2.31
+ (let ((commit "8e30f03744837a85e33d84ccd34ed3abe30d37c3"))
(package
- (inherit glibc-2.31)
- (version "2.27")
+ (inherit glibc) ;; 2.35
+ (version "2.31")
(source (origin
(method git-fetch)
(uri (git-reference
(url "https://sourceware.org/git/glibc.git")
- (commit "73886db6218e613bd6d4edf529f11e008a6c2fa6")))
- (file-name (git-file-name "glibc" "73886db6218e613bd6d4edf529f11e008a6c2fa6"))
+ (commit commit)))
+ (file-name (git-file-name "glibc" commit))
(sha256
(base32
- "0azpb9cvnbv25zg8019rqz48h8i2257ngyjg566dlnp74ivrs9vq"))
- (patches (search-our-patches "glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch"
- "glibc-2.27-fcommon.patch"
- "glibc-2.27-guix-prefix.patch"
- "glibc-2.27-no-librt.patch"
- "glibc-2.27-powerpc-ldbrx.patch"))))
+ "1zi0s9yy5zkisw823vivn7zlj8w6g9p3mm7lmlqiixcxdkz4dbn6"))
+ (patches (search-our-patches "glibc-guix-prefix.patch"))))
(arguments
(substitute-keyword-arguments (package-arguments glibc)
((#:configure-flags flags)
@@ -473,12 +470,13 @@ inspecting signatures in Mach-O binaries.")
(lambda* (#:key outputs #:allow-other-keys)
;; Install the rpc data base file under `$out/etc/rpc'.
;; Otherwise build will fail with "Permission denied."
+ ;; Can be removed when we are building 2.32 or later.
(let ((out (assoc-ref outputs "out")))
(substitute* "sunrpc/Makefile"
(("^\\$\\(inst_sysconfdir\\)/rpc(.*)$" _ suffix)
(string-append out "/etc/rpc" suffix "\n"))
(("^install-others =.*$")
- (string-append "install-others = " out "/etc/rpc\n"))))))))))))
+ (string-append "install-others = " out "/etc/rpc\n")))))))))))))
(packages->manifest
(append
diff --git a/contrib/guix/patches/glibc-2.27-fcommon.patch b/contrib/guix/patches/glibc-2.27-fcommon.patch
deleted file mode 100644
index f8d14837fc..0000000000
--- a/contrib/guix/patches/glibc-2.27-fcommon.patch
+++ /dev/null
@@ -1,34 +0,0 @@
-commit 264a4a0dbe1f4369db315080034b500bed66016c
-Author: fanquake <fanquake@gmail.com>
-Date: Fri May 6 11:03:04 2022 +0100
-
- build: use -fcommon to retain legacy behaviour with GCC 10
-
- GCC 10 started using -fno-common by default, which causes issues with
- the powerpc builds using gibc 2.27. A patch was committed to glibc to fix
- the issue, 18363b4f010da9ba459b13310b113ac0647c2fcc but is non-trvial
- to backport, and was broken in at least one way, see the followup in
- commit 7650321ce037302bfc2f026aa19e0213b8d02fe6.
-
- For now, retain the legacy GCC behaviour by passing -fcommon when
- building glibc.
-
- https://gcc.gnu.org/onlinedocs/gcc/Code-Gen-Options.html.
- https://sourceware.org/git/?p=glibc.git;a=commit;h=18363b4f010da9ba459b13310b113ac0647c2fcc
- https://sourceware.org/git/?p=glibc.git;a=commit;h=7650321ce037302bfc2f026aa19e0213b8d02fe6
-
- This patch can be dropped when we are building with glibc 2.31+.
-
-diff --git a/Makeconfig b/Makeconfig
-index 86a71e5802..aa2166be60 100644
---- a/Makeconfig
-+++ b/Makeconfig
-@@ -896,7 +896,7 @@ ifeq "$(strip $(+cflags))" ""
- endif # $(+cflags) == ""
-
- +cflags += $(cflags-cpu) $(+gccwarn) $(+merge-constants) $(+math-flags) \
-- $(+stack-protector)
-+ $(+stack-protector) -fcommon
- +gcc-nowarn := -w
-
- # Don't duplicate options if we inherited variables from the parent.
diff --git a/contrib/guix/patches/glibc-2.27-no-librt.patch b/contrib/guix/patches/glibc-2.27-no-librt.patch
deleted file mode 100644
index 4f2092ba7e..0000000000
--- a/contrib/guix/patches/glibc-2.27-no-librt.patch
+++ /dev/null
@@ -1,53 +0,0 @@
-This patch can be dropped when we are building with glibc 2.30+.
-
-commit 6e41ef56c9baab719a02f1377b1e7ce7bff61e73
-Author: Florian Weimer <fweimer@redhat.com>
-Date: Fri Feb 8 10:21:56 2019 +0100
-
- rt: Turn forwards from librt to libc into compat symbols [BZ #24194]
-
- As the result of commit 6e6249d0b461b952d0f544792372663feb6d792a
- ("BZ#14743: Move clock_* symbols from librt to libc."), in glibc 2.17,
- clock_gettime, clock_getres, clock_settime, clock_getcpuclockid,
- clock_nanosleep were added to libc, and the file rt/clock-compat.c
- was added with forwarders to the actual implementations in libc.
- These forwarders were wrapped in
-
- #if SHLIB_COMPAT (librt, GLIBC_2_2, GLIBC_2_17)
-
- so that they are not present for newer architectures (such as
- powerpc64le) with a 2.17 or later ABI baseline. But the forwarders
- were not marked as compatibility symbols. As a result, on older
- architectures, historic configure checks such as
-
- AC_CHECK_LIB(rt, clock_gettime)
-
- still cause linking against librt, even though this is completely
- unnecessary. It also creates a needless porting hazard because
- architectures behave differently when it comes to symbol availability.
-
- Reviewed-by: Carlos O'Donell <carlos@redhat.com>
-
-diff --git a/rt/clock-compat.c b/rt/clock-compat.c
-index f816973c05..11e71aa890 100644
---- a/rt/clock-compat.c
-+++ b/rt/clock-compat.c
-@@ -30,14 +30,16 @@
- #if HAVE_IFUNC
- # undef INIT_ARCH
- # define INIT_ARCH()
--# define COMPAT_REDIRECT(name, proto, arglist) libc_ifunc (name, &__##name)
-+# define COMPAT_REDIRECT(name, proto, arglist) libc_ifunc (name, &__##name) \
-+ compat_symbol (librt, name, name, GLIBC_2_2);
- #else
- # define COMPAT_REDIRECT(name, proto, arglist) \
- int \
- name proto \
- { \
- return __##name arglist; \
-- }
-+ } \
-+ compat_symbol (librt, name, name, GLIBC_2_2);
- #endif
-
- COMPAT_REDIRECT (clock_getres,
diff --git a/contrib/guix/patches/glibc-2.27-powerpc-ldbrx.patch b/contrib/guix/patches/glibc-2.27-powerpc-ldbrx.patch
deleted file mode 100644
index 26716054c8..0000000000
--- a/contrib/guix/patches/glibc-2.27-powerpc-ldbrx.patch
+++ /dev/null
@@ -1,245 +0,0 @@
-From 50b0b3c9ff71ffd7ebbd74ae46844c3566478123 Mon Sep 17 00:00:00 2001
-From: "Gabriel F. T. Gomes" <gabrielftg@linux.ibm.com>
-Date: Mon, 27 May 2019 15:21:22 -0300
-Subject: [PATCH] powerpc: Fix build failures with current GCC
-
-Since GCC commit 271500 (svn), also known as the following commit on the
-git mirror:
-
-commit e154242724b084380e3221df7c08fcdbd8460674
-Author: amodra <amodra@138bc75d-0d04-0410-961f-82ee72b054a4>
-Date: Wed May 22 04:34:26 2019 +0000
-
- [RS6000] Don't pass -many to the assembler
-
-glibc builds are failing when an assembly implementation does not
-declare the correct '.machine' directive, or when no such directive is
-declared at all. For example, when a POWER6 instruction is used, but
-'.machine power6' is not declared, the assembler will fail with an error
-similar to the following:
-
- ../sysdeps/powerpc/powerpc64/power8/strcmp.S: Assembler messages:
- 24 ../sysdeps/powerpc/powerpc64/power8/strcmp.S:55: Error: unrecognized opcode: `cmpb'
-
-This patch adds '.machine powerN' directives where none existed, as well
-as it updates '.machine power7' directives on POWER8 files, because the
-minimum binutils version required to build glibc (binutils 2.25) now
-provides this machine version. It also adds '-many' to the assembler
-command used to build tst-set_ppr.c.
-
-Tested for powerpc, powerpc64, and powerpc64le, as well as with
-build-many-glibcs.py for powerpc targets.
-
-Reviewed-by: Tulio Magno Quites Machado Filho <tuliom@linux.ibm.com>
----
- sysdeps/powerpc/Makefile | 5 +++
- sysdeps/powerpc/powerpc64/power4/memcmp.S | 7 ++++
- sysdeps/powerpc/powerpc64/power7/strncmp.S | 1 +
- .../powerpc/powerpc64/power8/fpu/s_llround.S | 1 +
- sysdeps/powerpc/powerpc64/power8/strcasecmp.S | 36 ++++++-------------
- sysdeps/powerpc/powerpc64/power8/strcasestr.S | 14 ++------
- sysdeps/powerpc/powerpc64/power8/strcmp.S | 1 +
- 7 files changed, 28 insertions(+), 37 deletions(-)
-
-diff --git a/sysdeps/powerpc/Makefile b/sysdeps/powerpc/Makefile
-index 6aa683b03f..23126147df 100644
---- a/sysdeps/powerpc/Makefile
-+++ b/sysdeps/powerpc/Makefile
-@@ -45,6 +45,11 @@ ifeq ($(subdir),misc)
- sysdep_headers += sys/platform/ppc.h
- tests += test-gettimebase
- tests += tst-set_ppr
-+
-+# This test is expected to run and exit with EXIT_UNSUPPORTED on
-+# processors that do not implement the Power ISA 2.06 or greater.
-+# But the test makes use of instructions from Power ISA 2.06 and 2.07.
-+CFLAGS-tst-set_ppr.c += -Wa,-many
- endif
-
- ifneq (,$(filter %le,$(config-machine)))
-diff --git a/sysdeps/powerpc/powerpc64/power4/memcmp.S b/sysdeps/powerpc/powerpc64/power4/memcmp.S
-index e5319f101f..38dcf4c9a1 100644
---- a/sysdeps/powerpc/powerpc64/power4/memcmp.S
-+++ b/sysdeps/powerpc/powerpc64/power4/memcmp.S
-@@ -26,7 +26,14 @@
- # define MEMCMP memcmp
- #endif
-
-+#ifndef __LITTLE_ENDIAN__
- .machine power4
-+#else
-+/* Little endian is only available since POWER8, so it's safe to
-+ specify .machine as power8 (or older), even though this is a POWER4
-+ file. Since the little-endian code uses 'ldbrx', power7 is enough. */
-+ .machine power7
-+#endif
- ENTRY_TOCLESS (MEMCMP, 4)
- CALL_MCOUNT 3
-
-diff --git a/sysdeps/powerpc/powerpc64/power7/strncmp.S b/sysdeps/powerpc/powerpc64/power7/strncmp.S
-index 0c7429d19f..10f898c5a3 100644
---- a/sysdeps/powerpc/powerpc64/power7/strncmp.S
-+++ b/sysdeps/powerpc/powerpc64/power7/strncmp.S
-@@ -28,6 +28,7 @@
- const char *s2 [r4],
- size_t size [r5]) */
-
-+ .machine power7
- ENTRY_TOCLESS (STRNCMP, 5)
- CALL_MCOUNT 3
-
-diff --git a/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S b/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S
-index a22fc63bb3..84c76ba0f9 100644
---- a/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S
-+++ b/sysdeps/powerpc/powerpc64/power8/fpu/s_llround.S
-@@ -26,6 +26,7 @@
-
- /* long long [r3] llround (float x [fp1]) */
-
-+ .machine power8
- ENTRY_TOCLESS (__llround)
- CALL_MCOUNT 0
- frin fp1,fp1 /* Round to nearest +-0.5. */
-diff --git a/sysdeps/powerpc/powerpc64/power8/strcasecmp.S b/sysdeps/powerpc/powerpc64/power8/strcasecmp.S
-index 3a2efe2a64..eeacd40c7f 100644
---- a/sysdeps/powerpc/powerpc64/power8/strcasecmp.S
-+++ b/sysdeps/powerpc/powerpc64/power8/strcasecmp.S
-@@ -91,21 +91,7 @@
- 3: \
- TOLOWER()
-
--#ifdef _ARCH_PWR8
--# define VCLZD_V8_v7 vclzd v8, v7;
--# define MFVRD_R3_V1 mfvrd r3, v1;
--# define VSUBUDM_V9_V8 vsubudm v9, v9, v8;
--# define VPOPCNTD_V8_V8 vpopcntd v8, v8;
--# define VADDUQM_V7_V8 vadduqm v9, v7, v8;
--#else
--# define VCLZD_V8_v7 .long 0x11003fc2
--# define MFVRD_R3_V1 .long 0x7c230067
--# define VSUBUDM_V9_V8 .long 0x112944c0
--# define VPOPCNTD_V8_V8 .long 0x110047c3
--# define VADDUQM_V7_V8 .long 0x11274100
--#endif
--
-- .machine power7
-+ .machine power8
-
- ENTRY (__STRCASECMP)
- #ifdef USE_AS_STRNCASECMP
-@@ -265,15 +251,15 @@ L(different):
- #ifdef __LITTLE_ENDIAN__
- /* Count trailing zero. */
- vspltisb v8, -1
-- VADDUQM_V7_V8
-+ vadduqm v9, v7, v8
- vandc v8, v9, v7
-- VPOPCNTD_V8_V8
-+ vpopcntd v8, v8
- vspltb v6, v8, 15
- vcmpequb. v6, v6, v1
- blt cr6, L(shift8)
- #else
- /* Count leading zero. */
-- VCLZD_V8_v7
-+ vclzd v8, v7
- vspltb v6, v8, 7
- vcmpequb. v6, v6, v1
- blt cr6, L(shift8)
-@@ -291,7 +277,7 @@ L(skipsum):
- /* Merge and move to GPR. */
- vmrglb v6, v6, v7
- vslo v1, v6, v1
-- MFVRD_R3_V1
-+ mfvrd r3, v1
- /* Place the characters that are different in first position. */
- sldi rSTR2, rRTN, 56
- srdi rSTR2, rSTR2, 56
-@@ -301,7 +287,7 @@ L(skipsum):
- vslo v6, v5, v8
- vslo v7, v4, v8
- vmrghb v1, v6, v7
-- MFVRD_R3_V1
-+ mfvrd r3, v1
- srdi rSTR2, rRTN, 48
- sldi rSTR2, rSTR2, 56
- srdi rSTR2, rSTR2, 56
-@@ -320,15 +306,15 @@ L(null_found):
- #ifdef __LITTLE_ENDIAN__
- /* Count trailing zero. */
- vspltisb v8, -1
-- VADDUQM_V7_V8
-+ vadduqm v9, v7, v8
- vandc v8, v9, v7
-- VPOPCNTD_V8_V8
-+ vpopcntd v8, v8
- vspltb v6, v8, 15
- vcmpequb. v6, v6, v10
- blt cr6, L(shift_8)
- #else
- /* Count leading zero. */
-- VCLZD_V8_v7
-+ vclzd v8, v7
- vspltb v6, v8, 7
- vcmpequb. v6, v6, v10
- blt cr6, L(shift_8)
-@@ -343,10 +329,10 @@ L(skipsum1):
- vspltisb v10, 7
- vslb v10, v10, v10
- vsldoi v9, v0, v10, 1
-- VSUBUDM_V9_V8
-+ vsubudm v9, v9, v8
- vspltisb v8, 8
- vsldoi v8, v0, v8, 1
-- VSUBUDM_V9_V8
-+ vsubudm v9, v9, v8
- /* Shift and remove junk after null character. */
- #ifdef __LITTLE_ENDIAN__
- vslo v5, v5, v9
-diff --git a/sysdeps/powerpc/powerpc64/power8/strcasestr.S b/sysdeps/powerpc/powerpc64/power8/strcasestr.S
-index 9fc24c29f9..e10f06fd86 100644
---- a/sysdeps/powerpc/powerpc64/power8/strcasestr.S
-+++ b/sysdeps/powerpc/powerpc64/power8/strcasestr.S
-@@ -73,18 +73,8 @@
- vor reg, v8, reg; \
- vcmpequb. v6, reg, v4;
-
--/* TODO: change these to the actual instructions when the minimum required
-- binutils allows it. */
--#ifdef _ARCH_PWR8
--#define VCLZD_V8_v7 vclzd v8, v7;
--#else
--#define VCLZD_V8_v7 .long 0x11003fc2
--#endif
--
- #define FRAMESIZE (FRAME_MIN_SIZE+48)
--/* TODO: change this to .machine power8 when the minimum required binutils
-- allows it. */
-- .machine power7
-+ .machine power8
- ENTRY (STRCASESTR, 4)
- CALL_MCOUNT 2
- mflr r0 /* Load link register LR to r0. */
-@@ -291,7 +281,7 @@ L(nullchk1):
- vcmpequb. v6, v0, v7
- /* Shift r3 by 16 bytes and proceed. */
- blt cr6, L(shift16)
-- VCLZD_V8_v7
-+ vclzd v8, v7
- #ifdef __LITTLE_ENDIAN__
- vspltb v6, v8, 15
- #else
-diff --git a/sysdeps/powerpc/powerpc64/power8/strcmp.S b/sysdeps/powerpc/powerpc64/power8/strcmp.S
-index 15e7351d1b..d592266d1d 100644
---- a/sysdeps/powerpc/powerpc64/power8/strcmp.S
-+++ b/sysdeps/powerpc/powerpc64/power8/strcmp.S
-@@ -31,6 +31,7 @@
- 64K as default, the page cross handling assumes minimum page size of
- 4k. */
-
-+ .machine power8
- ENTRY_TOCLESS (STRCMP, 4)
- li r0,0
-
---
-2.41.0
diff --git a/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch b/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch
deleted file mode 100644
index ab8ae9c023..0000000000
--- a/contrib/guix/patches/glibc-2.27-riscv64-Use-__has_include-to-include-asm-syscalls.h.patch
+++ /dev/null
@@ -1,78 +0,0 @@
-Note that this has been modified from the original commit, to use __has_include
-instead of __has_include__, as the later was causing build failures with GCC 10.
-See also: http://lists.busybox.net/pipermail/buildroot/2020-July/590376.html.
-
-https://sourceware.org/git/?p=glibc.git;a=commit;h=0b9c84906f653978fb8768c7ebd0ee14a47e662e
-
-This patch can be dropped when we are building with glibc 2.28+.
-
-From 562c52cc81a4e456a62e6455feb32732049e9070 Mon Sep 17 00:00:00 2001
-From: "H.J. Lu" <hjl.tools@gmail.com>
-Date: Mon, 31 Dec 2018 09:26:42 -0800
-Subject: [PATCH] riscv: Use __has_include__ to include <asm/syscalls.h> [BZ
- #24022]
-
-<asm/syscalls.h> has been removed by
-
-commit 27f8899d6002e11a6e2d995e29b8deab5aa9cc25
-Author: David Abdurachmanov <david.abdurachmanov@gmail.com>
-Date: Thu Nov 8 20:02:39 2018 +0100
-
- riscv: add asm/unistd.h UAPI header
-
- Marcin Juszkiewicz reported issues while generating syscall table for riscv
- using 4.20-rc1. The patch refactors our unistd.h files to match some other
- architectures.
-
- - Add asm/unistd.h UAPI header, which has __ARCH_WANT_NEW_STAT only for 64-bit
- - Remove asm/syscalls.h UAPI header and merge to asm/unistd.h
- - Adjust kernel asm/unistd.h
-
- So now asm/unistd.h UAPI header should show all syscalls for riscv.
-
-<asm/syscalls.h> may be restored by
-
-Subject: [PATCH] riscv: restore asm/syscalls.h UAPI header
-Date: Tue, 11 Dec 2018 09:09:35 +0100
-
-UAPI header asm/syscalls.h was merged into UAPI asm/unistd.h header,
-which did resolve issue with missing syscalls macros resulting in
-glibc (2.28) build failure. It also broke glibc in a different way:
-asm/syscalls.h is being used by glibc. I noticed this while doing
-Fedora 30/Rawhide mass rebuild.
-
-The patch returns asm/syscalls.h header and incl. it into asm/unistd.h.
-I plan to send a patch to glibc to use asm/unistd.h instead of
-asm/syscalls.h
-
-In the meantime, we use __has_include__, which was added to GCC 5, to
-check if <asm/syscalls.h> exists before including it. Tested with
-build-many-glibcs.py for riscv against kernel 4.19.12 and 4.20-rc7.
-
- [BZ #24022]
- * sysdeps/unix/sysv/linux/riscv/flush-icache.c: Check if
- <asm/syscalls.h> exists with __has_include__ before including it.
----
- sysdeps/unix/sysv/linux/riscv/flush-icache.c | 6 +++++-
- 1 file changed, 5 insertions(+), 1 deletion(-)
-
-diff --git a/sysdeps/unix/sysv/linux/riscv/flush-icache.c b/sysdeps/unix/sysv/linux/riscv/flush-icache.c
-index d612ef4c6c..0b2042620b 100644
---- a/sysdeps/unix/sysv/linux/riscv/flush-icache.c
-+++ b/sysdeps/unix/sysv/linux/riscv/flush-icache.c
-@@ -21,7 +21,11 @@
- #include <stdlib.h>
- #include <atomic.h>
- #include <sys/cachectl.h>
--#include <asm/syscalls.h>
-+#if __has_include (<asm/syscalls.h>)
-+# include <asm/syscalls.h>
-+#else
-+# include <asm/unistd.h>
-+#endif
-
- typedef int (*func_type) (void *, void *, unsigned long int);
-
---
-2.31.1
-
diff --git a/contrib/guix/patches/glibc-2.27-guix-prefix.patch b/contrib/guix/patches/glibc-guix-prefix.patch
index dc515907ff..60e12ca525 100644
--- a/contrib/guix/patches/glibc-2.27-guix-prefix.patch
+++ b/contrib/guix/patches/glibc-guix-prefix.patch
@@ -4,19 +4,13 @@ hash for the same package will differ when on different architectures.
In order to be reproducible regardless of the architecture used to build
the package, map all guix store prefixes to something fixed, e.g. /usr.
-We might be able to drop this in favour of using --with-nonshared-cflags
-when we begin using newer versions of glibc.
-
--- a/Makeconfig
+++ b/Makeconfig
-@@ -992,6 +992,10 @@ object-suffixes :=
+@@ -1007,6 +1007,7 @@ object-suffixes :=
CPPFLAGS-.o = $(pic-default)
# libc.a must be compiled with -fPIE/-fpie for static PIE.
CFLAGS-.o = $(filter %frame-pointer,$(+cflags)) $(pie-default)
-+
-+# Map Guix store paths to /usr
+CFLAGS-.o += `find /gnu/store -maxdepth 1 -mindepth 1 -type d -exec echo -n " -ffile-prefix-map={}=/usr" \;`
-+
libtype.o := lib%.a
object-suffixes += .o
ifeq (yes,$(build-shared))
diff --git a/contrib/macdeploy/README.md b/contrib/macdeploy/README.md
index 78f61685e1..d47ee6774e 100644
--- a/contrib/macdeploy/README.md
+++ b/contrib/macdeploy/README.md
@@ -56,30 +56,22 @@ The `sha256sum` should be `c0c2e7bb92c1fee0c4e9f3a485e4530786732d6c6dd9e9f418c28
## Deterministic macOS App Notes
-macOS Applications are created in Linux using a recent LLVM.
+macOS Applications are created on Linux using a recent LLVM.
-Apple uses `clang` extensively for development and has upstreamed the necessary
-functionality so that a vanilla clang can take advantage. It supports the use of `-F`,
-`-target`, `-mmacosx-version-min`, and `-isysroot`, which are all necessary when
-building for macOS.
+All builds must target an Apple SDK. These SDKs are free to download, but not redistributable.
+See the SDK Extraction notes above for how to obtain it.
-To complicate things further, all builds must target an Apple SDK. These SDKs are free to
-download, but not redistributable. See the SDK Extraction notes above for how to obtain it.
+The Guix build process has been designed to avoid including the SDK's files in Guix's outputs.
+All interim tarballs are fully deterministic and may be freely redistributed.
-The Guix process builds 2 sets of files: Linux tools, then Apple binaries which are
-created using these tools. The build process has been designed to avoid including the
-SDK's files in Guix's outputs. All interim tarballs are fully deterministic and may be freely
-redistributed.
-
-As of OS X 10.9 Mavericks, using an Apple-blessed key to sign binaries is a requirement in
-order to satisfy the new Gatekeeper requirements. Because this private key cannot be
-shared, we'll have to be a bit creative in order for the build process to remain somewhat
-deterministic. Here's how it works:
+Using an Apple-blessed key to sign binaries is a requirement to produce (distributable) macOS
+binaries. Because this private key cannot be shared, we'll have to be a bit creative in order
+for the build process to remain somewhat deterministic. Here's how it works:
- Builders use Guix to create an unsigned release. This outputs an unsigned ZIP which
- users may choose to bless and run. It also outputs an unsigned app structure in the form
- of a tarball.
+ users may choose to bless, self-codesign, and run. It also outputs an unsigned app structure
+ in the form of a tarball.
- The Apple keyholder uses this unsigned app to create a detached signature, using the
- script that is also included there. Detached signatures are available from this [repository](https://github.com/bitcoin-core/bitcoin-detached-sigs).
-- Builders feed the unsigned app + detached signature back into Guix. It uses the
- pre-built tools to recombine the pieces into a deterministic ZIP.
+ included script. Detached signatures are available from this [repository](https://github.com/bitcoin-core/bitcoin-detached-sigs).
+- Builders feed the unsigned app + detached signature back into Guix, which combines the
+ pieces into a deterministic ZIP.
diff --git a/contrib/macdeploy/gen-sdk b/contrib/macdeploy/gen-sdk
index b73f5cba14..86a6262b5c 100755
--- a/contrib/macdeploy/gen-sdk
+++ b/contrib/macdeploy/gen-sdk
@@ -8,21 +8,6 @@ import gzip
import os
import contextlib
-# monkey-patch Python 3.8 and older to fix wrong TAR header handling
-# see https://github.com/bitcoin/bitcoin/pull/24534
-# and https://github.com/python/cpython/pull/18080 for more info
-if sys.version_info < (3, 9):
- _old_create_header = tarfile.TarInfo._create_header
- def _create_header(info, format, encoding, errors):
- buf = _old_create_header(info, format, encoding, errors)
- # replace devmajor/devminor with binary zeroes
- buf = buf[:329] + bytes(16) + buf[345:]
- # recompute checksum
- chksum = tarfile.calc_chksums(buf)[0]
- buf = buf[:-364] + bytes("%06o\0" % chksum, "ascii") + buf[-357:]
- return buf
- tarfile.TarInfo._create_header = staticmethod(_create_header)
-
@contextlib.contextmanager
def cd(path):
"""Context manager that restores PWD even if an exception was raised."""
diff --git a/contrib/verify-binaries/README.md b/contrib/verify-binaries/README.md
index 04d683e69b..0f3e16a5bc 100644
--- a/contrib/verify-binaries/README.md
+++ b/contrib/verify-binaries/README.md
@@ -50,6 +50,7 @@ Get JSON output and don't prompt for user input (no auto key import):
```sh
./contrib/verify-binaries/verify.py --json pub 22.0-x86
+./contrib/verify-binaries/verify.py --json pub 23.0-rc5-linux-gnu
```
Rely only on local GPG state and manually specified keys, while requiring a
@@ -57,14 +58,15 @@ threshold of at least 10 trusted signatures:
```sh
./contrib/verify-binaries/verify.py \
--trusted-keys 74E2DEF5D77260B98BC19438099BAD163C70FBFA,9D3CC86A72F8494342EA5FD10A41BDC3F4FAFF1C \
- --min-good-sigs 10 pub 22.0-x86
+ --min-good-sigs 10 pub 22.0-linux
```
-If you only want to download the binaries for a certain platform, add the corresponding suffix, e.g.:
+If you only want to download the binaries for a certain architecture and/or platform, add the corresponding suffix, e.g.:
```sh
-./contrib/verify-binaries/verify.py pub 24.0.1-darwin
-./contrib/verify-binaries/verify.py pub 23.1-rc1-win64
+./contrib/verify-binaries/verify.py pub 25.2-x86_64-linux
+./contrib/verify-binaries/verify.py pub 24.1-rc1-darwin
+./contrib/verify-binaries/verify.py pub 27.0-win64-setup.exe
```
If you do not want to keep the downloaded binaries, specify the cleanup option.
diff --git a/contrib/verify-binaries/test.py b/contrib/verify-binaries/test.py
index 22d718ece3..875606ec22 100755
--- a/contrib/verify-binaries/test.py
+++ b/contrib/verify-binaries/test.py
@@ -12,6 +12,21 @@ def main():
expect_code(run_verify("", "pub", '0.32.awefa.12f9h'), 11, "Malformed version should fail")
expect_code(run_verify('--min-good-sigs 20', "pub", "22.0"), 9, "--min-good-sigs 20 should fail")
+ print("- testing verification (22.0-x86_64-linux-gnu.tar.gz)", flush=True)
+ _220_x86_64_linux_gnu = run_verify("--json", "pub", "22.0-x86_64-linux-gnu.tar.gz")
+ try:
+ result = json.loads(_220_x86_64_linux_gnu.stdout.decode())
+ except Exception:
+ print("failed on 22.0-x86_64-linux-gnu.tar.gz --json:")
+ print_process_failure(_220_x86_64_linux_gnu)
+ raise
+
+ expect_code(_220_x86_64_linux_gnu, 0, "22.0-x86_64-linux-gnu.tar.gz should succeed")
+ v = result['verified_binaries']
+ assert result['good_trusted_sigs']
+ assert len(v) == 1
+ assert v['bitcoin-22.0-x86_64-linux-gnu.tar.gz'] == '59ebd25dd82a51638b7a6bb914586201e67db67b919b2a1ff08925a7936d1b16'
+
print("- testing verification (22.0)", flush=True)
_220 = run_verify("--json", "pub", "22.0")
try:
diff --git a/contrib/verify-binaries/verify.py b/contrib/verify-binaries/verify.py
index 12e6e10d8a..6c07b36c9d 100755
--- a/contrib/verify-binaries/verify.py
+++ b/contrib/verify-binaries/verify.py
@@ -97,23 +97,17 @@ def bool_from_env(key, default=False) -> bool:
VERSION_FORMAT = "<major>.<minor>[.<patch>][-rc[0-9]][-platform]"
-VERSION_EXAMPLE = "22.0-x86_64 or 23.1-rc1-darwin"
+VERSION_EXAMPLE = "22.0 or 23.1-rc1-darwin.dmg or 27.0-x86_64-linux-gnu"
def parse_version_string(version_str):
- parts = version_str.split('-')
- version_base = parts[0]
- version_rc = ""
- version_os = ""
- if len(parts) == 2: # "<version>-rcN" or "version-platform"
- if "rc" in parts[1]:
- version_rc = parts[1]
- else:
- version_os = parts[1]
- elif len(parts) == 3: # "<version>-rcN-platform"
- version_rc = parts[1]
- version_os = parts[2]
+ # "<version>[-rcN][-platform]"
+ version_base, _, platform = version_str.partition('-')
+ rc = ""
+ if platform.startswith("rc"): # "<version>-rcN[-platform]"
+ rc, _, platform = platform.partition('-')
+ # else "<version>" or "<version>-platform"
- return version_base, version_rc, version_os
+ return version_base, rc, platform
def download_with_wget(remote_file, local_file):
@@ -514,7 +508,9 @@ def verify_published_handler(args: argparse.Namespace) -> ReturnCode:
# Extract hashes and filenames
hashes_to_verify = parse_sums_file(SUMS_FILENAME, [os_filter])
if not hashes_to_verify:
- log.error("no files matched the platform specified")
+ available_versions = ["-".join(line[1].split("-")[2:]) for line in parse_sums_file(SUMS_FILENAME, [])]
+ closest_match = difflib.get_close_matches(os_filter, available_versions, cutoff=0, n=1)[0]
+ log.error(f"No files matched the platform specified. Did you mean: {closest_match}")
return ReturnCode.NO_BINARIES_MATCH
# remove binaries that are known not to be hosted by bitcoincore.org
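A self-contained Python sketch of the new "<major>.<minor>[.<patch>][-rc[0-9]][-platform]" parsing and the closest-match hint (mirroring, not reproducing, the verify.py code above; the example suffix list is an assumption):

    import difflib

    def parse_version_string(version_str):
        """Parse "<version>[-rcN][-platform]" into (base, rc, platform)."""
        version_base, _, rest = version_str.partition('-')
        rc = ""
        if rest.startswith("rc"):
            rc, _, rest = rest.partition('-')
        return version_base, rc, rest

    assert parse_version_string("22.0") == ("22.0", "", "")
    assert parse_version_string("23.1-rc1-darwin") == ("23.1", "rc1", "darwin")
    assert parse_version_string("27.0-x86_64-linux-gnu") == ("27.0", "", "x86_64-linux-gnu")

    # When no file matches, the closest platform suffix can be suggested:
    suffixes = ["x86_64-linux-gnu", "aarch64-linux-gnu", "win64-setup.exe", "arm64-apple-darwin"]
    print(difflib.get_close_matches("x86-linux", suffixes, cutoff=0, n=1)[0])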
diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk
index f2e9abdf37..564381d1e9 100644
--- a/depends/hosts/darwin.mk
+++ b/depends/hosts/darwin.mk
@@ -65,8 +65,8 @@ darwin_CXX=env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH \
-iwithsysroot/usr/include/c++/v1 \
-iwithsysroot/usr/include -iframeworkwithsysroot/System/Library/Frameworks
-darwin_CFLAGS=-pipe -std=$(C_STANDARD) -mmacosx-version-min=$(OSX_MIN_VERSION)
-darwin_CXXFLAGS=-pipe -std=$(CXX_STANDARD) -mmacosx-version-min=$(OSX_MIN_VERSION)
+darwin_CFLAGS=-pipe -std=$(C_STANDARD) -mmacos-version-min=$(OSX_MIN_VERSION)
+darwin_CXXFLAGS=-pipe -std=$(CXX_STANDARD) -mmacos-version-min=$(OSX_MIN_VERSION)
darwin_LDFLAGS=-Wl,-platform_version,macos,$(OSX_MIN_VERSION),$(OSX_SDK_VERSION)
ifneq ($(build_os),darwin)
diff --git a/doc/dependencies.md b/doc/dependencies.md
index fc574b6164..63c505c9cb 100644
--- a/doc/dependencies.md
+++ b/doc/dependencies.md
@@ -19,7 +19,7 @@ You can find installation instructions in the `build-*.md` file for your platfor
| --- | --- | --- | --- | --- |
| [Boost](../depends/packages/boost.mk) | [link](https://www.boost.org/users/download/) | [1.81.0](https://github.com/bitcoin/bitcoin/pull/26557) | [1.73.0](https://github.com/bitcoin/bitcoin/pull/29066) | No |
| [libevent](../depends/packages/libevent.mk) | [link](https://github.com/libevent/libevent/releases) | [2.1.12-stable](https://github.com/bitcoin/bitcoin/pull/21991) | [2.1.8](https://github.com/bitcoin/bitcoin/pull/24681) | No |
-| glibc | [link](https://www.gnu.org/software/libc/) | N/A | [2.27](https://github.com/bitcoin/bitcoin/pull/27029) | Yes |
+| glibc | [link](https://www.gnu.org/software/libc/) | N/A | [2.31](https://github.com/bitcoin/bitcoin/pull/29987) | Yes |
| Linux Kernel | [link](https://www.kernel.org/) | N/A | [3.17.0](https://github.com/bitcoin/bitcoin/pull/27699) | Yes |
## Optional
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index eb2bb41aa4..d9d5b392c5 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -1457,8 +1457,9 @@ independent (node, wallet, GUI), are defined in
there are [`interfaces::Chain`](../src/interfaces/chain.h), used by wallet to
access the node's latest chain state,
[`interfaces::Node`](../src/interfaces/node.h), used by the GUI to control the
-node, and [`interfaces::Wallet`](../src/interfaces/wallet.h), used by the GUI
-to control an individual wallet. There are also more specialized interface
+node, [`interfaces::Wallet`](../src/interfaces/wallet.h), used by the GUI
+to control an individual wallet, and [`interfaces::Mining`](../src/interfaces/mining.h),
+used by RPC to generate block templates. There are also more specialized interface
types like [`interfaces::Handler`](../src/interfaces/handler.h)
[`interfaces::ChainClient`](../src/interfaces/chain.h) passed to and from
various interface methods.
diff --git a/doc/init.md b/doc/init.md
index 64ab971557..b570fed22c 100644
--- a/doc/init.md
+++ b/doc/init.md
@@ -35,8 +35,10 @@ it will use a special cookie file for authentication. The cookie is generated wi
content when the daemon starts, and deleted when it exits. Read access to this file
controls who can access it through RPC.
-By default the cookie is stored in the data directory, but it's location can be overridden
-with the option '-rpccookiefile'.
+By default the cookie is stored in the data directory, but its location can be
+overridden with the option `-rpccookiefile`. Default file permissions for the
+cookie are "owner" (i.e. user readable/writable) via the default application-wide
+file umask of `0077`, but these can be overridden with the `-rpccookieperms` option.
This allows for running bitcoind without having to do any manual configuration.
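As a rough illustration of what the `-rpccookieperms` values correspond to in filesystem permission bits, here is a minimal sketch; the helper name `CookiePermsFromString` and the exact bit choices are assumptions for illustration, not the project's actual option handling.

```cpp
#include <filesystem>
#include <optional>
#include <string>

// Hypothetical sketch: map the documented -rpccookieperms values
// ("owner", "group", "all") onto std::filesystem permission bits.
// The real code may choose different bits.
std::optional<std::filesystem::perms> CookiePermsFromString(const std::string& s)
{
    using std::filesystem::perms;
    if (s == "owner") return perms::owner_read | perms::owner_write;
    if (s == "group") return perms::owner_read | perms::owner_write | perms::group_read;
    if (s == "all") return perms::owner_read | perms::owner_write |
                           perms::group_read | perms::others_read;
    return std::nullopt; // unrecognized value: reject the option
}
```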
diff --git a/doc/policy/packages.md b/doc/policy/packages.md
index 7e983221c5..a220bdd17f 100644
--- a/doc/policy/packages.md
+++ b/doc/policy/packages.md
@@ -36,10 +36,29 @@ The following rules are enforced for all packages:
* Packages cannot have conflicting transactions, i.e. no two transactions in a package can spend
the same inputs. Packages cannot have duplicate transactions. (#20833)
-* No transaction in a package can conflict with a mempool transaction. Replace By Fee is
- currently disabled for packages. (#20833)
+* Only limited package replacements are currently considered. (#28984)
- - Package RBF may be enabled in the future.
+ - All direct conflicts must signal replacement (or have `-mempoolfullrbf=1` set).
+
+ - Packages are 1-parent-1-child, with no in-mempool ancestors of the package.
+
+  - All conflicting clusters (connected components of mempool transactions) must be of size 2 or lower.
+
+  - No more than MAX_REPLACEMENT_CANDIDATES transactions can be replaced (analogous to
+ regular [replacement rule](./mempool-replacements.md) 5).
+
+  - Replacements must pay more total fees at the incremental relay fee (analogous to
+ regular [replacement rules](./mempool-replacements.md) 3 and 4).
+
+ - Parent feerate must be lower than package feerate.
+
+ - Must improve [feerate diagram](https://delvingbitcoin.org/t/mempool-incentive-compatibility/553). (#29242)
+
+  - *Rationale*: Basic support for package RBF can be used by wallets
+    by creating chains no longer than two transactions, then directly
+    conflicting those chains when needed. Combined with V3 transactions
+    this can result in more robust fee bumping. More general package RBF
+    may be enabled in the future.
* When packages are evaluated against ancestor/descendant limits, the union of all transactions'
descendants and ancestors is considered. (#21800)
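To make the total-fee rule in the list above concrete: the replacement package has to pay at least the fees of everything it evicts, plus the incremental relay feerate applied to its own virtual size, mirroring regular replacement rules 3 and 4. A minimal sketch under those assumptions follows; the name `PackagePaysForRBF` and its parameters are illustrative, not the actual validation code.

```cpp
#include <cstdint>

using CAmount = int64_t; // fee amounts in satoshis

// Illustrative version of the package RBF fee rule described above: the
// replacement must pay all fees it displaces, plus the incremental relay
// feerate (given here in sat/kvB) applied to its own virtual size.
bool PackagePaysForRBF(CAmount replaced_fees, CAmount replacement_fees,
                       int64_t replacement_vsize, CAmount incremental_feerate_per_kvb)
{
    const CAmount additional_fees = replacement_fees - replaced_fees;
    if (additional_fees < 0) return false; // must not pay less in total
    // Must also cover the cost of relaying the replacement bytes themselves.
    const CAmount required = incremental_feerate_per_kvb * replacement_vsize / 1000;
    return additional_fees >= required;
}
```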
diff --git a/doc/release-28984.md b/doc/release-28984.md
new file mode 100644
index 0000000000..3da64f6578
--- /dev/null
+++ b/doc/release-28984.md
@@ -0,0 +1,6 @@
+P2P and network changes
+-----------------------
+
+- Limited package RBF is now enabled, where the proposed conflicting package would result in
+ a connected component, aka cluster, of size 2 in the mempool. All clusters being conflicted
+ against must be of size 2 or lower.
diff --git a/doc/release-notes-29987.md b/doc/release-notes-29987.md
new file mode 100644
index 0000000000..4d4c2358d1
--- /dev/null
+++ b/doc/release-notes-29987.md
@@ -0,0 +1,6 @@
+Compatibility
+=============
+
+The minimum required glibc to run Bitcoin Core is now
+2.31. This means that RHEL 8 and Ubuntu 18.04 (Bionic)
+are no longer supported. (#29987)
\ No newline at end of file
diff --git a/doc/release-notes-30058.md b/doc/release-notes-30058.md
new file mode 100644
index 0000000000..47e7ae704e
--- /dev/null
+++ b/doc/release-notes-30058.md
@@ -0,0 +1,7 @@
+- When running with -alertnotify, an alert can now be raised multiple
+times instead of just once. Previously, it was only raised when unknown
+new consensus rules were activated, whereas the scope has now been
+increased to include all kernel warnings. Specifically, alerts will now
+also be raised when an invalid chain with a large amount of work has
+been detected. Additional warnings may be added in the future.
+(#30058)
diff --git a/src/Makefile.am b/src/Makefile.am
index 2562c4cc65..0c47a737d0 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -177,6 +177,7 @@ BITCOIN_CORE_H = \
interfaces/handler.h \
interfaces/init.h \
interfaces/ipc.h \
+ interfaces/mining.h \
interfaces/node.h \
interfaces/wallet.h \
kernel/blockmanager_opts.h \
@@ -196,6 +197,7 @@ BITCOIN_CORE_H = \
kernel/messagestartchars.h \
kernel/notifications_interface.h \
kernel/validation_cache_sizes.h \
+ kernel/warning.h \
key.h \
key_io.h \
logging.h \
@@ -239,6 +241,7 @@ BITCOIN_CORE_H = \
node/types.h \
node/utxo_snapshot.h \
node/validation_cache_args.h \
+ node/warnings.h \
noui.h \
outputtype.h \
policy/v3_policy.h \
@@ -367,7 +370,6 @@ BITCOIN_CORE_H = \
wallet/wallettool.h \
wallet/walletutil.h \
walletinitinterface.h \
- warnings.h \
zmq/zmqabstractnotifier.h \
zmq/zmqnotificationinterface.h \
zmq/zmqpublishnotifier.h \
@@ -444,6 +446,7 @@ libbitcoin_node_a_SOURCES = \
node/txreconciliation.cpp \
node/utxo_snapshot.cpp \
node/validation_cache_args.cpp \
+ node/warnings.cpp \
noui.cpp \
policy/v3_policy.cpp \
policy/fees.cpp \
@@ -721,7 +724,6 @@ libbitcoin_common_a_SOURCES = \
script/sign.cpp \
script/signingprovider.cpp \
script/solver.cpp \
- warnings.cpp \
$(BITCOIN_CORE_H)
#
@@ -996,8 +998,7 @@ libbitcoinkernel_la_SOURCES = \
util/tokenpipe.cpp \
validation.cpp \
validationinterface.cpp \
- versionbits.cpp \
- warnings.cpp
+ versionbits.cpp
# Required for obj/build.h to be generated first.
# More details: https://www.gnu.org/software/automake/manual/html_node/Built-Sources-Example.html
@@ -1088,7 +1089,7 @@ libbitcoin_ipc_a_SOURCES = \
ipc/process.cpp \
ipc/process.h \
ipc/protocol.h
-libbitcoin_ipc_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
+libbitcoin_ipc_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(BOOST_CPPFLAGS)
libbitcoin_ipc_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) $(LIBMULTIPROCESS_CFLAGS)
include $(MPGEN_PREFIX)/include/mpgen.mk
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index fa7cadb726..633d0776f5 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -118,6 +118,7 @@ BITCOIN_TESTS =\
test/net_peer_eviction_tests.cpp \
test/net_tests.cpp \
test/netbase_tests.cpp \
+ test/node_warnings_tests.cpp \
test/orphanage_tests.cpp \
test/peerman_tests.cpp \
test/pmt_tests.cpp \
diff --git a/src/banman.h b/src/banman.h
index c6df7ec3c3..57ba2ac23c 100644
--- a/src/banman.h
+++ b/src/banman.h
@@ -34,7 +34,7 @@ class CSubNet;
// disk on shutdown and reloaded on startup. Banning can be used to
// prevent connections with spy nodes or other griefers.
//
-// 2. Discouragement. If a peer misbehaves enough (see Misbehaving() in
+// 2. Discouragement. If a peer misbehaves (see Misbehaving() in
// net_processing.cpp), we'll mark that address as discouraged. We still allow
// incoming connections from them, but they're preferred for eviction when
// we receive new incoming connections. We never make outgoing connections to
diff --git a/src/bitcoin-chainstate.cpp b/src/bitcoin-chainstate.cpp
index ca4434a882..ecbdcd48bb 100644
--- a/src/bitcoin-chainstate.cpp
+++ b/src/bitcoin-chainstate.cpp
@@ -16,6 +16,7 @@
#include <kernel/checks.h>
#include <kernel/context.h>
#include <kernel/validation_cache_sizes.h>
+#include <kernel/warning.h>
#include <consensus/validation.h>
#include <core_io.h>
@@ -28,6 +29,7 @@
#include <util/fs.h>
#include <util/signalinterrupt.h>
#include <util/task_runner.h>
+#include <util/translation.h>
#include <validation.h>
#include <validationinterface.h>
@@ -86,9 +88,13 @@ int main(int argc, char* argv[])
{
std::cout << "Progress: " << title.original << ", " << progress_percent << ", " << resume_possible << std::endl;
}
- void warning(const bilingual_str& warning) override
+ void warningSet(kernel::Warning id, const bilingual_str& message) override
{
- std::cout << "Warning: " << warning.original << std::endl;
+ std::cout << "Warning " << static_cast<int>(id) << " set: " << message.original << std::endl;
+ }
+ void warningUnset(kernel::Warning id) override
+ {
+ std::cout << "Warning " << static_cast<int>(id) << " unset" << std::endl;
}
void flushError(const bilingual_str& message) override
{
diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp
index 0b89aa42af..a09bb5c9da 100644
--- a/src/bitcoind.cpp
+++ b/src/bitcoind.cpp
@@ -17,6 +17,7 @@
#include <kernel/context.h>
#include <node/context.h>
#include <node/interface_ui.h>
+#include <node/warnings.h>
#include <noui.h>
#include <util/check.h>
#include <util/exception.h>
@@ -181,6 +182,8 @@ static bool AppInit(NodeContext& node)
return false;
}
+ node.warnings = std::make_unique<node::Warnings>();
+
node.kernel = std::make_unique<kernel::Context>();
node.ecc_context = std::make_unique<ECC_Context>();
if (!AppInitSanityChecks(*node.kernel))
diff --git a/src/coins.cpp b/src/coins.cpp
index b62653e0de..a4e4d4ad32 100644
--- a/src/coins.cpp
+++ b/src/coins.cpp
@@ -361,7 +361,7 @@ static bool ExecuteBackedWrapper(Func func, const std::vector<std::function<void
for (const auto& f : err_callbacks) {
f();
}
- LogPrintf("Error reading from database: %s\n", e.what());
+ LogError("Error reading from database: %s\n", e.what());
// Starting the shutdown sequence and returning false to the caller would be
// interpreted as 'entry not found' (as opposed to unable to read data), and
// could lead to invalid interpretation. Just exit immediately, as we can't
diff --git a/src/httprpc.cpp b/src/httprpc.cpp
index 128597157d..af809eaf38 100644
--- a/src/httprpc.cpp
+++ b/src/httprpc.cpp
@@ -11,6 +11,8 @@
#include <netaddress.h>
#include <rpc/protocol.h>
#include <rpc/server.h>
+#include <util/fs.h>
+#include <util/fs_helpers.h>
#include <util/strencodings.h>
#include <util/string.h>
#include <walletinitinterface.h>
@@ -19,6 +21,7 @@
#include <iterator>
#include <map>
#include <memory>
+#include <optional>
#include <set>
#include <string>
#include <vector>
@@ -291,8 +294,20 @@ static bool InitRPCAuthentication()
{
if (gArgs.GetArg("-rpcpassword", "") == "")
{
- LogPrintf("Using random cookie authentication.\n");
- if (!GenerateAuthCookie(&strRPCUserColonPass)) {
+ LogInfo("Using random cookie authentication.\n");
+
+ std::optional<fs::perms> cookie_perms{std::nullopt};
+ auto cookie_perms_arg{gArgs.GetArg("-rpccookieperms")};
+ if (cookie_perms_arg) {
+ auto perm_opt = InterpretPermString(*cookie_perms_arg);
+ if (!perm_opt) {
+ LogInfo("Invalid -rpccookieperms=%s; must be one of 'owner', 'group', or 'all'.\n", *cookie_perms_arg);
+ return false;
+ }
+ cookie_perms = *perm_opt;
+ }
+
+ if (!GenerateAuthCookie(&strRPCUserColonPass, cookie_perms)) {
return false;
}
} else {
diff --git a/src/httpserver.cpp b/src/httpserver.cpp
index b1d4dc9234..b6c6db8b35 100644
--- a/src/httpserver.cpp
+++ b/src/httpserver.cpp
@@ -26,6 +26,7 @@
#include <deque>
#include <memory>
#include <optional>
+#include <span>
#include <string>
#include <unordered_map>
@@ -634,7 +635,7 @@ void HTTPRequest::WriteHeader(const std::string& hdr, const std::string& value)
* Replies must be sent in the main loop in the main http thread,
* this cannot be done from worker threads.
*/
-void HTTPRequest::WriteReply(int nStatus, const std::string& strReply)
+void HTTPRequest::WriteReply(int nStatus, std::span<const std::byte> reply)
{
assert(!replySent && req);
if (m_interrupt) {
@@ -643,7 +644,7 @@ void HTTPRequest::WriteReply(int nStatus, const std::string& strReply)
// Send event to main http thread to send reply message
struct evbuffer* evb = evhttp_request_get_output_buffer(req);
assert(evb);
- evbuffer_add(evb, strReply.data(), strReply.size());
+ evbuffer_add(evb, reply.data(), reply.size());
auto req_copy = req;
HTTPEvent* ev = new HTTPEvent(eventBase, true, [req_copy, nStatus]{
evhttp_send_reply(req_copy, nStatus, nullptr, nullptr);
diff --git a/src/httpserver.h b/src/httpserver.h
index 9a49877f09..991081bab8 100644
--- a/src/httpserver.h
+++ b/src/httpserver.h
@@ -7,6 +7,7 @@
#include <functional>
#include <optional>
+#include <span>
#include <string>
namespace util {
@@ -123,12 +124,16 @@ public:
/**
* Write HTTP reply.
* nStatus is the HTTP status code to send.
- * strReply is the body of the reply. Keep it empty to send a standard message.
+ * reply is the body of the reply. Keep it empty to send a standard message.
*
* @note Can be called only once. As this will give the request back to the
* main thread, do not call any other HTTPRequest methods after calling this.
*/
- void WriteReply(int nStatus, const std::string& strReply = "");
+ void WriteReply(int nStatus, std::string_view reply = "")
+ {
+ WriteReply(nStatus, std::as_bytes(std::span{reply.data(), reply.size()}));
+ }
+ void WriteReply(int nStatus, std::span<const std::byte> reply);
};
/** Get the query parameter value from request uri for a specified key, or std::nullopt if the key
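A small self-contained sketch of the resulting overload pair, using a stand-in class rather than the real `HTTPRequest` (which does much more): text callers keep using the `string_view` overload, while binary payloads go straight through the `std::span` overload without an intermediate `std::string` copy.

```cpp
#include <cstddef>
#include <iostream>
#include <span>
#include <string_view>
#include <vector>

// Stand-in with the same overload shape as HTTPRequest::WriteReply after
// this change; purely illustrative.
struct FakeRequest {
    void WriteReply(int status, std::span<const std::byte> reply)
    {
        std::cout << "status=" << status << " bytes=" << reply.size() << '\n';
    }
    void WriteReply(int status, std::string_view reply = "")
    {
        WriteReply(status, std::as_bytes(std::span{reply.data(), reply.size()}));
    }
};

int main()
{
    FakeRequest req;
    req.WriteReply(200, "{\"result\":null}");             // text: string_view overload
    std::vector<std::byte> raw{std::byte{0x01}, std::byte{0x02}};
    req.WriteReply(200, std::span<const std::byte>{raw}); // binary: span overload
}
```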
diff --git a/src/i2p.cpp b/src/i2p.cpp
index a907cfeacb..0420bc9238 100644
--- a/src/i2p.cpp
+++ b/src/i2p.cpp
@@ -148,7 +148,7 @@ bool Session::Listen(Connection& conn)
conn.sock = StreamAccept();
return true;
} catch (const std::runtime_error& e) {
- Log("Error listening: %s", e.what());
+ LogPrintLevel(BCLog::I2P, BCLog::Level::Error, "Couldn't listen: %s\n", e.what());
CheckControlSock();
}
return false;
@@ -204,7 +204,11 @@ bool Session::Accept(Connection& conn)
return true;
}
- Log("Error accepting%s: %s", disconnect ? " (will close the session)" : "", errmsg);
+ if (*m_interrupt) {
+ LogPrintLevel(BCLog::I2P, BCLog::Level::Debug, "Accept was interrupted\n");
+ } else {
+ LogPrintLevel(BCLog::I2P, BCLog::Level::Debug, "Error accepting%s: %s\n", disconnect ? " (will close the session)" : "", errmsg);
+ }
if (disconnect) {
LOCK(m_mutex);
Disconnect();
@@ -219,7 +223,7 @@ bool Session::Connect(const CService& to, Connection& conn, bool& proxy_error)
// Refuse connecting to arbitrary ports. We don't specify any destination port to the SAM proxy
// when connecting (SAM 3.1 does not use ports) and it forces/defaults it to I2P_SAM31_PORT.
if (to.GetPort() != I2P_SAM31_PORT) {
- Log("Error connecting to %s, connection refused due to arbitrary port %s", to.ToStringAddrPort(), to.GetPort());
+ LogPrintLevel(BCLog::I2P, BCLog::Level::Debug, "Error connecting to %s, connection refused due to arbitrary port %s\n", to.ToStringAddrPort(), to.GetPort());
proxy_error = false;
return false;
}
@@ -267,7 +271,7 @@ bool Session::Connect(const CService& to, Connection& conn, bool& proxy_error)
throw std::runtime_error(strprintf("\"%s\"", connect_reply.full));
} catch (const std::runtime_error& e) {
- Log("Error connecting to %s: %s", to.ToStringAddrPort(), e.what());
+ LogPrintLevel(BCLog::I2P, BCLog::Level::Debug, "Error connecting to %s: %s\n", to.ToStringAddrPort(), e.what());
CheckControlSock();
return false;
}
@@ -285,12 +289,6 @@ std::string Session::Reply::Get(const std::string& key) const
return pos->second.value();
}
-template <typename... Args>
-void Session::Log(const std::string& fmt, const Args&... args) const
-{
- LogPrint(BCLog::I2P, "%s\n", tfm::format(fmt, args...));
-}
-
Session::Reply Session::SendRequestAndGetReply(const Sock& sock,
const std::string& request,
bool check_result_ok) const
@@ -346,7 +344,7 @@ void Session::CheckControlSock()
std::string errmsg;
if (m_control_sock && !m_control_sock->IsConnected(errmsg)) {
- Log("Control socket error: %s", errmsg);
+ LogPrintLevel(BCLog::I2P, BCLog::Level::Debug, "Control socket error: %s\n", errmsg);
Disconnect();
}
}
@@ -416,7 +414,7 @@ void Session::CreateIfNotCreatedAlready()
const auto session_type = m_transient ? "transient" : "persistent";
const auto session_id = GetRandHash().GetHex().substr(0, 10); // full is overkill, too verbose in the logs
- Log("Creating %s SAM session %s with %s", session_type, session_id, m_control_host.ToString());
+ LogPrintLevel(BCLog::I2P, BCLog::Level::Debug, "Creating %s SAM session %s with %s\n", session_type, session_id, m_control_host.ToString());
auto sock = Hello();
@@ -453,7 +451,7 @@ void Session::CreateIfNotCreatedAlready()
m_session_id = session_id;
m_control_sock = std::move(sock);
- Log("%s SAM session %s created, my address=%s",
+ LogPrintLevel(BCLog::I2P, BCLog::Level::Info, "%s SAM session %s created, my address=%s\n",
Capitalize(session_type),
m_session_id,
m_my_addr.ToStringAddrPort());
@@ -484,9 +482,9 @@ void Session::Disconnect()
{
if (m_control_sock) {
if (m_session_id.empty()) {
- Log("Destroying incomplete SAM session");
+ LogPrintLevel(BCLog::I2P, BCLog::Level::Info, "Destroying incomplete SAM session\n");
} else {
- Log("Destroying SAM session %s", m_session_id);
+ LogPrintLevel(BCLog::I2P, BCLog::Level::Info, "Destroying SAM session %s\n", m_session_id);
}
m_control_sock.reset();
}
diff --git a/src/i2p.h b/src/i2p.h
index 8b0f1e1182..153263399d 100644
--- a/src/i2p.h
+++ b/src/i2p.h
@@ -157,14 +157,6 @@ private:
};
/**
- * Log a message in the `BCLog::I2P` category.
- * @param[in] fmt printf(3)-like format string.
- * @param[in] args printf(3)-like arguments that correspond to `fmt`.
- */
- template <typename... Args>
- void Log(const std::string& fmt, const Args&... args) const;
-
- /**
* Send request and get a reply from the SAM proxy.
* @param[in] sock A socket that is connected to the SAM proxy.
* @param[in] request Raw request to send, a newline terminator is appended to it.
diff --git a/src/index/base.cpp b/src/index/base.cpp
index e66c89f9e4..955d7b67c9 100644
--- a/src/index/base.cpp
+++ b/src/index/base.cpp
@@ -17,7 +17,6 @@
#include <util/thread.h>
#include <util/translation.h>
#include <validation.h> // For g_chainman
-#include <warnings.h>
#include <string>
#include <utility>
@@ -31,7 +30,7 @@ template <typename... Args>
void BaseIndex::FatalErrorf(const char* fmt, const Args&... args)
{
auto message = tfm::format(fmt, args...);
- node::AbortNode(m_chain->context()->shutdown, m_chain->context()->exit_status, Untranslated(message));
+ node::AbortNode(m_chain->context()->shutdown, m_chain->context()->exit_status, Untranslated(message), m_chain->context()->warnings.get());
}
CBlockLocator GetLocator(interfaces::Chain& chain, const uint256& block_hash)
diff --git a/src/init.cpp b/src/init.cpp
index dec391bb49..44c256d203 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -31,6 +31,7 @@
#include <init/common.h>
#include <interfaces/chain.h>
#include <interfaces/init.h>
+#include <interfaces/mining.h>
#include <interfaces/node.h>
#include <kernel/context.h>
#include <key.h>
@@ -109,7 +110,7 @@
#include <boost/signals2/signal.hpp>
-#if ENABLE_ZMQ
+#ifdef ENABLE_ZMQ
#include <zmq/zmqabstractnotifier.h>
#include <zmq/zmqnotificationinterface.h>
#include <zmq/zmqrpc.h>
@@ -364,7 +365,7 @@ void Shutdown(NodeContext& node)
client->stop();
}
-#if ENABLE_ZMQ
+#ifdef ENABLE_ZMQ
if (g_zmq_notification_interface) {
if (node.validation_signals) node.validation_signals->UnregisterValidationInterface(g_zmq_notification_interface.get());
g_zmq_notification_interface.reset();
@@ -409,7 +410,7 @@ static void HandleSIGHUP(int)
static BOOL WINAPI consoleCtrlHandler(DWORD dwCtrlType)
{
if (!(*Assert(g_shutdown))()) {
- LogPrintf("Error: failed to send shutdown signal on Ctrl-C\n");
+ LogError("Failed to send shutdown signal on Ctrl-C\n");
return false;
}
Sleep(INFINITE);
@@ -531,7 +532,7 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-maxreceivebuffer=<n>", strprintf("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXRECEIVEBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxsendbuffer=<n>", strprintf("Maximum per-connection memory usage for the send buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXSENDBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxuploadtarget=<n>", strprintf("Tries to keep outbound traffic under the given target per 24h. Limit does not apply to peers with 'download' permission or blocks created within past week. 0 = no limit (default: %s). Optional suffix units [k|K|m|M|g|G|t|T] (default: M). Lowercase is 1000 base while uppercase is 1024 base", DEFAULT_MAX_UPLOAD_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
-#if HAVE_SOCKADDR_UN
+#ifdef HAVE_SOCKADDR_UN
argsman.AddArg("-onion=<ip:port|path>", "Use separate SOCKS5 proxy to reach peers via Tor onion services, set -noonion to disable (default: -proxy). May be a local file path prefixed with 'unix:'.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
#else
argsman.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor onion services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
@@ -544,7 +545,7 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-txreconciliation", strprintf("Enable transaction reconciliations per BIP 330 (default: %d)", DEFAULT_TXRECONCILIATION_ENABLE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION);
argsman.AddArg("-port=<port>", strprintf("Listen for connections on <port> (default: %u, testnet: %u, signet: %u, regtest: %u). Not relevant for I2P (see doc/i2p.md).", defaultChainParams->GetDefaultPort(), testnetChainParams->GetDefaultPort(), signetChainParams->GetDefaultPort(), regtestChainParams->GetDefaultPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
-#if HAVE_SOCKADDR_UN
+#ifdef HAVE_SOCKADDR_UN
argsman.AddArg("-proxy=<ip:port|path>", "Connect through SOCKS5 proxy, set -noproxy to disable (default: disabled). May be a local file path prefixed with 'unix:' if the proxy supports it.", ArgsManager::ALLOW_ANY | ArgsManager::DISALLOW_ELISION, OptionsCategory::CONNECTION);
#else
argsman.AddArg("-proxy=<ip:port>", "Connect through SOCKS5 proxy, set -noproxy to disable (default: disabled)", ArgsManager::ALLOW_ANY | ArgsManager::DISALLOW_ELISION, OptionsCategory::CONNECTION);
@@ -578,7 +579,7 @@ void SetupServerArgs(ArgsManager& argsman)
g_wallet_init_interface.AddWalletOptions(argsman);
-#if ENABLE_ZMQ
+#ifdef ENABLE_ZMQ
argsman.AddArg("-zmqpubhashblock=<address>", "Enable publish hash block in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
argsman.AddArg("-zmqpubhashtx=<address>", "Enable publish hash transaction in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
argsman.AddArg("-zmqpubrawblock=<address>", "Enable publish raw block in <address>", ArgsManager::ALLOW_ANY, OptionsCategory::ZMQ);
@@ -658,6 +659,7 @@ void SetupServerArgs(ArgsManager& argsman)
argsman.AddArg("-rpcbind=<addr>[:port]", "Bind to given address to listen for JSON-RPC connections. Do not expose the RPC server to untrusted networks such as the public internet! This option is ignored unless -rpcallowip is also passed. Port is optional and overrides -rpcport. Use [host]:port notation for IPv6. This option can be specified multiple times (default: 127.0.0.1 and ::1 i.e., localhost)", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC);
argsman.AddArg("-rpcdoccheck", strprintf("Throw a non-fatal error at runtime if the documentation for an RPC is incorrect (default: %u)", DEFAULT_RPC_DOC_CHECK), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
argsman.AddArg("-rpccookiefile=<loc>", "Location of the auth cookie. Relative paths will be prefixed by a net-specific datadir location. (default: data dir)", ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
+ argsman.AddArg("-rpccookieperms=<readable-by>", strprintf("Set permissions on the RPC auth cookie file so that it is readable by [owner|group|all] (default: owner [via umask 0077])"), ArgsManager::ALLOW_ANY, OptionsCategory::RPC);
argsman.AddArg("-rpcpassword=<pw>", "Password for JSON-RPC connections", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::RPC);
argsman.AddArg("-rpcport=<port>", strprintf("Listen for JSON-RPC connections on <port> (default: %u, testnet: %u, signet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), signetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::RPC);
argsman.AddArg("-rpcservertimeout=<n>", strprintf("Timeout during HTTP requests (default: %d)", DEFAULT_HTTP_SERVER_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::RPC);
@@ -730,73 +732,73 @@ void InitParameterInteraction(ArgsManager& args)
// even when -connect or -proxy is specified
if (args.IsArgSet("-bind")) {
if (args.SoftSetBoolArg("-listen", true))
- LogPrintf("%s: parameter interaction: -bind set -> setting -listen=1\n", __func__);
+ LogInfo("parameter interaction: -bind set -> setting -listen=1\n");
}
if (args.IsArgSet("-whitebind")) {
if (args.SoftSetBoolArg("-listen", true))
- LogPrintf("%s: parameter interaction: -whitebind set -> setting -listen=1\n", __func__);
+ LogInfo("parameter interaction: -whitebind set -> setting -listen=1\n");
}
if (args.IsArgSet("-connect") || args.GetIntArg("-maxconnections", DEFAULT_MAX_PEER_CONNECTIONS) <= 0) {
// when only connecting to trusted nodes, do not seed via DNS, or listen by default
if (args.SoftSetBoolArg("-dnsseed", false))
- LogPrintf("%s: parameter interaction: -connect or -maxconnections=0 set -> setting -dnsseed=0\n", __func__);
+ LogInfo("parameter interaction: -connect or -maxconnections=0 set -> setting -dnsseed=0\n");
if (args.SoftSetBoolArg("-listen", false))
- LogPrintf("%s: parameter interaction: -connect or -maxconnections=0 set -> setting -listen=0\n", __func__);
+ LogInfo("parameter interaction: -connect or -maxconnections=0 set -> setting -listen=0\n");
}
std::string proxy_arg = args.GetArg("-proxy", "");
if (proxy_arg != "" && proxy_arg != "0") {
// to protect privacy, do not listen by default if a default proxy server is specified
if (args.SoftSetBoolArg("-listen", false))
- LogPrintf("%s: parameter interaction: -proxy set -> setting -listen=0\n", __func__);
+ LogInfo("parameter interaction: -proxy set -> setting -listen=0\n");
// to protect privacy, do not map ports when a proxy is set. The user may still specify -listen=1
// to listen locally, so don't rely on this happening through -listen below.
if (args.SoftSetBoolArg("-upnp", false))
- LogPrintf("%s: parameter interaction: -proxy set -> setting -upnp=0\n", __func__);
+ LogInfo("parameter interaction: -proxy set -> setting -upnp=0\n");
if (args.SoftSetBoolArg("-natpmp", false)) {
- LogPrintf("%s: parameter interaction: -proxy set -> setting -natpmp=0\n", __func__);
+ LogInfo("parameter interaction: -proxy set -> setting -natpmp=0\n");
}
// to protect privacy, do not discover addresses by default
if (args.SoftSetBoolArg("-discover", false))
- LogPrintf("%s: parameter interaction: -proxy set -> setting -discover=0\n", __func__);
+ LogInfo("parameter interaction: -proxy set -> setting -discover=0\n");
}
if (!args.GetBoolArg("-listen", DEFAULT_LISTEN)) {
// do not map ports or try to retrieve public IP when not listening (pointless)
if (args.SoftSetBoolArg("-upnp", false))
- LogPrintf("%s: parameter interaction: -listen=0 -> setting -upnp=0\n", __func__);
+ LogInfo("parameter interaction: -listen=0 -> setting -upnp=0\n");
if (args.SoftSetBoolArg("-natpmp", false)) {
- LogPrintf("%s: parameter interaction: -listen=0 -> setting -natpmp=0\n", __func__);
+ LogInfo("parameter interaction: -listen=0 -> setting -natpmp=0\n");
}
if (args.SoftSetBoolArg("-discover", false))
- LogPrintf("%s: parameter interaction: -listen=0 -> setting -discover=0\n", __func__);
+ LogInfo("parameter interaction: -listen=0 -> setting -discover=0\n");
if (args.SoftSetBoolArg("-listenonion", false))
- LogPrintf("%s: parameter interaction: -listen=0 -> setting -listenonion=0\n", __func__);
+ LogInfo("parameter interaction: -listen=0 -> setting -listenonion=0\n");
if (args.SoftSetBoolArg("-i2pacceptincoming", false)) {
- LogPrintf("%s: parameter interaction: -listen=0 -> setting -i2pacceptincoming=0\n", __func__);
+ LogInfo("parameter interaction: -listen=0 -> setting -i2pacceptincoming=0\n");
}
}
if (args.IsArgSet("-externalip")) {
// if an explicit public IP is specified, do not try to find others
if (args.SoftSetBoolArg("-discover", false))
- LogPrintf("%s: parameter interaction: -externalip set -> setting -discover=0\n", __func__);
+ LogInfo("parameter interaction: -externalip set -> setting -discover=0\n");
}
if (args.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)) {
// disable whitelistrelay in blocksonly mode
if (args.SoftSetBoolArg("-whitelistrelay", false))
- LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting -whitelistrelay=0\n", __func__);
+ LogInfo("parameter interaction: -blocksonly=1 -> setting -whitelistrelay=0\n");
// Reduce default mempool size in blocksonly mode to avoid unexpected resource usage
if (args.SoftSetArg("-maxmempool", ToString(DEFAULT_BLOCKSONLY_MAX_MEMPOOL_SIZE_MB)))
- LogPrintf("%s: parameter interaction: -blocksonly=1 -> setting -maxmempool=%d\n", __func__, DEFAULT_BLOCKSONLY_MAX_MEMPOOL_SIZE_MB);
+ LogInfo("parameter interaction: -blocksonly=1 -> setting -maxmempool=%d\n", DEFAULT_BLOCKSONLY_MAX_MEMPOOL_SIZE_MB);
}
// Forcing relay from whitelisted hosts implies we will accept relays from them in the first place.
if (args.GetBoolArg("-whitelistforcerelay", DEFAULT_WHITELISTFORCERELAY)) {
if (args.SoftSetBoolArg("-whitelistrelay", true))
- LogPrintf("%s: parameter interaction: -whitelistforcerelay=1 -> setting -whitelistrelay=1\n", __func__);
+ LogInfo("parameter interaction: -whitelistforcerelay=1 -> setting -whitelistrelay=1\n");
}
if (args.IsArgSet("-onlynet")) {
const auto onlynets = args.GetArgs("-onlynet");
@@ -805,7 +807,7 @@ void InitParameterInteraction(ArgsManager& args)
return n == NET_IPV4 || n == NET_IPV6;
});
if (!clearnet_reachable && args.SoftSetBoolArg("-dnsseed", false)) {
- LogPrintf("%s: parameter interaction: -onlynet excludes IPv4 and IPv6 -> setting -dnsseed=0\n", __func__);
+ LogInfo("parameter interaction: -onlynet excludes IPv4 and IPv6 -> setting -dnsseed=0\n");
}
}
}
@@ -837,10 +839,10 @@ std::set<BlockFilterType> g_enabled_filter_types;
{
// Rather than throwing std::bad-alloc if allocation fails, terminate
// immediately to (try to) avoid chain corruption.
- // Since LogPrintf may itself allocate memory, set the handler directly
+ // Since logging may itself allocate memory, set the handler directly
// to terminate first.
std::set_new_handler(std::terminate);
- LogPrintf("Error: Out of memory. Terminating.\n");
+ LogError("Out of memory. Terminating.\n");
// The log was successful, terminate now.
std::terminate();
@@ -1117,6 +1119,7 @@ bool AppInitLockDataDirectory()
bool AppInitInterfaces(NodeContext& node)
{
node.chain = node.init->makeChain();
+ node.mining = node.init->makeMining();
return true;
}
@@ -1175,9 +1178,9 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
scheduler.scheduleEvery([&args, &node]{
constexpr uint64_t min_disk_space = 50 << 20; // 50 MB
if (!CheckDiskSpace(args.GetBlocksDirPath(), min_disk_space)) {
- LogPrintf("Shutting down due to lack of disk space!\n");
+ LogError("Shutting down due to lack of disk space!\n");
if (!(*Assert(node.shutdown))()) {
- LogPrintf("Error: failed to send shutdown signal after disk space check\n");
+ LogError("Failed to send shutdown signal after disk space check\n");
}
}
}, std::chrono::minutes{5});
@@ -1200,7 +1203,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
for (const auto& client : node.chain_clients) {
client->registerRpcs();
}
-#if ENABLE_ZMQ
+#ifdef ENABLE_ZMQ
RegisterZMQRPCCommands(tableRPC);
#endif
@@ -1325,7 +1328,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
std::string host_out;
uint16_t port_out{0};
if (!SplitHostPort(socket_addr, port_out, host_out)) {
-#if HAVE_SOCKADDR_UN
+#ifdef HAVE_SOCKADDR_UN
// Allow unix domain sockets for some options e.g. unix:/some/file/path
if (!unix || socket_addr.find(ADDR_PREFIX_UNIX) != 0) {
return InitError(InvalidPortErrMsg(arg, socket_addr));
@@ -1472,7 +1475,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
return InitError(ResolveErrMsg("externalip", strAddr));
}
-#if ENABLE_ZMQ
+#ifdef ENABLE_ZMQ
g_zmq_notification_interface = CZMQNotificationInterface::Create(
[&chainman = node.chainman](std::vector<uint8_t>& block, const CBlockIndex& index) {
assert(chainman);
@@ -1486,7 +1489,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// ********************************************************* Step 7: load block chain
- node.notifications = std::make_unique<KernelNotifications>(*Assert(node.shutdown), node.exit_status);
+ node.notifications = std::make_unique<KernelNotifications>(*Assert(node.shutdown), node.exit_status, *Assert(node.warnings));
ReadNotificationArgs(args, *node.notifications);
ChainstateManager::Options chainman_opts{
.chainparams = chainparams,
@@ -1582,7 +1585,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
try {
return f();
} catch (const std::exception& e) {
- LogPrintf("%s\n", e.what());
+ LogError("%s\n", e.what());
return std::make_tuple(node::ChainstateLoadStatus::FAILURE, _("Error opening block database"));
}
};
@@ -1614,10 +1617,10 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
if (fRet) {
do_reindex = true;
if (!Assert(node.shutdown)->reset()) {
- LogPrintf("Internal error: failed to reset shutdown signal.\n");
+ LogError("Internal error: failed to reset shutdown signal.\n");
}
} else {
- LogPrintf("Aborted block database rebuild. Exiting.\n");
+ LogError("Aborted block database rebuild. Exiting.\n");
return false;
}
} else {
@@ -1639,7 +1642,8 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
assert(!node.peerman);
node.peerman = PeerManager::make(*node.connman, *node.addrman,
node.banman.get(), chainman,
- *node.mempool, peerman_opts);
+ *node.mempool, *node.warnings,
+ peerman_opts);
validation_signals.RegisterValidationInterface(node.peerman.get());
// ********************************************************* Step 8: start indexers
@@ -1752,7 +1756,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
if (args.GetBoolArg("-stopafterblockimport", DEFAULT_STOPAFTERBLOCKIMPORT)) {
LogPrintf("Stopping after block import\n");
if (!(*Assert(node.shutdown))()) {
- LogPrintf("Error: failed to send shutdown signal after finishing block import\n");
+ LogError("Failed to send shutdown signal after finishing block import\n");
}
return;
}
diff --git a/src/init/bitcoin-node.cpp b/src/init/bitcoin-node.cpp
index 97b8dc1161..00a3822791 100644
--- a/src/init/bitcoin-node.cpp
+++ b/src/init/bitcoin-node.cpp
@@ -30,6 +30,7 @@ public:
}
std::unique_ptr<interfaces::Node> makeNode() override { return interfaces::MakeNode(m_node); }
std::unique_ptr<interfaces::Chain> makeChain() override { return interfaces::MakeChain(m_node); }
+ std::unique_ptr<interfaces::Mining> makeMining() override { return interfaces::MakeMining(m_node); }
std::unique_ptr<interfaces::WalletLoader> makeWalletLoader(interfaces::Chain& chain) override
{
return MakeWalletLoader(chain, *Assert(m_node.args));
diff --git a/src/init/bitcoin-qt.cpp b/src/init/bitcoin-qt.cpp
index 3003a8fde1..5209c72973 100644
--- a/src/init/bitcoin-qt.cpp
+++ b/src/init/bitcoin-qt.cpp
@@ -6,6 +6,7 @@
#include <interfaces/chain.h>
#include <interfaces/echo.h>
#include <interfaces/init.h>
+#include <interfaces/mining.h>
#include <interfaces/node.h>
#include <interfaces/wallet.h>
#include <node/context.h>
@@ -25,6 +26,7 @@ public:
}
std::unique_ptr<interfaces::Node> makeNode() override { return interfaces::MakeNode(m_node); }
std::unique_ptr<interfaces::Chain> makeChain() override { return interfaces::MakeChain(m_node); }
+ std::unique_ptr<interfaces::Mining> makeMining() override { return interfaces::MakeMining(m_node); }
std::unique_ptr<interfaces::WalletLoader> makeWalletLoader(interfaces::Chain& chain) override
{
return MakeWalletLoader(chain, *Assert(m_node.args));
diff --git a/src/init/bitcoind.cpp b/src/init/bitcoind.cpp
index b5df764017..48be8831d2 100644
--- a/src/init/bitcoind.cpp
+++ b/src/init/bitcoind.cpp
@@ -6,6 +6,7 @@
#include <interfaces/chain.h>
#include <interfaces/echo.h>
#include <interfaces/init.h>
+#include <interfaces/mining.h>
#include <interfaces/node.h>
#include <interfaces/wallet.h>
#include <node/context.h>
@@ -27,6 +28,7 @@ public:
}
std::unique_ptr<interfaces::Node> makeNode() override { return interfaces::MakeNode(m_node); }
std::unique_ptr<interfaces::Chain> makeChain() override { return interfaces::MakeChain(m_node); }
+ std::unique_ptr<interfaces::Mining> makeMining() override { return interfaces::MakeMining(m_node); }
std::unique_ptr<interfaces::WalletLoader> makeWalletLoader(interfaces::Chain& chain) override
{
return MakeWalletLoader(chain, *Assert(m_node.args));
diff --git a/src/interfaces/init.h b/src/interfaces/init.h
index addc45aa26..094ead399d 100644
--- a/src/interfaces/init.h
+++ b/src/interfaces/init.h
@@ -7,6 +7,7 @@
#include <interfaces/chain.h>
#include <interfaces/echo.h>
+#include <interfaces/mining.h>
#include <interfaces/node.h>
#include <interfaces/wallet.h>
@@ -32,6 +33,7 @@ public:
virtual ~Init() = default;
virtual std::unique_ptr<Node> makeNode() { return nullptr; }
virtual std::unique_ptr<Chain> makeChain() { return nullptr; }
+ virtual std::unique_ptr<Mining> makeMining() { return nullptr; }
virtual std::unique_ptr<WalletLoader> makeWalletLoader(Chain& chain) { return nullptr; }
virtual std::unique_ptr<Echo> makeEcho() { return nullptr; }
virtual Ipc* ipc() { return nullptr; }
diff --git a/src/interfaces/mining.h b/src/interfaces/mining.h
new file mode 100644
index 0000000000..7d71a01450
--- /dev/null
+++ b/src/interfaces/mining.h
@@ -0,0 +1,84 @@
+// Copyright (c) 2024 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_INTERFACES_MINING_H
+#define BITCOIN_INTERFACES_MINING_H
+
+#include <memory>
+#include <optional>
+#include <uint256.h>
+
+namespace node {
+struct CBlockTemplate;
+struct NodeContext;
+} // namespace node
+
+class BlockValidationState;
+class CBlock;
+class CScript;
+
+namespace interfaces {
+
+//! Interface giving clients (RPC, Stratum v2 Template Provider in the future)
+//! the ability to create block templates.
+
+class Mining
+{
+public:
+ virtual ~Mining() {}
+
+ //! If this chain is exclusively used for testing
+ virtual bool isTestChain() = 0;
+
+ //! Returns whether IBD is still in progress.
+ virtual bool isInitialBlockDownload() = 0;
+
+ //! Returns the hash for the tip of this chain
+ virtual std::optional<uint256> getTipHash() = 0;
+
+ /**
+ * Construct a new block template
+ *
+ * @param[in] script_pub_key the coinbase output
+ * @param[in] use_mempool set false to omit mempool transactions
+ * @returns a block template
+ */
+ virtual std::unique_ptr<node::CBlockTemplate> createNewBlock(const CScript& script_pub_key, bool use_mempool = true) = 0;
+
+ /**
+ * Processes new block. A valid new block is automatically relayed to peers.
+ *
+ * @param[in] block The block we want to process.
+ * @param[out] new_block A boolean which is set to indicate if the block was first received via this call
+ * @returns If the block was processed, independently of block validity
+ */
+ virtual bool processNewBlock(const std::shared_ptr<const CBlock>& block, bool* new_block) = 0;
+
+ //! Return the number of transaction updates in the mempool,
+ //! used to decide whether to make a new block template.
+ virtual unsigned int getTransactionsUpdated() = 0;
+
+ /**
+ * Check a block is completely valid from start to finish.
+ * Only works on top of our current best block.
+ * Does not check proof-of-work.
+ *
+ * @param[in] block the block to validate
+ * @param[in] check_merkle_root call CheckMerkleRoot()
+ * @param[out] state details of why a block failed to validate
+ * @returns false if it does not build on the current tip, or any of the checks fail
+ */
+ virtual bool testBlockValidity(const CBlock& block, bool check_merkle_root, BlockValidationState& state) = 0;
+
+ //! Get internal node context. Useful for RPC and testing,
+ //! but not accessible across processes.
+ virtual node::NodeContext* context() { return nullptr; }
+};
+
+//! Return implementation of Mining interface.
+std::unique_ptr<Mining> MakeMining(node::NodeContext& node);
+
+} // namespace interfaces
+
+#endif // BITCOIN_INTERFACES_MINING_H
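A rough sketch of how a caller (for example the mining RPCs) might drive this interface; the include paths and the `node::CBlockTemplate::block` member are assumptions about the rest of the tree, and real callers handle errors and proof-of-work far more carefully.

```cpp
#include <interfaces/mining.h>
#include <node/miner.h>       // assumed location of node::CBlockTemplate
#include <primitives/block.h> // CBlock
#include <script/script.h>    // CScript

#include <memory>

// Illustrative only: build a template, (elsewhere) solve it, then submit it.
bool MineOneBlockSketch(interfaces::Mining& miner, const CScript& coinbase_script)
{
    if (miner.isInitialBlockDownload()) return false; // don't mine while syncing

    std::unique_ptr<node::CBlockTemplate> tmpl{miner.createNewBlock(coinbase_script)};
    if (!tmpl) return false;

    // ... proof-of-work search over tmpl->block would happen here ...

    auto block = std::make_shared<const CBlock>(tmpl->block); // assumed member
    bool new_block{false};
    return miner.processNewBlock(block, &new_block) && new_block;
}
```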
diff --git a/src/kernel/chainparams.cpp b/src/kernel/chainparams.cpp
index af02c6963b..bf3a340cb8 100644
--- a/src/kernel/chainparams.cpp
+++ b/src/kernel/chainparams.cpp
@@ -140,6 +140,7 @@ public:
vSeeds.emplace_back("seed.bitcoin.sprovoost.nl."); // Sjors Provoost
vSeeds.emplace_back("dnsseed.emzy.de."); // Stephan Oeste
vSeeds.emplace_back("seed.bitcoin.wiz.biz."); // Jason Maurice
+ vSeeds.emplace_back("seed.mainnet.achownodes.xyz."); // Ava Chow, only supports x1, x5, x9, x49, x809, x849, xd, x400, x404, x408, x448, xc08, xc48, x40c
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,0);
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,5);
@@ -246,6 +247,7 @@ public:
vSeeds.emplace_back("seed.tbtc.petertodd.net.");
vSeeds.emplace_back("seed.testnet.bitcoin.sprovoost.nl.");
vSeeds.emplace_back("testnet-seed.bluematt.me."); // Just a static list of stable node(s), only supports x9
+ vSeeds.emplace_back("seed.testnet.achownodes.xyz."); // Ava Chow, only supports x1, x5, x9, x49, x809, x849, xd, x400, x404, x408, x448, xc08, xc48, x40c
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,111);
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,196);
@@ -297,6 +299,7 @@ public:
if (!options.challenge) {
bin = ParseHex("512103ad5e0edad18cb1f0fc0d28a3d4f1f3e445640337489abb10404f2d1e086be430210359ef5021964fe22d6f8e05b2463c9540ce96883fe3b278760f048f5189f2e6c452ae");
vSeeds.emplace_back("seed.signet.bitcoin.sprovoost.nl.");
+ vSeeds.emplace_back("seed.signet.achownodes.xyz."); // Ava Chow, only supports x1, x5, x9, x49, x809, x849, xd, x400, x404, x408, x448, xc08, xc48, x40c
// Hardcoded nodes can be removed once there are more DNS seeds
vSeeds.emplace_back("178.128.221.177");
diff --git a/src/kernel/notifications_interface.h b/src/kernel/notifications_interface.h
index 7283a88e86..8e090dd7db 100644
--- a/src/kernel/notifications_interface.h
+++ b/src/kernel/notifications_interface.h
@@ -16,6 +16,8 @@ namespace kernel {
//! Result type for use with std::variant to indicate that an operation should be interrupted.
struct Interrupted{};
+enum class Warning;
+
//! Simple result type for functions that need to propagate an interrupt status and don't have other return values.
using InterruptResult = std::variant<std::monostate, Interrupted>;
@@ -38,7 +40,8 @@ public:
[[nodiscard]] virtual InterruptResult blockTip(SynchronizationState state, CBlockIndex& index) { return {}; }
virtual void headerTip(SynchronizationState state, int64_t height, int64_t timestamp, bool presync) {}
virtual void progress(const bilingual_str& title, int progress_percent, bool resume_possible) {}
- virtual void warning(const bilingual_str& warning) {}
+ virtual void warningSet(Warning id, const bilingual_str& message) {}
+ virtual void warningUnset(Warning id) {}
//! The flush error notification is sent to notify the user that an error
//! occurred while flushing block data to disk. Kernel code may ignore flush
diff --git a/src/kernel/warning.h b/src/kernel/warning.h
new file mode 100644
index 0000000000..453f36c552
--- /dev/null
+++ b/src/kernel/warning.h
@@ -0,0 +1,14 @@
+// Copyright (c) 2024-present The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_KERNEL_WARNING_H
+#define BITCOIN_KERNEL_WARNING_H
+
+namespace kernel {
+enum class Warning {
+ UNKNOWN_NEW_RULES_ACTIVATED,
+ LARGE_WORK_INVALID_CHAIN,
+};
+} // namespace kernel
+#endif // BITCOIN_KERNEL_WARNING_H
diff --git a/src/mapport.cpp b/src/mapport.cpp
index 80670230c7..1920297be6 100644
--- a/src/mapport.cpp
+++ b/src/mapport.cpp
@@ -161,8 +161,11 @@ static bool ProcessUpnp()
struct UPNPUrls urls;
struct IGDdatas data;
int r;
-
+#if MINIUPNPC_API_VERSION <= 17
r = UPNP_GetValidIGD(devlist, &urls, &data, lanaddr, sizeof(lanaddr));
+#else
+ r = UPNP_GetValidIGD(devlist, &urls, &data, lanaddr, sizeof(lanaddr), nullptr, 0);
+#endif
if (r == 1)
{
if (fDiscover) {
diff --git a/src/net.cpp b/src/net.cpp
index de974f39cb..990c58ee3d 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -3029,7 +3029,7 @@ bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError,
return false;
}
- std::unique_ptr<Sock> sock = CreateSock(addrBind.GetSAFamily());
+ std::unique_ptr<Sock> sock = CreateSock(addrBind.GetSAFamily(), SOCK_STREAM, IPPROTO_TCP);
if (!sock) {
strError = strprintf(Untranslated("Couldn't open socket for incoming connections (socket returned error %s)"), NetworkErrorString(WSAGetLastError()));
LogPrintLevel(BCLog::NET, BCLog::Level::Error, "%s\n", strError.original);
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 6374cb52c1..89b9488584 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -25,6 +25,7 @@
#include <node/blockstorage.h>
#include <node/timeoffsets.h>
#include <node/txreconciliation.h>
+#include <node/warnings.h>
#include <policy/fees.h>
#include <policy/policy.h>
#include <policy/settings.h>
@@ -130,8 +131,6 @@ static constexpr double BLOCK_DOWNLOAD_TIMEOUT_BASE = 1;
static constexpr double BLOCK_DOWNLOAD_TIMEOUT_PER_PEER = 0.5;
/** Maximum number of headers to announce when relaying blocks with headers message.*/
static const unsigned int MAX_BLOCKS_TO_ANNOUNCE = 8;
-/** Maximum number of unconnecting headers announcements before DoS score */
-static const int MAX_NUM_UNCONNECTING_HEADERS_MSGS = 10;
/** Minimum blocks required to signal NODE_NETWORK_LIMITED */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/** Window, in blocks, for connecting to NODE_NETWORK_LIMITED peers */
@@ -225,8 +224,6 @@ struct Peer {
/** Protects misbehavior data members */
Mutex m_misbehavior_mutex;
- /** Accumulated misbehavior score for this peer */
- int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
/** Whether this peer should be disconnected and marked as discouraged (unless it has NetPermissionFlags::NoBan permission). */
bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};
@@ -382,9 +379,6 @@ struct Peer {
/** Whether we've sent our peer a sendheaders message. **/
std::atomic<bool> m_sent_sendheaders{false};
- /** Length of current-streak of unconnecting headers announcements */
- int m_num_unconnecting_headers_msgs GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0};
-
/** When to potentially disconnect peer for stalling headers download */
std::chrono::microseconds m_headers_sync_timeout GUARDED_BY(NetEventsInterface::g_msgproc_mutex){0us};
@@ -489,7 +483,7 @@ class PeerManagerImpl final : public PeerManager
public:
PeerManagerImpl(CConnman& connman, AddrMan& addrman,
BanMan* banman, ChainstateManager& chainman,
- CTxMemPool& pool, Options opts);
+ CTxMemPool& pool, node::Warnings& warnings, Options opts);
/** Overridden from CValidationInterface. */
void BlockConnected(ChainstateRole role, const std::shared_ptr<const CBlock>& pblock, const CBlockIndex* pindexConnected) override
@@ -526,7 +520,7 @@ public:
m_best_height = height;
m_best_block_time = time;
};
- void UnitTestMisbehaving(NodeId peer_id, int howmuch) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), howmuch, ""); };
+ void UnitTestMisbehaving(NodeId peer_id) override EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex) { Misbehaving(*Assert(GetPeerRef(peer_id)), ""); };
void ProcessMessage(CNode& pfrom, const std::string& msg_type, DataStream& vRecv,
const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex, !m_recent_confirmed_transactions_mutex, !m_most_recent_block_mutex, !m_headers_presync_mutex, g_msgproc_mutex);
@@ -551,11 +545,9 @@ private:
* May return an empty shared_ptr if the Peer object can't be found. */
PeerRef RemovePeer(NodeId id) EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
- /**
- * Increment peer's misbehavior score. If the new value >= DISCOURAGEMENT_THRESHOLD, mark the node
- * to be discouraged, meaning the peer might be disconnected and added to the discouragement filter.
- */
- void Misbehaving(Peer& peer, int howmuch, const std::string& message);
+ /** Mark a peer as misbehaving, which will cause it to be disconnected and its
+ * address discouraged. */
+ void Misbehaving(Peer& peer, const std::string& message);
/**
* Potentially mark a node discouraged based on the contents of a BlockValidationState object
@@ -564,19 +556,15 @@ private:
* punish peers differently depending on whether the data was provided in a compact
* block message or not. If the compact block had a valid header, but contained invalid
* txs, the peer should not be punished. See BIP 152.
- *
- * @return Returns true if the peer was punished (probably disconnected)
*/
- bool MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
+ void MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
bool via_compact_block, const std::string& message = "")
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/**
* Potentially disconnect and discourage a node based on the contents of a TxValidationState object
- *
- * @return Returns true if the peer was punished (probably disconnected)
*/
- bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
+ void MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
EXCLUSIVE_LOCKS_REQUIRED(!m_peer_mutex);
/** Maybe disconnect a peer and discourage future connections from its address.
@@ -667,10 +655,10 @@ private:
bool CheckHeadersPoW(const std::vector<CBlockHeader>& headers, const Consensus::Params& consensusParams, Peer& peer);
/** Calculate an anti-DoS work threshold for headers chains */
arith_uint256 GetAntiDoSWorkThreshold();
- /** Deal with state tracking and headers sync for peers that send the
- * occasional non-connecting header (this can happen due to BIP 130 headers
+ /** Deal with state tracking and headers sync for peers that send
+ * non-connecting headers (this can happen due to BIP 130 headers
* announcements for blocks interacting with the 2hr (MAX_FUTURE_BLOCK_TIME) rule). */
- void HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
+ void HandleUnconnectingHeaders(CNode& pfrom, Peer& peer, const std::vector<CBlockHeader>& headers) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
/** Return true if the headers connect to each other, false otherwise */
bool CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const;
/** Try to continue a low-work headers sync that has already begun.
@@ -790,7 +778,8 @@ private:
/** Next time to check for stale tip */
std::chrono::seconds m_stale_tip_check_time GUARDED_BY(cs_main){0s};
- TimeOffsets m_outbound_time_offsets;
+ node::Warnings& m_warnings;
+ TimeOffsets m_outbound_time_offsets{m_warnings};
const Options m_opts;
@@ -1175,7 +1164,7 @@ private:
void PushAddress(Peer& peer, const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(g_msgproc_mutex);
};
-const CNodeState* PeerManagerImpl::State(NodeId pnode) const EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+const CNodeState* PeerManagerImpl::State(NodeId pnode) const
{
std::map<NodeId, CNodeState>::const_iterator it = m_node_states.find(pnode);
if (it == m_node_states.end())
@@ -1183,7 +1172,7 @@ const CNodeState* PeerManagerImpl::State(NodeId pnode) const EXCLUSIVE_LOCKS_REQ
return &it->second;
}
-CNodeState* PeerManagerImpl::State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
+CNodeState* PeerManagerImpl::State(NodeId pnode)
{
return const_cast<CNodeState*>(std::as_const(*this).State(pnode));
}
@@ -1716,7 +1705,6 @@ void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler)
void PeerManagerImpl::FinalizeNode(const CNode& node)
{
NodeId nodeid = node.GetId();
- int misbehavior{0};
{
LOCK(cs_main);
{
@@ -1727,7 +1715,6 @@ void PeerManagerImpl::FinalizeNode(const CNode& node)
// destructed.
PeerRef peer = RemovePeer(nodeid);
assert(peer != nullptr);
- misbehavior = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score);
m_wtxid_relay_peers -= peer->m_wtxid_relay;
assert(m_wtxid_relay_peers >= 0);
}
@@ -1770,7 +1757,7 @@ void PeerManagerImpl::FinalizeNode(const CNode& node)
assert(m_orphanage.Size() == 0);
}
} // cs_main
- if (node.fSuccessfullyConnected && misbehavior == 0 &&
+ if (node.fSuccessfullyConnected &&
!node.IsBlockOnlyConn() && !node.IsInboundConn()) {
// Only change visible addrman state for full outbound peers. We don't
// call Connected() for feeler connections since they don't have
@@ -1891,28 +1878,16 @@ void PeerManagerImpl::AddToCompactExtraTransactions(const CTransactionRef& tx)
vExtraTxnForCompactIt = (vExtraTxnForCompactIt + 1) % m_opts.max_extra_txs;
}
-void PeerManagerImpl::Misbehaving(Peer& peer, int howmuch, const std::string& message)
+void PeerManagerImpl::Misbehaving(Peer& peer, const std::string& message)
{
- assert(howmuch > 0);
-
LOCK(peer.m_misbehavior_mutex);
- const int score_before{peer.m_misbehavior_score};
- peer.m_misbehavior_score += howmuch;
- const int score_now{peer.m_misbehavior_score};
const std::string message_prefixed = message.empty() ? "" : (": " + message);
- std::string warning;
-
- if (score_now >= DISCOURAGEMENT_THRESHOLD && score_before < DISCOURAGEMENT_THRESHOLD) {
- warning = " DISCOURAGE THRESHOLD EXCEEDED";
- peer.m_should_discourage = true;
- }
-
- LogPrint(BCLog::NET, "Misbehaving: peer=%d (%d -> %d)%s%s\n",
- peer.m_id, score_before, score_now, warning, message_prefixed);
+ peer.m_should_discourage = true;
+ LogPrint(BCLog::NET, "Misbehaving: peer=%d%s\n", peer.m_id, message_prefixed);
}
-bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
+void PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidationState& state,
bool via_compact_block, const std::string& message)
{
PeerRef peer{GetPeerRef(nodeid)};
@@ -1927,8 +1902,8 @@ bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati
case BlockValidationResult::BLOCK_CONSENSUS:
case BlockValidationResult::BLOCK_MUTATED:
if (!via_compact_block) {
- if (peer) Misbehaving(*peer, 100, message);
- return true;
+ if (peer) Misbehaving(*peer, message);
+ return;
}
break;
case BlockValidationResult::BLOCK_CACHED_INVALID:
@@ -1942,21 +1917,20 @@ bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati
// Discourage outbound (but not inbound) peers if on an invalid chain.
// Exempt HB compact block peers. Manual connections are always protected from discouragement.
if (!via_compact_block && !node_state->m_is_inbound) {
- if (peer) Misbehaving(*peer, 100, message);
- return true;
+ if (peer) Misbehaving(*peer, message);
+ return;
}
break;
}
case BlockValidationResult::BLOCK_INVALID_HEADER:
case BlockValidationResult::BLOCK_CHECKPOINT:
case BlockValidationResult::BLOCK_INVALID_PREV:
- if (peer) Misbehaving(*peer, 100, message);
- return true;
+ if (peer) Misbehaving(*peer, message);
+ return;
// Conflicting (but not necessarily invalid) data or different policy:
case BlockValidationResult::BLOCK_MISSING_PREV:
- // TODO: Handle this much more gracefully (10 DoS points is super arbitrary)
- if (peer) Misbehaving(*peer, 10, message);
- return true;
+ if (peer) Misbehaving(*peer, message);
+ return;
case BlockValidationResult::BLOCK_RECENT_CONSENSUS_CHANGE:
case BlockValidationResult::BLOCK_TIME_FUTURE:
break;
@@ -1964,10 +1938,9 @@ bool PeerManagerImpl::MaybePunishNodeForBlock(NodeId nodeid, const BlockValidati
if (message != "") {
LogPrint(BCLog::NET, "peer=%d: %s\n", nodeid, message);
}
- return false;
}
-bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
+void PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state)
{
PeerRef peer{GetPeerRef(nodeid)};
switch (state.GetResult()) {
@@ -1975,8 +1948,8 @@ bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationStat
break;
// The node is providing invalid data:
case TxValidationResult::TX_CONSENSUS:
- if (peer) Misbehaving(*peer, 100, "");
- return true;
+ if (peer) Misbehaving(*peer, "");
+ return;
// Conflicting (but not necessarily invalid) data or different policy:
case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE:
case TxValidationResult::TX_INPUTS_NOT_STANDARD:
@@ -1992,7 +1965,6 @@ bool PeerManagerImpl::MaybePunishNodeForTx(NodeId nodeid, const TxValidationStat
case TxValidationResult::TX_UNKNOWN:
break;
}
- return false;
}
bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
@@ -2042,14 +2014,14 @@ std::optional<std::string> PeerManagerImpl::FetchBlock(NodeId peer_id, const CBl
std::unique_ptr<PeerManager> PeerManager::make(CConnman& connman, AddrMan& addrman,
BanMan* banman, ChainstateManager& chainman,
- CTxMemPool& pool, Options opts)
+ CTxMemPool& pool, node::Warnings& warnings, Options opts)
{
- return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman, pool, opts);
+ return std::make_unique<PeerManagerImpl>(connman, addrman, banman, chainman, pool, warnings, opts);
}
PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman,
BanMan* banman, ChainstateManager& chainman,
- CTxMemPool& pool, Options opts)
+ CTxMemPool& pool, node::Warnings& warnings, Options opts)
: m_rng{opts.deterministic_rng},
m_fee_filter_rounder{CFeeRate{DEFAULT_MIN_RELAY_TX_FEE}, m_rng},
m_chainparams(chainman.GetParams()),
@@ -2058,6 +2030,7 @@ PeerManagerImpl::PeerManagerImpl(CConnman& connman, AddrMan& addrman,
m_banman(banman),
m_chainman(chainman),
m_mempool(pool),
+ m_warnings{warnings},
m_opts{opts}
{
// While Erlay support is incomplete, it must be enabled explicitly via -txreconciliation.
@@ -2676,7 +2649,7 @@ void PeerManagerImpl::SendBlockTransactions(CNode& pfrom, Peer& peer, const CBlo
BlockTransactions resp(req);
for (size_t i = 0; i < req.indexes.size(); i++) {
if (req.indexes[i] >= block.vtx.size()) {
- Misbehaving(peer, 100, "getblocktxn with out-of-bounds tx indices");
+ Misbehaving(peer, "getblocktxn with out-of-bounds tx indices");
return;
}
resp.txn[i] = block.vtx[req.indexes[i]];
@@ -2689,13 +2662,13 @@ bool PeerManagerImpl::CheckHeadersPoW(const std::vector<CBlockHeader>& headers,
{
// Do these headers have proof-of-work matching what's claimed?
if (!HasValidProofOfWork(headers, consensusParams)) {
- Misbehaving(peer, 100, "header with invalid proof of work");
+ Misbehaving(peer, "header with invalid proof of work");
return false;
}
// Are these headers connected to each other?
if (!CheckHeadersAreContinuous(headers)) {
- Misbehaving(peer, 20, "non-continuous headers sequence");
+ Misbehaving(peer, "non-continuous headers sequence");
return false;
}
return true;
@@ -2719,37 +2692,24 @@ arith_uint256 PeerManagerImpl::GetAntiDoSWorkThreshold()
* announcement.
*
* We'll send a getheaders message in response to try to connect the chain.
- *
- * The peer can send up to MAX_NUM_UNCONNECTING_HEADERS_MSGS in a row that
- * don't connect before given DoS points.
- *
- * Once a headers message is received that is valid and does connect,
- * m_num_unconnecting_headers_msgs gets reset back to 0.
*/
-void PeerManagerImpl::HandleFewUnconnectingHeaders(CNode& pfrom, Peer& peer,
+void PeerManagerImpl::HandleUnconnectingHeaders(CNode& pfrom, Peer& peer,
const std::vector<CBlockHeader>& headers)
{
- peer.m_num_unconnecting_headers_msgs++;
// Try to fill in the missing headers.
const CBlockIndex* best_header{WITH_LOCK(cs_main, return m_chainman.m_best_header)};
if (MaybeSendGetHeaders(pfrom, GetLocator(best_header), peer)) {
- LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, m_num_unconnecting_headers_msgs=%d)\n",
+ LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d)\n",
headers[0].GetHash().ToString(),
headers[0].hashPrevBlock.ToString(),
best_header->nHeight,
- pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
+ pfrom.GetId());
}
// Set hashLastUnknownBlock for this peer, so that if we
// eventually get the headers - even from a different peer -
// we can use this peer to download.
WITH_LOCK(cs_main, UpdateBlockAvailability(pfrom.GetId(), headers.back().GetHash()));
-
- // The peer may just be broken, so periodically assign DoS points if this
- // condition persists.
- if (peer.m_num_unconnecting_headers_msgs % MAX_NUM_UNCONNECTING_HEADERS_MSGS == 0) {
- Misbehaving(peer, 20, strprintf("%d non-connecting headers", peer.m_num_unconnecting_headers_msgs));
- }
}
bool PeerManagerImpl::CheckHeadersAreContinuous(const std::vector<CBlockHeader>& headers) const
@@ -2768,25 +2728,21 @@ bool PeerManagerImpl::IsContinuationOfLowWorkHeadersSync(Peer& peer, CNode& pfro
{
if (peer.m_headers_sync) {
auto result = peer.m_headers_sync->ProcessNextHeaders(headers, headers.size() == MAX_HEADERS_RESULTS);
+ // If it is a valid continuation, we should treat the existing getheaders request as responded to.
+ if (result.success) peer.m_last_getheaders_timestamp = {};
if (result.request_more) {
auto locator = peer.m_headers_sync->NextHeadersRequestLocator();
// If we were instructed to ask for a locator, it should not be empty.
Assume(!locator.vHave.empty());
+ // We can only be instructed to request more if processing was successful.
+ Assume(result.success);
if (!locator.vHave.empty()) {
// It should be impossible for the getheaders request to fail,
- // because we should have cleared the last getheaders timestamp
- // when processing the headers that triggered this call. But
- // it may be possible to bypass this via compactblock
- // processing, so check the result before logging just to be
- // safe.
+ // because we just cleared the last getheaders timestamp.
bool sent_getheaders = MaybeSendGetHeaders(pfrom, locator, peer);
- if (sent_getheaders) {
- LogPrint(BCLog::NET, "more getheaders (from %s) to peer=%d\n",
- locator.vHave.front().ToString(), pfrom.GetId());
- } else {
- LogPrint(BCLog::NET, "error sending next getheaders (from %s) to continue sync with peer=%d\n",
- locator.vHave.front().ToString(), pfrom.GetId());
- }
+ Assume(sent_getheaders);
+ LogPrint(BCLog::NET, "more getheaders (from %s) to peer=%d\n",
+ locator.vHave.front().ToString(), pfrom.GetId());
}
}
@@ -2995,11 +2951,6 @@ void PeerManagerImpl::HeadersDirectFetchBlocks(CNode& pfrom, const Peer& peer, c
void PeerManagerImpl::UpdatePeerStateForReceivedHeaders(CNode& pfrom, Peer& peer,
const CBlockIndex& last_header, bool received_new_header, bool may_have_more_headers)
{
- if (peer.m_num_unconnecting_headers_msgs > 0) {
- LogPrint(BCLog::NET, "peer=%d: resetting m_num_unconnecting_headers_msgs (%d -> 0)\n", pfrom.GetId(), peer.m_num_unconnecting_headers_msgs);
- }
- peer.m_num_unconnecting_headers_msgs = 0;
-
LOCK(cs_main);
CNodeState *nodestate = State(pfrom.GetId());
@@ -3065,6 +3016,9 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
LOCK(m_headers_presync_mutex);
m_headers_presync_stats.erase(pfrom.GetId());
}
+ // A headers message with no headers cannot be an announcement, so assume
+ // it is a response to our last getheaders request, if there is one.
+ peer.m_last_getheaders_timestamp = {};
return;
}
@@ -3118,17 +3072,18 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, Peer& peer,
bool headers_connect_blockindex{chain_start_header != nullptr};
if (!headers_connect_blockindex) {
- if (nCount <= MAX_BLOCKS_TO_ANNOUNCE) {
- // If this looks like it could be a BIP 130 block announcement, use
- // special logic for handling headers that don't connect, as this
- // could be benign.
- HandleFewUnconnectingHeaders(pfrom, peer, headers);
- } else {
- Misbehaving(peer, 10, "invalid header received");
- }
+ // This could be a BIP 130 block announcement, use
+ // special logic for handling headers that don't connect, as this
+ // could be benign.
+ HandleUnconnectingHeaders(pfrom, peer, headers);
return;
}
+ // If headers connect, assume that this is in response to any outstanding getheaders
+ // request we may have sent, and clear out the time of our last request. Non-connecting
+ // headers cannot be a response to a getheaders request.
+ peer.m_last_getheaders_timestamp = {};
+
// If the headers we received are already in memory and an ancestor of
// m_best_header or our tip, skip anti-DoS checks. These headers will not
// use any more memory (and we are not leaking information that could be
@@ -3646,7 +3601,7 @@ void PeerManagerImpl::ProcessCompactBlockTxns(CNode& pfrom, Peer& peer, const Bl
ReadStatus status = partialBlock.FillBlock(*pblock, block_transactions.txn);
if (status == READ_STATUS_INVALID) {
RemoveBlockRequest(block_transactions.blockhash, pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
- Misbehaving(peer, 100, "invalid compact block/non-matching block transactions");
+ Misbehaving(peer, "invalid compact block/non-matching block transactions");
return;
} else if (status == READ_STATUS_FAILED) {
if (first_in_flight) {
@@ -4130,7 +4085,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (vAddr.size() > MAX_ADDR_TO_SEND)
{
- Misbehaving(*peer, 20, strprintf("%s message size = %u", msg_type, vAddr.size()));
+ Misbehaving(*peer, strprintf("%s message size = %u", msg_type, vAddr.size()));
return;
}
@@ -4212,7 +4167,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
- Misbehaving(*peer, 20, strprintf("inv message size = %u", vInv.size()));
+ Misbehaving(*peer, strprintf("inv message size = %u", vInv.size()));
return;
}
@@ -4304,7 +4259,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
vRecv >> vInv;
if (vInv.size() > MAX_INV_SZ)
{
- Misbehaving(*peer, 20, strprintf("getdata message size = %u", vInv.size()));
+ Misbehaving(*peer, strprintf("getdata message size = %u", vInv.size()));
return;
}
@@ -4844,7 +4799,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact);
if (status == READ_STATUS_INVALID) {
RemoveBlockRequest(pindex->GetBlockHash(), pfrom.GetId()); // Reset in-flight state in case Misbehaving does not result in a disconnect
- Misbehaving(*peer, 100, "invalid compact block");
+ Misbehaving(*peer, "invalid compact block");
return;
} else if (status == READ_STATUS_FAILED) {
if (first_in_flight) {
@@ -4984,16 +4939,12 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
- // Assume that this is in response to any outstanding getheaders
- // request we may have sent, and clear out the time of our last request
- peer->m_last_getheaders_timestamp = {};
-
std::vector<CBlockHeader> headers;
// Bypass the normal CBlock deserialization, as we don't want to risk deserializing 2000 full blocks.
unsigned int nCount = ReadCompactSize(vRecv);
if (nCount > MAX_HEADERS_RESULTS) {
- Misbehaving(*peer, 20, strprintf("headers message size = %u", nCount));
+ Misbehaving(*peer, strprintf("headers message size = %u", nCount));
return;
}
headers.resize(nCount);
@@ -5040,7 +4991,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (prev_block && IsBlockMutated(/*block=*/*pblock,
/*check_witness_root=*/DeploymentActiveAfter(prev_block, m_chainman, Consensus::DEPLOYMENT_SEGWIT))) {
LogDebug(BCLog::NET, "Received mutated block from peer=%d\n", peer->m_id);
- Misbehaving(*peer, 100, "mutated block");
+ Misbehaving(*peer, "mutated block");
WITH_LOCK(cs_main, RemoveBlockRequest(pblock->GetHash(), peer->m_id));
return;
}
@@ -5221,7 +5172,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
if (!filter.IsWithinSizeConstraints())
{
// There is no excuse for sending a too-large filter
- Misbehaving(*peer, 100, "too-large bloom filter");
+ Misbehaving(*peer, "too-large bloom filter");
} else if (auto tx_relay = peer->GetTxRelay(); tx_relay != nullptr) {
{
LOCK(tx_relay->m_bloom_filter_mutex);
@@ -5257,7 +5208,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
}
if (bad) {
- Misbehaving(*peer, 100, "bad filteradd message");
+ Misbehaving(*peer, "bad filteradd message");
}
return;
}
diff --git a/src/net_processing.h b/src/net_processing.h
index 85e399d948..bf9698ee02 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -16,6 +16,10 @@ class CChainParams;
class CTxMemPool;
class ChainstateManager;
+namespace node {
+class Warnings;
+} // namespace node
+
/** Whether transaction reconciliation protocol should be enabled by default. */
static constexpr bool DEFAULT_TXRECONCILIATION_ENABLE{false};
/** Default for -maxorphantx, maximum number of orphan transactions kept in memory */
@@ -25,8 +29,6 @@ static const uint32_t DEFAULT_MAX_ORPHAN_TRANSACTIONS{100};
static const uint32_t DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN{100};
static const bool DEFAULT_PEERBLOOMFILTERS = false;
static const bool DEFAULT_PEERBLOCKFILTERS = false;
-/** Threshold for marking a node to be discouraged, e.g. disconnected and added to the discouragement filter. */
-static const int DISCOURAGEMENT_THRESHOLD{100};
/** Maximum number of outstanding CMPCTBLOCK requests for the same block. */
static const unsigned int MAX_CMPCTBLOCKS_INFLIGHT_PER_BLOCK = 3;
@@ -73,7 +75,7 @@ public:
static std::unique_ptr<PeerManager> make(CConnman& connman, AddrMan& addrman,
BanMan* banman, ChainstateManager& chainman,
- CTxMemPool& pool, Options opts);
+ CTxMemPool& pool, node::Warnings& warnings, Options opts);
virtual ~PeerManager() { }
/**
@@ -104,7 +106,7 @@ public:
virtual void SetBestBlock(int height, std::chrono::seconds time) = 0;
/* Public for unit testing. */
- virtual void UnitTestMisbehaving(NodeId peer_id, int howmuch) = 0;
+ virtual void UnitTestMisbehaving(NodeId peer_id) = 0;
/**
* Evict extra outbound peers. If we think our tip may be stale, connect to an extra outbound.
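
What the misbehavior change above means for callers, as an illustrative sketch only (not part of the patch): with the score and DISCOURAGEMENT_THRESHOLD gone, a single Misbehaving() call is enough to flag a peer, so a test driving the public interface might look like this, where `peerman` and `peer_id` are assumed fixture members:

    // Illustrative test fragment; no score argument anymore.
    peerman->UnitTestMisbehaving(peer_id);
    // The peer's m_should_discourage flag is now set; it is expected to be
    // discouraged/disconnected the next time its messages are processed.
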
diff --git a/src/netbase.cpp b/src/netbase.cpp
index ff46061d3d..f5f0997ba6 100644
--- a/src/netbase.cpp
+++ b/src/netbase.cpp
@@ -23,7 +23,7 @@
#include <limits>
#include <memory>
-#if HAVE_SOCKADDR_UN
+#ifdef HAVE_SOCKADDR_UN
#include <sys/un.h>
#endif
@@ -218,7 +218,7 @@ CService LookupNumeric(const std::string& name, uint16_t portDefault, DNSLookupF
bool IsUnixSocketPath(const std::string& name)
{
-#if HAVE_SOCKADDR_UN
+#ifdef HAVE_SOCKADDR_UN
if (name.find(ADDR_PREFIX_UNIX) != 0) return false;
// Split off "unix:" prefix
@@ -487,24 +487,23 @@ bool Socks5(const std::string& strDest, uint16_t port, const ProxyCredentials* a
}
}
-std::unique_ptr<Sock> CreateSockOS(sa_family_t address_family)
+std::unique_ptr<Sock> CreateSockOS(int domain, int type, int protocol)
{
// Not IPv4, IPv6 or UNIX
- if (address_family == AF_UNSPEC) return nullptr;
-
- int protocol{IPPROTO_TCP};
-#if HAVE_SOCKADDR_UN
- if (address_family == AF_UNIX) protocol = 0;
-#endif
+ if (domain == AF_UNSPEC) return nullptr;
// Create a socket in the specified address family.
- SOCKET hSocket = socket(address_family, SOCK_STREAM, protocol);
+ SOCKET hSocket = socket(domain, type, protocol);
if (hSocket == INVALID_SOCKET) {
return nullptr;
}
auto sock = std::make_unique<Sock>(hSocket);
+ if (domain != AF_INET && domain != AF_INET6 && domain != AF_UNIX) {
+ return sock;
+ }
+
// Ensure that waiting for I/O on this socket won't result in undefined
// behavior.
if (!sock->IsSelectable()) {
@@ -528,19 +527,22 @@ std::unique_ptr<Sock> CreateSockOS(sa_family_t address_family)
return nullptr;
}
-#if HAVE_SOCKADDR_UN
- if (address_family == AF_UNIX) return sock;
+#ifdef HAVE_SOCKADDR_UN
+ if (domain == AF_UNIX) return sock;
#endif
- // Set the no-delay option (disable Nagle's algorithm) on the TCP socket.
- const int on{1};
- if (sock->SetSockOpt(IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)) == SOCKET_ERROR) {
- LogPrint(BCLog::NET, "Unable to set TCP_NODELAY on a newly created socket, continuing anyway\n");
+ if (protocol == IPPROTO_TCP) {
+ // Set the no-delay option (disable Nagle's algorithm) on the TCP socket.
+ const int on{1};
+ if (sock->SetSockOpt(IPPROTO_TCP, TCP_NODELAY, &on, sizeof(on)) == SOCKET_ERROR) {
+ LogPrint(BCLog::NET, "Unable to set TCP_NODELAY on a newly created socket, continuing anyway\n");
+ }
}
+
return sock;
}
-std::function<std::unique_ptr<Sock>(const sa_family_t&)> CreateSock = CreateSockOS;
+std::function<std::unique_ptr<Sock>(int, int, int)> CreateSock = CreateSockOS;
template<typename... Args>
static void LogConnectFailure(bool manual_connection, const char* fmt, const Args&... args) {
@@ -609,7 +611,7 @@ static bool ConnectToSocket(const Sock& sock, struct sockaddr* sockaddr, socklen
std::unique_ptr<Sock> ConnectDirectly(const CService& dest, bool manual_connection)
{
- auto sock = CreateSock(dest.GetSAFamily());
+ auto sock = CreateSock(dest.GetSAFamily(), SOCK_STREAM, IPPROTO_TCP);
if (!sock) {
LogPrintLevel(BCLog::NET, BCLog::Level::Error, "Cannot create a socket for connecting to %s\n", dest.ToStringAddrPort());
return {};
@@ -636,8 +638,8 @@ std::unique_ptr<Sock> Proxy::Connect() const
if (!m_is_unix_socket) return ConnectDirectly(proxy, /*manual_connection=*/true);
-#if HAVE_SOCKADDR_UN
- auto sock = CreateSock(AF_UNIX);
+#ifdef HAVE_SOCKADDR_UN
+ auto sock = CreateSock(AF_UNIX, SOCK_STREAM, 0);
if (!sock) {
LogPrintLevel(BCLog::NET, BCLog::Level::Error, "Cannot create a socket for connecting to %s\n", m_unix_socket_path);
return {};
diff --git a/src/netbase.h b/src/netbase.h
index 321c288f67..8ef6c28996 100644
--- a/src/netbase.h
+++ b/src/netbase.h
@@ -262,16 +262,18 @@ CService LookupNumeric(const std::string& name, uint16_t portDefault = 0, DNSLoo
CSubNet LookupSubNet(const std::string& subnet_str);
/**
- * Create a TCP or UNIX socket in the given address family.
- * @param[in] address_family to use for the socket.
+ * Create a real socket from the operating system.
+ * @param[in] domain Communications domain, first argument to the socket(2) syscall.
+ * @param[in] type Type of the socket, second argument to the socket(2) syscall.
+ * @param[in] protocol The particular protocol to be used with the socket, third argument to the socket(2) syscall.
* @return pointer to the created Sock object or unique_ptr that owns nothing in case of failure
*/
-std::unique_ptr<Sock> CreateSockOS(sa_family_t address_family);
+std::unique_ptr<Sock> CreateSockOS(int domain, int type, int protocol);
/**
* Socket factory. Defaults to `CreateSockOS()`, but can be overridden by unit tests.
*/
-extern std::function<std::unique_ptr<Sock>(const sa_family_t&)> CreateSock;
+extern std::function<std::unique_ptr<Sock>(int, int, int)> CreateSock;
/**
* Create a socket and try to connect to the specified service.
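
Because CreateSock now takes the raw socket(2) triple, a unit test can stub it without special-casing address families. A minimal sketch, where DummySock stands in for whatever mock Sock subclass the test suite provides (an assumption, not something this patch adds):

    // Hypothetical test override of the CreateSock factory declared above.
    CreateSock = [](int domain, int type, int protocol) -> std::unique_ptr<Sock> {
        if (domain == AF_UNIX) return nullptr;    // e.g. simulate missing unix socket support
        return std::make_unique<DummySock>();     // otherwise hand back the mock
    };
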
diff --git a/src/node/abort.cpp b/src/node/abort.cpp
index b727608384..8a17c41fd2 100644
--- a/src/node/abort.cpp
+++ b/src/node/abort.cpp
@@ -6,19 +6,18 @@
#include <logging.h>
#include <node/interface_ui.h>
+#include <node/warnings.h>
#include <util/signalinterrupt.h>
#include <util/translation.h>
-#include <warnings.h>
#include <atomic>
#include <cstdlib>
-#include <string>
namespace node {
-void AbortNode(util::SignalInterrupt* shutdown, std::atomic<int>& exit_status, const bilingual_str& message)
+void AbortNode(util::SignalInterrupt* shutdown, std::atomic<int>& exit_status, const bilingual_str& message, node::Warnings* warnings)
{
- SetMiscWarning(message);
+ if (warnings) warnings->Set(Warning::FATAL_INTERNAL_ERROR, message);
InitError(_("A fatal internal error occurred, see debug.log for details: ") + message);
exit_status.store(EXIT_FAILURE);
if (shutdown && !(*shutdown)()) {
diff --git a/src/node/abort.h b/src/node/abort.h
index 1092279142..c881af4634 100644
--- a/src/node/abort.h
+++ b/src/node/abort.h
@@ -14,7 +14,8 @@ class SignalInterrupt;
} // namespace util
namespace node {
-void AbortNode(util::SignalInterrupt* shutdown, std::atomic<int>& exit_status, const bilingual_str& message);
+class Warnings;
+void AbortNode(util::SignalInterrupt* shutdown, std::atomic<int>& exit_status, const bilingual_str& message, node::Warnings* warnings);
} // namespace node
#endif // BITCOIN_NODE_ABORT_H
diff --git a/src/node/context.cpp b/src/node/context.cpp
index e32d21b383..75dfaee866 100644
--- a/src/node/context.cpp
+++ b/src/node/context.cpp
@@ -7,12 +7,14 @@
#include <addrman.h>
#include <banman.h>
#include <interfaces/chain.h>
+#include <interfaces/mining.h>
#include <kernel/context.h>
#include <key.h>
#include <net.h>
#include <net_processing.h>
#include <netgroup.h>
#include <node/kernel_notifications.h>
+#include <node/warnings.h>
#include <policy/fees.h>
#include <scheduler.h>
#include <txmempool.h>
diff --git a/src/node/context.h b/src/node/context.h
index a7d92989dd..a664fad80b 100644
--- a/src/node/context.h
+++ b/src/node/context.h
@@ -27,6 +27,7 @@ class PeerManager;
namespace interfaces {
class Chain;
class ChainClient;
+class Mining;
class Init;
class WalletLoader;
} // namespace interfaces
@@ -39,6 +40,7 @@ class SignalInterrupt;
namespace node {
class KernelNotifications;
+class Warnings;
//! NodeContext struct containing references to chain state and connection
//! state.
@@ -73,6 +75,7 @@ struct NodeContext {
std::vector<std::unique_ptr<interfaces::ChainClient>> chain_clients;
    //! Reference to chain client that should be used to load or create wallets
    //! opened by the gui.
+ std::unique_ptr<interfaces::Mining> mining;
interfaces::WalletLoader* wallet_loader{nullptr};
std::unique_ptr<CScheduler> scheduler;
std::function<void()> rpc_interruption_point = [] {};
@@ -81,6 +84,8 @@ struct NodeContext {
//! Issues calls about blocks and transactions
std::unique_ptr<ValidationSignals> validation_signals;
std::atomic<int> exit_status{EXIT_SUCCESS};
+ //! Manages all the node warnings
+ std::unique_ptr<node::Warnings> warnings;
//! Declare default constructor and destructor that are not inline, so code
//! instantiating the NodeContext struct doesn't need to #include class
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index 19f4aaf9c4..fa151407fa 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -8,12 +8,14 @@
#include <chain.h>
#include <chainparams.h>
#include <common/args.h>
+#include <consensus/validation.h>
#include <deploymentstatus.h>
#include <external_signer.h>
#include <index/blockfilterindex.h>
#include <init.h>
#include <interfaces/chain.h>
#include <interfaces/handler.h>
+#include <interfaces/mining.h>
#include <interfaces/node.h>
#include <interfaces/wallet.h>
#include <kernel/chain.h>
@@ -30,8 +32,10 @@
#include <node/context.h>
#include <node/interface_ui.h>
#include <node/mini_miner.h>
+#include <node/miner.h>
#include <node/transaction.h>
#include <node/types.h>
+#include <node/warnings.h>
#include <policy/feerate.h>
#include <policy/fees.h>
#include <policy/policy.h>
@@ -53,7 +57,6 @@
#include <util/translation.h>
#include <validation.h>
#include <validationinterface.h>
-#include <warnings.h>
#include <config/bitcoin-config.h> // IWYU pragma: keep
@@ -69,8 +72,10 @@ using interfaces::Chain;
using interfaces::FoundBlock;
using interfaces::Handler;
using interfaces::MakeSignalHandler;
+using interfaces::Mining;
using interfaces::Node;
using interfaces::WalletLoader;
+using node::BlockAssembler;
using util::Join;
namespace node {
@@ -93,7 +98,7 @@ public:
explicit NodeImpl(NodeContext& context) { setContext(&context); }
void initLogging() override { InitLogging(args()); }
void initParameterInteraction() override { InitParameterInteraction(args()); }
- bilingual_str getWarnings() override { return Join(GetWarnings(), Untranslated("<hr />")); }
+ bilingual_str getWarnings() override { return Join(Assert(m_context->warnings)->GetMessages(), Untranslated("<hr />")); }
int getExitStatus() override { return Assert(m_context)->exit_status.load(); }
uint32_t getLogCategories() override { return LogInstance().GetCategoryMask(); }
bool baseInitialize() override
@@ -101,6 +106,7 @@ public:
if (!AppInitBasicSetup(args(), Assert(context())->exit_status)) return false;
if (!AppInitParameterInteraction(args())) return false;
+ m_context->warnings = std::make_unique<node::Warnings>();
m_context->kernel = std::make_unique<kernel::Context>();
m_context->ecc_context = std::make_unique<ECC_Context>();
if (!AppInitSanityChecks(*m_context->kernel)) return false;
@@ -125,7 +131,7 @@ public:
void startShutdown() override
{
if (!(*Assert(Assert(m_context)->shutdown))()) {
- LogPrintf("Error: failed to send shutdown signal\n");
+ LogError("Failed to send shutdown signal\n");
}
// Stop RPC for clean shutdown if any of waitfor* commands is executed.
if (args().GetBoolArg("-server", false)) {
@@ -830,10 +836,70 @@ public:
ValidationSignals& validation_signals() { return *Assert(m_node.validation_signals); }
NodeContext& m_node;
};
+
+class MinerImpl : public Mining
+{
+public:
+ explicit MinerImpl(NodeContext& node) : m_node(node) {}
+
+ bool isTestChain() override
+ {
+ return chainman().GetParams().IsTestChain();
+ }
+
+ bool isInitialBlockDownload() override
+ {
+ return chainman().IsInitialBlockDownload();
+ }
+
+ std::optional<uint256> getTipHash() override
+ {
+ LOCK(::cs_main);
+ CBlockIndex* tip{chainman().ActiveChain().Tip()};
+ if (!tip) return {};
+ return tip->GetBlockHash();
+ }
+
+ bool processNewBlock(const std::shared_ptr<const CBlock>& block, bool* new_block) override
+ {
+ return chainman().ProcessNewBlock(block, /*force_processing=*/true, /*min_pow_checked=*/true, /*new_block=*/new_block);
+ }
+
+ unsigned int getTransactionsUpdated() override
+ {
+ return context()->mempool->GetTransactionsUpdated();
+ }
+
+ bool testBlockValidity(const CBlock& block, bool check_merkle_root, BlockValidationState& state) override
+ {
+ LOCK(cs_main);
+ CBlockIndex* tip{chainman().ActiveChain().Tip()};
+ // Fail if the tip updated before the lock was taken
+ if (block.hashPrevBlock != tip->GetBlockHash()) {
+ state.Error("Block does not connect to current chain tip.");
+ return false;
+ }
+
+ return TestBlockValidity(state, chainman().GetParams(), chainman().ActiveChainstate(), block, tip, /*fCheckPOW=*/false, check_merkle_root);
+ }
+
+ std::unique_ptr<CBlockTemplate> createNewBlock(const CScript& script_pub_key, bool use_mempool) override
+ {
+ BlockAssembler::Options options;
+ ApplyArgsManOptions(gArgs, options);
+
+ return BlockAssembler{chainman().ActiveChainstate(), use_mempool ? context()->mempool.get() : nullptr, options}.CreateNewBlock(script_pub_key);
+ }
+
+ NodeContext* context() override { return &m_node; }
+ ChainstateManager& chainman() { return *Assert(m_node.chainman); }
+ NodeContext& m_node;
+};
} // namespace
} // namespace node
namespace interfaces {
std::unique_ptr<Node> MakeNode(node::NodeContext& context) { return std::make_unique<node::NodeImpl>(context); }
std::unique_ptr<Chain> MakeChain(node::NodeContext& context) { return std::make_unique<node::ChainImpl>(context); }
+std::unique_ptr<Mining> MakeMining(node::NodeContext& context) { return std::make_unique<node::MinerImpl>(context); }
} // namespace interfaces
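
Taken together, MinerImpl and MakeMining give non-validation code a narrow handle on block creation. A sketch of how a caller might drive it; only the Mining methods shown in this patch are assumed, and `node` / `script_pub_key` are placeholders prepared by the caller:

    // Illustrative use of the new Mining interface; node is an initialized NodeContext.
    std::unique_ptr<interfaces::Mining> mining{interfaces::MakeMining(node)};
    if (!mining->isInitialBlockDownload()) {
        // Build a template paying to script_pub_key, drawing transactions from the mempool.
        std::unique_ptr<node::CBlockTemplate> tmpl{mining->createNewBlock(script_pub_key, /*use_mempool=*/true)};
        // ... adjust the template's block and grind a valid nonce ...
        bool new_block{false};
        mining->processNewBlock(std::make_shared<const CBlock>(tmpl->block), &new_block);
    }
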
diff --git a/src/node/kernel_notifications.cpp b/src/node/kernel_notifications.cpp
index 1f07014ee2..9894052a3a 100644
--- a/src/node/kernel_notifications.cpp
+++ b/src/node/kernel_notifications.cpp
@@ -10,15 +10,16 @@
#include <common/args.h>
#include <common/system.h>
#include <kernel/context.h>
+#include <kernel/warning.h>
#include <logging.h>
#include <node/abort.h>
#include <node/interface_ui.h>
+#include <node/warnings.h>
#include <util/check.h>
#include <util/signalinterrupt.h>
#include <util/strencodings.h>
#include <util/string.h>
#include <util/translation.h>
-#include <warnings.h>
#include <cstdint>
#include <string>
@@ -28,7 +29,6 @@ using util::ReplaceAll;
static void AlertNotify(const std::string& strMessage)
{
- uiInterface.NotifyAlertChanged();
#if HAVE_SYSTEM
std::string strCmd = gArgs.GetArg("-alertnotify", "");
if (strCmd.empty()) return;
@@ -46,16 +46,6 @@ static void AlertNotify(const std::string& strMessage)
#endif
}
-static void DoWarning(const bilingual_str& warning)
-{
- static bool fWarned = false;
- SetMiscWarning(warning);
- if (!fWarned) {
- AlertNotify(warning.original);
- fWarned = true;
- }
-}
-
namespace node {
kernel::InterruptResult KernelNotifications::blockTip(SynchronizationState state, CBlockIndex& index)
@@ -63,7 +53,7 @@ kernel::InterruptResult KernelNotifications::blockTip(SynchronizationState state
uiInterface.NotifyBlockTip(state, &index);
if (m_stop_at_height && index.nHeight >= m_stop_at_height) {
if (!m_shutdown()) {
- LogPrintf("Error: failed to send shutdown signal after reaching stop height\n");
+ LogError("Failed to send shutdown signal after reaching stop height\n");
}
return kernel::Interrupted{};
}
@@ -80,20 +70,27 @@ void KernelNotifications::progress(const bilingual_str& title, int progress_perc
uiInterface.ShowProgress(title.translated, progress_percent, resume_possible);
}
-void KernelNotifications::warning(const bilingual_str& warning)
+void KernelNotifications::warningSet(kernel::Warning id, const bilingual_str& message)
+{
+ if (m_warnings.Set(id, message)) {
+ AlertNotify(message.original);
+ }
+}
+
+void KernelNotifications::warningUnset(kernel::Warning id)
{
- DoWarning(warning);
+ m_warnings.Unset(id);
}
void KernelNotifications::flushError(const bilingual_str& message)
{
- AbortNode(&m_shutdown, m_exit_status, message);
+ AbortNode(&m_shutdown, m_exit_status, message, &m_warnings);
}
void KernelNotifications::fatalError(const bilingual_str& message)
{
node::AbortNode(m_shutdown_on_fatal_error ? &m_shutdown : nullptr,
- m_exit_status, message);
+ m_exit_status, message, &m_warnings);
}
void ReadNotificationArgs(const ArgsManager& args, KernelNotifications& notifications)
diff --git a/src/node/kernel_notifications.h b/src/node/kernel_notifications.h
index f4d97a0fff..e37f4d4e1e 100644
--- a/src/node/kernel_notifications.h
+++ b/src/node/kernel_notifications.h
@@ -15,18 +15,24 @@ class CBlockIndex;
enum class SynchronizationState;
struct bilingual_str;
+namespace kernel {
+enum class Warning;
+} // namespace kernel
+
namespace util {
class SignalInterrupt;
} // namespace util
namespace node {
+class Warnings;
static constexpr int DEFAULT_STOPATHEIGHT{0};
class KernelNotifications : public kernel::Notifications
{
public:
- KernelNotifications(util::SignalInterrupt& shutdown, std::atomic<int>& exit_status) : m_shutdown(shutdown), m_exit_status{exit_status} {}
+ KernelNotifications(util::SignalInterrupt& shutdown, std::atomic<int>& exit_status, node::Warnings& warnings)
+ : m_shutdown(shutdown), m_exit_status{exit_status}, m_warnings{warnings} {}
[[nodiscard]] kernel::InterruptResult blockTip(SynchronizationState state, CBlockIndex& index) override;
@@ -34,7 +40,9 @@ public:
void progress(const bilingual_str& title, int progress_percent, bool resume_possible) override;
- void warning(const bilingual_str& warning) override;
+ void warningSet(kernel::Warning id, const bilingual_str& message) override;
+
+ void warningUnset(kernel::Warning id) override;
void flushError(const bilingual_str& message) override;
@@ -47,6 +55,7 @@ public:
private:
util::SignalInterrupt& m_shutdown;
std::atomic<int>& m_exit_status;
+ node::Warnings& m_warnings;
};
void ReadNotificationArgs(const ArgsManager& args, KernelNotifications& notifications);
diff --git a/src/node/miner.cpp b/src/node/miner.cpp
index 87f40e993f..03c6d74deb 100644
--- a/src/node/miner.cpp
+++ b/src/node/miner.cpp
@@ -80,15 +80,6 @@ void ApplyArgsManOptions(const ArgsManager& args, BlockAssembler::Options& optio
if (const auto parsed{ParseMoney(*blockmintxfee)}) options.blockMinFeeRate = CFeeRate{*parsed};
}
}
-static BlockAssembler::Options ConfiguredOptions()
-{
- BlockAssembler::Options options;
- ApplyArgsManOptions(gArgs, options);
- return options;
-}
-
-BlockAssembler::BlockAssembler(Chainstate& chainstate, const CTxMemPool* mempool)
- : BlockAssembler(chainstate, mempool, ConfiguredOptions()) {}
void BlockAssembler::resetBlock()
{
diff --git a/src/node/miner.h b/src/node/miner.h
index 06a917228d..c3178a7532 100644
--- a/src/node/miner.h
+++ b/src/node/miner.h
@@ -161,7 +161,6 @@ public:
bool test_block_validity{true};
};
- explicit BlockAssembler(Chainstate& chainstate, const CTxMemPool* mempool);
explicit BlockAssembler(Chainstate& chainstate, const CTxMemPool* mempool, const Options& options);
/** Construct a new block template with coinbase to scriptPubKeyIn */
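
With the convenience constructor removed, every BlockAssembler caller now supplies Options explicitly. A minimal sketch of the pattern MinerImpl::createNewBlock uses above, assuming gArgs, a ChainstateManager `chainman`, and a CTxMemPool `mempool` are in scope:

    // Sketch only: build options from the ArgsManager, then construct the assembler explicitly.
    node::BlockAssembler::Options options;
    node::ApplyArgsManOptions(gArgs, options);
    node::BlockAssembler assembler{chainman.ActiveChainstate(), &mempool, options};
    std::unique_ptr<node::CBlockTemplate> tmpl{assembler.CreateNewBlock(script_pub_key)};
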
diff --git a/src/node/timeoffsets.cpp b/src/node/timeoffsets.cpp
index 62f527be8a..002c00d245 100644
--- a/src/node/timeoffsets.cpp
+++ b/src/node/timeoffsets.cpp
@@ -3,13 +3,12 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <logging.h>
-#include <node/interface_ui.h>
#include <node/timeoffsets.h>
+#include <node/warnings.h>
#include <sync.h>
#include <tinyformat.h>
#include <util/time.h>
#include <util/translation.h>
-#include <warnings.h>
#include <algorithm>
#include <chrono>
@@ -49,8 +48,7 @@ bool TimeOffsets::WarnIfOutOfSync() const
// when median == std::numeric_limits<int64_t>::min(), calling std::chrono::abs is UB
auto median{std::max(Median(), std::chrono::seconds(std::numeric_limits<int64_t>::min() + 1))};
if (std::chrono::abs(median) <= WARN_THRESHOLD) {
- SetMedianTimeOffsetWarning(std::nullopt);
- uiInterface.NotifyAlertChanged();
+ m_warnings.Unset(node::Warning::CLOCK_OUT_OF_SYNC);
return false;
}
@@ -63,7 +61,6 @@ bool TimeOffsets::WarnIfOutOfSync() const
"RPC methods to get more info."
), Ticks<std::chrono::minutes>(WARN_THRESHOLD))};
LogWarning("%s\n", msg.original);
- SetMedianTimeOffsetWarning(msg);
- uiInterface.NotifyAlertChanged();
+ m_warnings.Set(node::Warning::CLOCK_OUT_OF_SYNC, msg);
return true;
}
diff --git a/src/node/timeoffsets.h b/src/node/timeoffsets.h
index 2b12584e12..eba706ac1e 100644
--- a/src/node/timeoffsets.h
+++ b/src/node/timeoffsets.h
@@ -11,8 +11,16 @@
#include <cstddef>
#include <deque>
+namespace node {
+class Warnings;
+} // namespace node
+
class TimeOffsets
{
+public:
+ TimeOffsets(node::Warnings& warnings) : m_warnings{warnings} {}
+
+private:
//! Maximum number of timeoffsets stored.
static constexpr size_t MAX_SIZE{50};
//! Minimum difference between system and network time for a warning to be raised.
@@ -23,6 +31,8 @@ class TimeOffsets
* positive offset means our peer's clock is ahead of our local clock. */
std::deque<std::chrono::seconds> m_offsets GUARDED_BY(m_mutex){};
+ node::Warnings& m_warnings;
+
public:
/** Add a new time offset sample. */
void Add(std::chrono::seconds offset) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
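
TimeOffsets now raises and clears the clock warning itself through the injected Warnings reference. Roughly, with illustrative sample values that are not from the patch:

    // Illustrative: construct with the node-wide warnings instance added in this patch.
    TimeOffsets offsets{*node.warnings};
    offsets.Add(std::chrono::minutes{-12});  // a peer sample far from local time
    if (offsets.WarnIfOutOfSync()) {
        // node::Warning::CLOCK_OUT_OF_SYNC is now set and the UI has been notified.
    }
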
diff --git a/src/node/warnings.cpp b/src/node/warnings.cpp
new file mode 100644
index 0000000000..b99c845900
--- /dev/null
+++ b/src/node/warnings.cpp
@@ -0,0 +1,68 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2022 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <config/bitcoin-config.h> // IWYU pragma: keep
+
+#include <node/warnings.h>
+
+#include <common/system.h>
+#include <node/interface_ui.h>
+#include <sync.h>
+#include <univalue.h>
+#include <util/translation.h>
+
+#include <utility>
+#include <vector>
+
+namespace node {
+Warnings::Warnings()
+{
+ // Pre-release build warning
+ if (!CLIENT_VERSION_IS_RELEASE) {
+ m_warnings.insert(
+ {Warning::PRE_RELEASE_TEST_BUILD,
+ _("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications")});
+ }
+}
+bool Warnings::Set(warning_type id, bilingual_str message)
+{
+ LOCK(m_mutex);
+ const auto& [_, inserted]{m_warnings.insert({id, std::move(message)})};
+ if (inserted) uiInterface.NotifyAlertChanged();
+ return inserted;
+}
+
+bool Warnings::Unset(warning_type id)
+{
+ auto success{WITH_LOCK(m_mutex, return m_warnings.erase(id))};
+ if (success) uiInterface.NotifyAlertChanged();
+ return success;
+}
+
+std::vector<bilingual_str> Warnings::GetMessages() const
+{
+ LOCK(m_mutex);
+ std::vector<bilingual_str> messages;
+ messages.reserve(m_warnings.size());
+ for (const auto& [id, msg] : m_warnings) {
+ messages.push_back(msg);
+ }
+ return messages;
+}
+
+UniValue GetWarningsForRpc(const Warnings& warnings, bool use_deprecated)
+{
+ if (use_deprecated) {
+ const auto all_messages{warnings.GetMessages()};
+ return all_messages.empty() ? "" : all_messages.back().original;
+ }
+
+ UniValue messages{UniValue::VARR};
+ for (auto&& message : warnings.GetMessages()) {
+ messages.push_back(std::move(message.original));
+ }
+ return messages;
+}
+} // namespace node
diff --git a/src/node/warnings.h b/src/node/warnings.h
new file mode 100644
index 0000000000..24aeb8a922
--- /dev/null
+++ b/src/node/warnings.h
@@ -0,0 +1,90 @@
+// Copyright (c) 2009-2010 Satoshi Nakamoto
+// Copyright (c) 2009-2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_NODE_WARNINGS_H
+#define BITCOIN_NODE_WARNINGS_H
+
+#include <sync.h>
+#include <util/translation.h>
+
+#include <map>
+#include <variant>
+#include <vector>
+
+class UniValue;
+
+namespace kernel {
+enum class Warning;
+} // namespace kernel
+
+namespace node {
+enum class Warning {
+ CLOCK_OUT_OF_SYNC,
+ PRE_RELEASE_TEST_BUILD,
+ FATAL_INTERNAL_ERROR,
+};
+
+/**
+ * @class Warnings
+ * @brief Manages warning messages within a node.
+ *
+ * The Warnings class provides mechanisms to set, unset, and retrieve
+ * warning messages. It updates the GUI when warnings are changed.
+ *
+ * This class is designed to be non-copyable to ensure warnings
+ * are managed centrally.
+ */
+class Warnings
+{
+ typedef std::variant<kernel::Warning, node::Warning> warning_type;
+
+ mutable Mutex m_mutex;
+ std::map<warning_type, bilingual_str> m_warnings GUARDED_BY(m_mutex);
+
+public:
+ Warnings();
+ //! A warnings instance should always be passed by reference, never copied.
+ Warnings(const Warnings&) = delete;
+ Warnings& operator=(const Warnings&) = delete;
+ /**
+ * @brief Set a warning message. If a warning with the specified
+ * `id` is already active, false is returned and the new
+ * warning is ignored. If `id` does not yet exist, the
+ * warning is set, the UI is updated, and true is returned.
+ *
+ * @param[in] id Unique identifier of the warning.
+ * @param[in] message Warning message to be shown.
+ *
+ * @returns true if the warning was indeed set (i.e. there is no
+ * active warning with this `id`), otherwise false.
+ */
+ bool Set(warning_type id, bilingual_str message) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
+ /**
+ * @brief Unset a warning message. If a warning with the specified
+ * `id` is active, it is unset, the UI is updated, and true
+ * is returned. Otherwise, no warning is unset and false is
+ * returned.
+ *
+ * @param[in] id Unique identifier of the warning.
+ *
+ * @returns true if the warning was indeed unset (i.e. there is an
+ * active warning with this `id`), otherwise false.
+ */
+ bool Unset(warning_type id) EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
+ /** Return potential problems detected by the node, sorted by the
+ * warning_type id */
+ std::vector<bilingual_str> GetMessages() const EXCLUSIVE_LOCKS_REQUIRED(!m_mutex);
+};
+
+/**
+ * RPC helper function that wraps warnings.GetMessages().
+ *
+ * Returns a UniValue::VSTR with the latest warning if use_deprecated is
+ * set to true, or a UniValue::VARR with all warnings otherwise.
+ */
+UniValue GetWarningsForRpc(const Warnings& warnings, bool use_deprecated);
+} // namespace node
+
+#endif // BITCOIN_NODE_WARNINGS_H
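
A short usage sketch of the new API (the warning id and message strings are illustrative only):

    // Illustrative use of node::Warnings and the RPC helper declared above.
    node::Warnings warnings;
    bool set{warnings.Set(node::Warning::CLOCK_OUT_OF_SYNC, Untranslated("example warning"))}; // true: newly set
    set = warnings.Set(node::Warning::CLOCK_OUT_OF_SYNC, Untranslated("ignored"));             // false: already active
    for (const auto& msg : warnings.GetMessages()) LogInfo("%s\n", msg.original);
    UniValue rpc_field{node::GetWarningsForRpc(warnings, /*use_deprecated=*/false)};           // VARR of all messages
    warnings.Unset(node::Warning::CLOCK_OUT_OF_SYNC);
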
diff --git a/src/policy/v3_policy.cpp b/src/policy/v3_policy.cpp
index bcf0b2b236..6bd043b8e3 100644
--- a/src/policy/v3_policy.cpp
+++ b/src/policy/v3_policy.cpp
@@ -91,7 +91,6 @@ std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t v
const auto parent_info = [&] {
if (mempool_ancestors.size() > 0) {
auto& mempool_parent = *mempool_ancestors.begin();
- Assume(mempool_parent->GetCountWithDescendants() == 1);
return ParentInfo{mempool_parent->GetTx().GetHash(),
mempool_parent->GetTx().GetWitnessHash(),
mempool_parent->GetTx().version,
@@ -135,10 +134,7 @@ std::optional<std::string> PackageV3Checks(const CTransactionRef& ptx, int64_t v
}
}
- // It shouldn't be possible to have any mempool siblings at this point. SingleV3Checks
- // catches mempool siblings and sibling eviction is not extended to packages. Also, if the package consists of connected transactions,
- // any tx having a mempool ancestor would mean the package exceeds ancestor limits.
- if (!Assume(!parent_info.m_has_mempool_descendant)) {
+ if (parent_info.m_has_mempool_descendant) {
return strprintf("tx %s (wtxid=%s) would exceed descendant count limit",
parent_info.m_txid.ToString(), parent_info.m_wtxid.ToString());
}
diff --git a/src/randomenv.cpp b/src/randomenv.cpp
index aeec959c28..49033deef2 100644
--- a/src/randomenv.cpp
+++ b/src/randomenv.cpp
@@ -42,15 +42,15 @@
#if HAVE_DECL_GETIFADDRS && HAVE_DECL_FREEIFADDRS
#include <ifaddrs.h>
#endif
-#if HAVE_SYSCTL
+#ifdef HAVE_SYSCTL
#include <sys/sysctl.h>
-#if HAVE_VM_VM_PARAM_H
+#ifdef HAVE_VM_VM_PARAM_H
#include <vm/vm_param.h>
#endif
-#if HAVE_SYS_RESOURCES_H
+#ifdef HAVE_SYS_RESOURCES_H
#include <sys/resources.h>
#endif
-#if HAVE_SYS_VMMETER_H
+#ifdef HAVE_SYS_VMMETER_H
#include <sys/vmmeter.h>
#endif
#endif
@@ -162,7 +162,7 @@ void AddPath(CSHA512& hasher, const char *path)
}
#endif
-#if HAVE_SYSCTL
+#ifdef HAVE_SYSCTL
template<int... S>
void AddSysctl(CSHA512& hasher)
{
@@ -274,7 +274,7 @@ void RandAddDynamicEnv(CSHA512& hasher)
AddFile(hasher, "/proc/self/status");
#endif
-#if HAVE_SYSCTL
+#ifdef HAVE_SYSCTL
# ifdef CTL_KERN
# if defined(KERN_PROC) && defined(KERN_PROC_ALL)
AddSysctl<CTL_KERN, KERN_PROC, KERN_PROC_ALL>(hasher);
@@ -419,7 +419,7 @@ void RandAddStaticEnv(CSHA512& hasher)
// For MacOS/BSDs, gather data through sysctl instead of /proc. Not all of these
// will exist on every system.
-#if HAVE_SYSCTL
+#ifdef HAVE_SYSCTL
# ifdef CTL_HW
# ifdef HW_MACHINE
AddSysctl<CTL_HW, HW_MACHINE>(hasher);
diff --git a/src/rest.cpp b/src/rest.cpp
index d43018f5af..4abbc4d2ca 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -248,9 +248,8 @@ static bool rest_headers(const std::any& context,
ssHeader << pindex->GetBlockHeader();
}
- std::string binaryHeader = ssHeader.str();
req->WriteHeader("Content-Type", "application/octet-stream");
- req->WriteReply(HTTP_OK, binaryHeader);
+ req->WriteReply(HTTP_OK, ssHeader);
return true;
}
@@ -321,9 +320,8 @@ static bool rest_block(const std::any& context,
switch (rf) {
case RESTResponseFormat::BINARY: {
- const std::string binaryBlock{block_data.begin(), block_data.end()};
req->WriteHeader("Content-Type", "application/octet-stream");
- req->WriteReply(HTTP_OK, binaryBlock);
+ req->WriteReply(HTTP_OK, std::as_bytes(std::span{block_data}));
return true;
}
@@ -451,9 +449,8 @@ static bool rest_filter_header(const std::any& context, HTTPRequest* req, const
ssHeader << header;
}
- std::string binaryHeader = ssHeader.str();
req->WriteHeader("Content-Type", "application/octet-stream");
- req->WriteReply(HTTP_OK, binaryHeader);
+ req->WriteReply(HTTP_OK, ssHeader);
return true;
}
case RESTResponseFormat::HEX: {
@@ -548,9 +545,8 @@ static bool rest_block_filter(const std::any& context, HTTPRequest* req, const s
DataStream ssResp{};
ssResp << filter;
- std::string binaryResp = ssResp.str();
req->WriteHeader("Content-Type", "application/octet-stream");
- req->WriteReply(HTTP_OK, binaryResp);
+ req->WriteReply(HTTP_OK, ssResp);
return true;
}
case RESTResponseFormat::HEX: {
@@ -729,9 +725,8 @@ static bool rest_tx(const std::any& context, HTTPRequest* req, const std::string
DataStream ssTx;
ssTx << TX_WITH_WITNESS(tx);
- std::string binaryTx = ssTx.str();
req->WriteHeader("Content-Type", "application/octet-stream");
- req->WriteReply(HTTP_OK, binaryTx);
+ req->WriteReply(HTTP_OK, ssTx);
return true;
}
@@ -900,10 +895,9 @@ static bool rest_getutxos(const std::any& context, HTTPRequest* req, const std::
// use exact same output as mentioned in Bip64
DataStream ssGetUTXOResponse{};
ssGetUTXOResponse << active_height << active_hash << bitmap << outs;
- std::string ssGetUTXOResponseString = ssGetUTXOResponse.str();
req->WriteHeader("Content-Type", "application/octet-stream");
- req->WriteReply(HTTP_OK, ssGetUTXOResponseString);
+ req->WriteReply(HTTP_OK, ssGetUTXOResponse);
return true;
}
@@ -981,7 +975,7 @@ static bool rest_blockhash_by_height(const std::any& context, HTTPRequest* req,
DataStream ss_blockhash{};
ss_blockhash << pblockindex->GetBlockHash();
req->WriteHeader("Content-Type", "application/octet-stream");
- req->WriteReply(HTTP_OK, ss_blockhash.str());
+ req->WriteReply(HTTP_OK, ss_blockhash);
return true;
}
case RESTResponseFormat::HEX: {
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 46b0ae161f..e785678614 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -29,6 +29,7 @@
#include <node/context.h>
#include <node/transaction.h>
#include <node/utxo_snapshot.h>
+#include <node/warnings.h>
#include <primitives/transaction.h>
#include <rpc/server.h>
#include <rpc/server_util.h>
@@ -1308,7 +1309,8 @@ RPCHelpMan getblockchaininfo()
}
}
- obj.pushKV("warnings", GetNodeWarnings(IsDeprecatedRPCEnabled("warnings")));
+ NodeContext& node = EnsureAnyNodeContext(request.context);
+ obj.pushKV("warnings", node::GetWarningsForRpc(*CHECK_NONFATAL(node.warnings), IsDeprecatedRPCEnabled("warnings")));
return obj;
},
};
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 6412fb35ec..7e420dcd9b 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -16,10 +16,12 @@
#include <core_io.h>
#include <deploymentinfo.h>
#include <deploymentstatus.h>
+#include <interfaces/mining.h>
#include <key_io.h>
#include <net.h>
#include <node/context.h>
#include <node/miner.h>
+#include <node/warnings.h>
#include <pow.h>
#include <rpc/blockchain.h>
#include <rpc/mining.h>
@@ -44,6 +46,7 @@
using node::BlockAssembler;
using node::CBlockTemplate;
+using interfaces::Mining;
using node::NodeContext;
using node::RegenerateCommitments;
using node::UpdateTime;
@@ -126,7 +129,7 @@ static RPCHelpMan getnetworkhashps()
};
}
-static bool GenerateBlock(ChainstateManager& chainman, CBlock& block, uint64_t& max_tries, std::shared_ptr<const CBlock>& block_out, bool process_new_block)
+static bool GenerateBlock(ChainstateManager& chainman, Mining& miner, CBlock& block, uint64_t& max_tries, std::shared_ptr<const CBlock>& block_out, bool process_new_block)
{
block_out.reset();
block.hashMerkleRoot = BlockMerkleRoot(block);
@@ -146,23 +149,23 @@ static bool GenerateBlock(ChainstateManager& chainman, CBlock& block, uint64_t&
if (!process_new_block) return true;
- if (!chainman.ProcessNewBlock(block_out, /*force_processing=*/true, /*min_pow_checked=*/true, nullptr)) {
+ if (!miner.processNewBlock(block_out, nullptr)) {
throw JSONRPCError(RPC_INTERNAL_ERROR, "ProcessNewBlock, block not accepted");
}
return true;
}
-static UniValue generateBlocks(ChainstateManager& chainman, const CTxMemPool& mempool, const CScript& coinbase_script, int nGenerate, uint64_t nMaxTries)
+static UniValue generateBlocks(ChainstateManager& chainman, Mining& miner, const CScript& coinbase_script, int nGenerate, uint64_t nMaxTries)
{
UniValue blockHashes(UniValue::VARR);
while (nGenerate > 0 && !chainman.m_interrupt) {
- std::unique_ptr<CBlockTemplate> pblocktemplate(BlockAssembler{chainman.ActiveChainstate(), &mempool}.CreateNewBlock(coinbase_script));
+ std::unique_ptr<CBlockTemplate> pblocktemplate(miner.createNewBlock(coinbase_script));
if (!pblocktemplate.get())
throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block");
std::shared_ptr<const CBlock> block_out;
- if (!GenerateBlock(chainman, pblocktemplate->block, nMaxTries, block_out, /*process_new_block=*/true)) {
+ if (!GenerateBlock(chainman, miner, pblocktemplate->block, nMaxTries, block_out, /*process_new_block=*/true)) {
break;
}
@@ -238,10 +241,10 @@ static RPCHelpMan generatetodescriptor()
}
NodeContext& node = EnsureAnyNodeContext(request.context);
- const CTxMemPool& mempool = EnsureMemPool(node);
+ Mining& miner = EnsureMining(node);
ChainstateManager& chainman = EnsureChainman(node);
- return generateBlocks(chainman, mempool, coinbase_script, num_blocks, max_tries);
+ return generateBlocks(chainman, miner, coinbase_script, num_blocks, max_tries);
},
};
}
@@ -284,12 +287,12 @@ static RPCHelpMan generatetoaddress()
}
NodeContext& node = EnsureAnyNodeContext(request.context);
- const CTxMemPool& mempool = EnsureMemPool(node);
+ Mining& miner = EnsureMining(node);
ChainstateManager& chainman = EnsureChainman(node);
CScript coinbase_script = GetScriptForDestination(destination);
- return generateBlocks(chainman, mempool, coinbase_script, num_blocks, max_tries);
+ return generateBlocks(chainman, miner, coinbase_script, num_blocks, max_tries);
},
};
}
@@ -336,6 +339,7 @@ static RPCHelpMan generateblock()
}
NodeContext& node = EnsureAnyNodeContext(request.context);
+ Mining& miner = EnsureMining(node);
const CTxMemPool& mempool = EnsureMemPool(node);
std::vector<CTransactionRef> txs;
@@ -367,9 +371,7 @@ static RPCHelpMan generateblock()
ChainstateManager& chainman = EnsureChainman(node);
{
- LOCK(cs_main);
-
- std::unique_ptr<CBlockTemplate> blocktemplate(BlockAssembler{chainman.ActiveChainstate(), nullptr}.CreateNewBlock(coinbase_script));
+ std::unique_ptr<CBlockTemplate> blocktemplate{miner.createNewBlock(coinbase_script, /*use_mempool=*/false)};
if (!blocktemplate) {
throw JSONRPCError(RPC_INTERNAL_ERROR, "Couldn't create new block");
}
@@ -383,18 +385,16 @@ static RPCHelpMan generateblock()
RegenerateCommitments(block, chainman);
{
- LOCK(cs_main);
-
BlockValidationState state;
- if (!TestBlockValidity(state, chainman.GetParams(), chainman.ActiveChainstate(), block, chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock), false, false)) {
- throw JSONRPCError(RPC_VERIFY_ERROR, strprintf("TestBlockValidity failed: %s", state.ToString()));
+ if (!miner.testBlockValidity(block, /*check_merkle_root=*/false, state)) {
+ throw JSONRPCError(RPC_VERIFY_ERROR, strprintf("testBlockValidity failed: %s", state.ToString()));
}
}
std::shared_ptr<const CBlock> block_out;
uint64_t max_tries{DEFAULT_MAX_TRIES};
- if (!GenerateBlock(chainman, block, max_tries, block_out, process_new_block) || !block_out) {
+ if (!GenerateBlock(chainman, miner, block, max_tries, block_out, process_new_block) || !block_out) {
throw JSONRPCError(RPC_MISC_ERROR, "Failed to make block.");
}
@@ -454,7 +454,7 @@ static RPCHelpMan getmininginfo()
obj.pushKV("networkhashps", getnetworkhashps().HandleRequest(request));
obj.pushKV("pooledtx", (uint64_t)mempool.size());
obj.pushKV("chain", chainman.GetParams().GetChainTypeString());
- obj.pushKV("warnings", GetNodeWarnings(IsDeprecatedRPCEnabled("warnings")));
+ obj.pushKV("warnings", node::GetWarningsForRpc(*CHECK_NONFATAL(node.warnings), IsDeprecatedRPCEnabled("warnings")));
return obj;
},
};
@@ -661,13 +661,13 @@ static RPCHelpMan getblocktemplate()
{
NodeContext& node = EnsureAnyNodeContext(request.context);
ChainstateManager& chainman = EnsureChainman(node);
+ Mining& miner = EnsureMining(node);
LOCK(cs_main);
+ uint256 tip{CHECK_NONFATAL(miner.getTipHash()).value()};
std::string strMode = "template";
UniValue lpval = NullUniValue;
std::set<std::string> setClientRules;
- Chainstate& active_chainstate = chainman.ActiveChainstate();
- CChain& active_chain = active_chainstate.m_chain;
if (!request.params[0].isNull())
{
const UniValue& oparam = request.params[0].get_obj();
@@ -702,12 +702,12 @@ static RPCHelpMan getblocktemplate()
return "duplicate-inconclusive";
}
- CBlockIndex* const pindexPrev = active_chain.Tip();
- // TestBlockValidity only supports blocks built on the current Tip
- if (block.hashPrevBlock != pindexPrev->GetBlockHash())
+ // testBlockValidity only supports blocks built on the current Tip
+ if (block.hashPrevBlock != tip) {
return "inconclusive-not-best-prevblk";
+ }
BlockValidationState state;
- TestBlockValidity(state, chainman.GetParams(), active_chainstate, block, pindexPrev, false, true);
+ miner.testBlockValidity(block, /*check_merkle_root=*/true, state);
return BIP22ValidationResult(state);
}
@@ -723,19 +723,18 @@ static RPCHelpMan getblocktemplate()
if (strMode != "template")
throw JSONRPCError(RPC_INVALID_PARAMETER, "Invalid mode");
- if (!chainman.GetParams().IsTestChain()) {
+ if (!miner.isTestChain()) {
const CConnman& connman = EnsureConnman(node);
if (connman.GetNodeCount(ConnectionDirection::Both) == 0) {
throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, PACKAGE_NAME " is not connected!");
}
- if (chainman.IsInitialBlockDownload()) {
+ if (miner.isInitialBlockDownload()) {
throw JSONRPCError(RPC_CLIENT_IN_INITIAL_DOWNLOAD, PACKAGE_NAME " is in initial sync and waiting for blocks...");
}
}
static unsigned int nTransactionsUpdatedLast;
- const CTxMemPool& mempool = EnsureMemPool(node);
if (!lpval.isNull())
{
@@ -755,7 +754,7 @@ static RPCHelpMan getblocktemplate()
else
{
// NOTE: Spec does not specify behaviour for non-string longpollid, but this makes testing easier
- hashWatchedChain = active_chain.Tip()->GetBlockHash();
+ hashWatchedChain = tip;
nTransactionsUpdatedLastLP = nTransactionsUpdatedLast;
}
@@ -771,7 +770,7 @@ static RPCHelpMan getblocktemplate()
{
// Timeout: Check transactions for update
// without holding the mempool lock to avoid deadlocks
- if (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLastLP)
+ if (miner.getTransactionsUpdated() != nTransactionsUpdatedLastLP)
break;
checktxtime += std::chrono::seconds(10);
}
@@ -779,6 +778,10 @@ static RPCHelpMan getblocktemplate()
}
ENTER_CRITICAL_SECTION(cs_main);
+ std::optional<uint256> maybe_tip{miner.getTipHash()};
+ CHECK_NONFATAL(maybe_tip);
+ tip = maybe_tip.value();
+
if (!IsRPCRunning())
throw JSONRPCError(RPC_CLIENT_NOT_CONNECTED, "Shutting down");
// TODO: Maybe recheck connections/IBD and (if something wrong) send an expires-immediately template to stop miners?
@@ -800,24 +803,25 @@ static RPCHelpMan getblocktemplate()
static CBlockIndex* pindexPrev;
static int64_t time_start;
static std::unique_ptr<CBlockTemplate> pblocktemplate;
- if (pindexPrev != active_chain.Tip() ||
- (mempool.GetTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - time_start > 5))
+ if (!pindexPrev || pindexPrev->GetBlockHash() != tip ||
+ (miner.getTransactionsUpdated() != nTransactionsUpdatedLast && GetTime() - time_start > 5))
{
// Clear pindexPrev so future calls make a new block, despite any failures from here on
pindexPrev = nullptr;
- // Store the pindexBest used before CreateNewBlock, to avoid races
- nTransactionsUpdatedLast = mempool.GetTransactionsUpdated();
- CBlockIndex* pindexPrevNew = active_chain.Tip();
+ // Store the pindexBest used before createNewBlock, to avoid races
+ nTransactionsUpdatedLast = miner.getTransactionsUpdated();
+ CBlockIndex* pindexPrevNew = chainman.m_blockman.LookupBlockIndex(tip);
time_start = GetTime();
// Create new block
CScript scriptDummy = CScript() << OP_TRUE;
- pblocktemplate = BlockAssembler{active_chainstate, &mempool}.CreateNewBlock(scriptDummy);
- if (!pblocktemplate)
+ pblocktemplate = miner.createNewBlock(scriptDummy);
+ if (!pblocktemplate) {
throw JSONRPCError(RPC_OUT_OF_MEMORY, "Out of memory");
+ }
- // Need to update only after we know CreateNewBlock succeeded
+ // Need to update only after we know createNewBlock succeeded
pindexPrev = pindexPrevNew;
}
CHECK_NONFATAL(pindexPrev);
@@ -940,7 +944,7 @@ static RPCHelpMan getblocktemplate()
result.pushKV("transactions", std::move(transactions));
result.pushKV("coinbaseaux", std::move(aux));
result.pushKV("coinbasevalue", (int64_t)pblock->vtx[0]->vout[0].nValue);
- result.pushKV("longpollid", active_chain.Tip()->GetBlockHash().GetHex() + ToString(nTransactionsUpdatedLast));
+ result.pushKV("longpollid", tip.GetHex() + ToString(nTransactionsUpdatedLast));
result.pushKV("target", hashTarget.GetHex());
result.pushKV("mintime", (int64_t)pindexPrev->GetMedianTimePast()+1);
result.pushKV("mutable", std::move(aMutable));
@@ -1046,10 +1050,13 @@ static RPCHelpMan submitblock()
}
}
+ NodeContext& node = EnsureAnyNodeContext(request.context);
+ Mining& miner = EnsureMining(node);
+
bool new_block;
auto sc = std::make_shared<submitblock_StateCatcher>(block.GetHash());
CHECK_NONFATAL(chainman.m_options.signals)->RegisterSharedValidationInterface(sc);
- bool accepted = chainman.ProcessNewBlock(blockptr, /*force_processing=*/true, /*min_pow_checked=*/true, /*new_block=*/&new_block);
+ bool accepted = miner.processNewBlock(blockptr, /*new_block=*/&new_block);
CHECK_NONFATAL(chainman.m_options.signals)->UnregisterSharedValidationInterface(sc);
if (!new_block && accepted) {
return "duplicate";
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index 1cc55f891a..1119a3e668 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -16,6 +16,7 @@
#include <netbase.h>
#include <node/context.h>
#include <node/protocol_version.h>
+#include <node/warnings.h>
#include <policy/settings.h>
#include <protocol.h>
#include <rpc/blockchain.h>
@@ -715,7 +716,7 @@ static RPCHelpMan getnetworkinfo()
}
}
obj.pushKV("localaddresses", std::move(localAddresses));
- obj.pushKV("warnings", GetNodeWarnings(IsDeprecatedRPCEnabled("warnings")));
+ obj.pushKV("warnings", node::GetWarningsForRpc(*CHECK_NONFATAL(node.warnings), IsDeprecatedRPCEnabled("warnings")));
return obj;
},
};
diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp
index 87b9f18b33..083d1be44f 100644
--- a/src/rpc/request.cpp
+++ b/src/rpc/request.cpp
@@ -5,12 +5,11 @@
#include <rpc/request.h>
-#include <util/fs.h>
-
#include <common/args.h>
#include <logging.h>
#include <random.h>
#include <rpc/protocol.h>
+#include <util/fs.h>
#include <util/fs_helpers.h>
#include <util/strencodings.h>
@@ -95,7 +94,7 @@ static fs::path GetAuthCookieFile(bool temp=false)
static bool g_generated_cookie = false;
-bool GenerateAuthCookie(std::string *cookie_out)
+bool GenerateAuthCookie(std::string* cookie_out, std::optional<fs::perms> cookie_perms)
{
const size_t COOKIE_SIZE = 32;
unsigned char rand_pwd[COOKIE_SIZE];
@@ -109,7 +108,7 @@ bool GenerateAuthCookie(std::string *cookie_out)
fs::path filepath_tmp = GetAuthCookieFile(true);
file.open(filepath_tmp);
if (!file.is_open()) {
- LogPrintf("Unable to open cookie authentication file %s for writing\n", fs::PathToString(filepath_tmp));
+ LogInfo("Unable to open cookie authentication file %s for writing\n", fs::PathToString(filepath_tmp));
return false;
}
file << cookie;
@@ -117,11 +116,21 @@ bool GenerateAuthCookie(std::string *cookie_out)
fs::path filepath = GetAuthCookieFile(false);
if (!RenameOver(filepath_tmp, filepath)) {
- LogPrintf("Unable to rename cookie authentication file %s to %s\n", fs::PathToString(filepath_tmp), fs::PathToString(filepath));
+ LogInfo("Unable to rename cookie authentication file %s to %s\n", fs::PathToString(filepath_tmp), fs::PathToString(filepath));
return false;
}
+ if (cookie_perms) {
+ std::error_code code;
+ fs::permissions(filepath, cookie_perms.value(), fs::perm_options::replace, code);
+ if (code) {
+ LogInfo("Unable to set permissions on cookie authentication file %s\n", fs::PathToString(filepath_tmp));
+ return false;
+ }
+ }
+
g_generated_cookie = true;
- LogPrintf("Generated RPC authentication cookie %s\n", fs::PathToString(filepath));
+ LogInfo("Generated RPC authentication cookie %s\n", fs::PathToString(filepath));
+ LogInfo("Permissions used for cookie: %s\n", PermsToSymbolicString(fs::status(filepath).permissions()));
if (cookie_out)
*cookie_out = cookie;
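The new cookie_perms handling above uses the std::filesystem error_code overload, so a failed permission change is reported instead of throwing. A self-contained sketch of that call pattern (illustration only, not Bitcoin Core code; the path is hypothetical):

    #include <cstdio>
    #include <filesystem>
    #include <system_error>

    int main()
    {
        namespace fs = std::filesystem;
        const fs::path cookie{"/tmp/.cookie"};                 // hypothetical path
        std::error_code ec;
        // Replace (not merge) the permission bits, mirroring fs::perm_options::replace above.
        fs::permissions(cookie, fs::perms::owner_read | fs::perms::owner_write,
                        fs::perm_options::replace, ec);
        if (ec) std::fprintf(stderr, "failed to set permissions: %s\n", ec.message().c_str());
        return ec ? 1 : 0;
    }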
diff --git a/src/rpc/request.h b/src/rpc/request.h
index 9968426636..24887e8691 100644
--- a/src/rpc/request.h
+++ b/src/rpc/request.h
@@ -11,6 +11,7 @@
#include <string>
#include <univalue.h>
+#include <util/fs.h>
enum class JSONRPCVersion {
V1_LEGACY,
@@ -23,7 +24,7 @@ UniValue JSONRPCReplyObj(UniValue result, UniValue error, std::optional<UniValue
UniValue JSONRPCError(int code, const std::string& message);
/** Generate a new RPC authentication cookie and write it to disk */
-bool GenerateAuthCookie(std::string *cookie_out);
+bool GenerateAuthCookie(std::string* cookie_out, std::optional<fs::perms> cookie_perms=std::nullopt);
/** Read the RPC authentication cookie from disk */
bool GetAuthCookie(std::string *cookie_out);
/** Delete RPC authentication cookie from disk */
diff --git a/src/rpc/server_util.cpp b/src/rpc/server_util.cpp
index efd4a43c28..0387cbb8e2 100644
--- a/src/rpc/server_util.cpp
+++ b/src/rpc/server_util.cpp
@@ -101,6 +101,14 @@ CConnman& EnsureConnman(const NodeContext& node)
return *node.connman;
}
+interfaces::Mining& EnsureMining(const NodeContext& node)
+{
+ if (!node.mining) {
+ throw JSONRPCError(RPC_INTERNAL_ERROR, "Node miner not found");
+ }
+ return *node.mining;
+}
+
PeerManager& EnsurePeerman(const NodeContext& node)
{
if (!node.peerman) {
diff --git a/src/rpc/server_util.h b/src/rpc/server_util.h
index a4a53166b4..1e6fb7e6a6 100644
--- a/src/rpc/server_util.h
+++ b/src/rpc/server_util.h
@@ -18,6 +18,9 @@ class BanMan;
namespace node {
struct NodeContext;
} // namespace node
+namespace interfaces {
+class Mining;
+} // namespace interfaces
node::NodeContext& EnsureAnyNodeContext(const std::any& context);
CTxMemPool& EnsureMemPool(const node::NodeContext& node);
@@ -31,6 +34,7 @@ ChainstateManager& EnsureAnyChainman(const std::any& context);
CBlockPolicyEstimator& EnsureFeeEstimator(const node::NodeContext& node);
CBlockPolicyEstimator& EnsureAnyFeeEstimator(const std::any& context);
CConnman& EnsureConnman(const node::NodeContext& node);
+interfaces::Mining& EnsureMining(const node::NodeContext& node);
PeerManager& EnsurePeerman(const node::NodeContext& node);
AddrMan& EnsureAddrman(const node::NodeContext& node);
AddrMan& EnsureAnyAddrman(const std::any& context);
diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp
index bb1aef63f4..4df4466c49 100644
--- a/src/rpc/util.cpp
+++ b/src/rpc/util.cpp
@@ -5,17 +5,17 @@
#include <config/bitcoin-config.h> // IWYU pragma: keep
#include <clientversion.h>
-#include <core_io.h>
#include <common/args.h>
#include <common/messages.h>
#include <common/types.h>
#include <consensus/amount.h>
-#include <script/interpreter.h>
+#include <core_io.h>
#include <key_io.h>
#include <node/types.h>
#include <outputtype.h>
#include <rpc/util.h>
#include <script/descriptor.h>
+#include <script/interpreter.h>
#include <script/signingprovider.h>
#include <script/solver.h>
#include <tinyformat.h>
@@ -25,7 +25,6 @@
#include <util/strencodings.h>
#include <util/string.h>
#include <util/translation.h>
-#include <warnings.h>
#include <algorithm>
#include <iterator>
@@ -1382,17 +1381,3 @@ void PushWarnings(const std::vector<bilingual_str>& warnings, UniValue& obj)
if (warnings.empty()) return;
obj.pushKV("warnings", BilingualStringsToUniValue(warnings));
}
-
-UniValue GetNodeWarnings(bool use_deprecated)
-{
- if (use_deprecated) {
- const auto all_warnings{GetWarnings()};
- return all_warnings.empty() ? "" : all_warnings.back().original;
- }
-
- UniValue warnings{UniValue::VARR};
- for (auto&& warning : GetWarnings()) {
- warnings.push_back(std::move(warning.original));
- }
- return warnings;
-}
diff --git a/src/rpc/util.h b/src/rpc/util.h
index ca6ca6007b..23024376e0 100644
--- a/src/rpc/util.h
+++ b/src/rpc/util.h
@@ -503,6 +503,4 @@ private:
void PushWarnings(const UniValue& warnings, UniValue& obj);
void PushWarnings(const std::vector<bilingual_str>& warnings, UniValue& obj);
-UniValue GetNodeWarnings(bool use_deprecated);
-
#endif // BITCOIN_RPC_UTIL_H
diff --git a/src/secp256k1/.cirrus.yml b/src/secp256k1/.cirrus.yml
index 6e77403bf5..4bd15511a4 100644
--- a/src/secp256k1/.cirrus.yml
+++ b/src/secp256k1/.cirrus.yml
@@ -10,8 +10,8 @@ env:
MAKEFLAGS: -j4
BUILD: check
### secp256k1 config
- ECMULTWINDOW: auto
- ECMULTGENKB: auto
+ ECMULTWINDOW: 15
+ ECMULTGENKB: 22
ASM: no
WIDEMUL: auto
WITH_VALGRIND: yes
diff --git a/src/secp256k1/.github/workflows/ci.yml b/src/secp256k1/.github/workflows/ci.yml
index d246682044..ade94e1eec 100644
--- a/src/secp256k1/.github/workflows/ci.yml
+++ b/src/secp256k1/.github/workflows/ci.yml
@@ -21,8 +21,8 @@ env:
MAKEFLAGS: '-j4'
BUILD: 'check'
### secp256k1 config
- ECMULTWINDOW: 'auto'
- ECMULTGENKB: 'auto'
+ ECMULTWINDOW: 15
+ ECMULTGENKB: 22
ASM: 'no'
WIDEMUL: 'auto'
WITH_VALGRIND: 'yes'
@@ -485,18 +485,24 @@ jobs:
matrix:
configuration:
- env_vars:
+ CTIMETESTS: 'yes'
CFLAGS: '-fsanitize=memory -fsanitize-recover=memory -g'
- env_vars:
ECMULTGENKB: 2
ECMULTWINDOW: 2
+ CTIMETESTS: 'yes'
CFLAGS: '-fsanitize=memory -fsanitize-recover=memory -g -O3'
+ - env_vars:
+ # -fsanitize-memory-param-retval is clang's default, but our build system disables it
+ # when ctime_tests are enabled.
+ CFLAGS: '-fsanitize=memory -fsanitize-recover=memory -fsanitize-memory-param-retval -g'
+ CTIMETESTS: 'no'
env:
ECDH: 'yes'
RECOVERY: 'yes'
SCHNORRSIG: 'yes'
ELLSWIFT: 'yes'
- CTIMETESTS: 'yes'
CC: 'clang'
SECP256K1_TEST_ITERS: 32
ASM: 'no'
@@ -585,10 +591,10 @@ jobs:
run: env
if: ${{ always() }}
- macos-native:
- name: "x86_64: macOS Monterey"
+ x86_64-macos-native:
+ name: "x86_64: macOS Monterey, Valgrind"
# See: https://github.com/actions/runner-images#available-images.
- runs-on: macos-12 # Use M1 once available https://github.com/github/roadmap/issues/528
+ runs-on: macos-12
env:
CC: 'clang'
@@ -644,6 +650,62 @@ jobs:
run: env
if: ${{ always() }}
+ arm64-macos-native:
+ name: "ARM64: macOS Sonoma"
+ # See: https://github.com/actions/runner-images#available-images.
+ runs-on: macos-14
+
+ env:
+ CC: 'clang'
+ HOMEBREW_NO_AUTO_UPDATE: 1
+ HOMEBREW_NO_INSTALL_CLEANUP: 1
+ WITH_VALGRIND: 'no'
+ CTIMETESTS: 'no'
+
+ strategy:
+ fail-fast: false
+ matrix:
+ env_vars:
+ - { WIDEMUL: 'int64', RECOVERY: 'yes', ECDH: 'yes', SCHNORRSIG: 'yes', ELLSWIFT: 'yes' }
+ - { WIDEMUL: 'int128_struct', ECMULTGENPRECISION: 2, ECMULTWINDOW: 4 }
+ - { WIDEMUL: 'int128', ECDH: 'yes', SCHNORRSIG: 'yes', ELLSWIFT: 'yes' }
+ - { WIDEMUL: 'int128', RECOVERY: 'yes' }
+ - { WIDEMUL: 'int128', RECOVERY: 'yes', ECDH: 'yes', SCHNORRSIG: 'yes', ELLSWIFT: 'yes' }
+ - { WIDEMUL: 'int128', RECOVERY: 'yes', ECDH: 'yes', SCHNORRSIG: 'yes', ELLSWIFT: 'yes', CC: 'gcc' }
+ - { WIDEMUL: 'int128', RECOVERY: 'yes', ECDH: 'yes', SCHNORRSIG: 'yes', ELLSWIFT: 'yes', CPPFLAGS: '-DVERIFY' }
+ - BUILD: 'distcheck'
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v4
+
+ - name: Install Homebrew packages
+ run: |
+ brew install automake libtool gcc
+ ln -s $(brew --prefix gcc)/bin/gcc-?? /usr/local/bin/gcc
+
+ - name: CI script
+ env: ${{ matrix.env_vars }}
+ run: ./ci/ci.sh
+
+ - run: cat tests.log || true
+ if: ${{ always() }}
+ - run: cat noverify_tests.log || true
+ if: ${{ always() }}
+ - run: cat exhaustive_tests.log || true
+ if: ${{ always() }}
+ - run: cat ctime_tests.log || true
+ if: ${{ always() }}
+ - run: cat bench.log || true
+ if: ${{ always() }}
+ - run: cat config.log || true
+ if: ${{ always() }}
+ - run: cat test_env.log || true
+ if: ${{ always() }}
+ - name: CI env
+ run: env
+ if: ${{ always() }}
+
win64-native:
name: ${{ matrix.configuration.job_name }}
# See: https://github.com/actions/runner-images#available-images.
diff --git a/src/secp256k1/CMakeLists.txt b/src/secp256k1/CMakeLists.txt
index 88994d828a..7e3465a75b 100644
--- a/src/secp256k1/CMakeLists.txt
+++ b/src/secp256k1/CMakeLists.txt
@@ -18,15 +18,14 @@ project(libsecp256k1
)
if(CMAKE_VERSION VERSION_LESS 3.21)
- get_directory_property(parent_directory PARENT_DIRECTORY)
- if(parent_directory)
- set(PROJECT_IS_TOP_LEVEL OFF CACHE INTERNAL "Emulates CMake 3.21+ behavior.")
- set(${PROJECT_NAME}_IS_TOP_LEVEL OFF CACHE INTERNAL "Emulates CMake 3.21+ behavior.")
+ # Emulates CMake 3.21+ behavior.
+ if(CMAKE_SOURCE_DIR STREQUAL CMAKE_CURRENT_SOURCE_DIR)
+ set(PROJECT_IS_TOP_LEVEL ON)
+ set(${PROJECT_NAME}_IS_TOP_LEVEL ON)
else()
- set(PROJECT_IS_TOP_LEVEL ON CACHE INTERNAL "Emulates CMake 3.21+ behavior.")
- set(${PROJECT_NAME}_IS_TOP_LEVEL ON CACHE INTERNAL "Emulates CMake 3.21+ behavior.")
+ set(PROJECT_IS_TOP_LEVEL OFF)
+ set(${PROJECT_NAME}_IS_TOP_LEVEL OFF)
endif()
- unset(parent_directory)
endif()
# The library version is based on libtool versioning of the ABI. The set of
@@ -92,21 +91,15 @@ if(SECP256K1_USE_EXTERNAL_DEFAULT_CALLBACKS)
add_compile_definitions(USE_EXTERNAL_DEFAULT_CALLBACKS=1)
endif()
-set(SECP256K1_ECMULT_WINDOW_SIZE "AUTO" CACHE STRING "Window size for ecmult precomputation for verification, specified as integer in range [2..24]. \"AUTO\" is a reasonable setting for desktop machines (currently 15). [default=AUTO]")
-set_property(CACHE SECP256K1_ECMULT_WINDOW_SIZE PROPERTY STRINGS "AUTO" 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24)
+set(SECP256K1_ECMULT_WINDOW_SIZE 15 CACHE STRING "Window size for ecmult precomputation for verification, specified as integer in range [2..24]. The default value is a reasonable setting for desktop machines (currently 15). [default=15]")
+set_property(CACHE SECP256K1_ECMULT_WINDOW_SIZE PROPERTY STRINGS 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24)
include(CheckStringOptionValue)
check_string_option_value(SECP256K1_ECMULT_WINDOW_SIZE)
-if(SECP256K1_ECMULT_WINDOW_SIZE STREQUAL "AUTO")
- set(SECP256K1_ECMULT_WINDOW_SIZE 15)
-endif()
add_compile_definitions(ECMULT_WINDOW_SIZE=${SECP256K1_ECMULT_WINDOW_SIZE})
-set(SECP256K1_ECMULT_GEN_KB "AUTO" CACHE STRING "The size of the precomputed table for signing in multiples of 1024 bytes (on typical platforms). Larger values result in possibly better signing or key generation performance at the cost of a larger table. Valid choices are 2, 22, 86. \"AUTO\" is a reasonable setting for desktop machines (currently 22). [default=AUTO]")
-set_property(CACHE SECP256K1_ECMULT_GEN_KB PROPERTY STRINGS "AUTO" 2 22 86)
+set(SECP256K1_ECMULT_GEN_KB 22 CACHE STRING "The size of the precomputed table for signing in multiples of 1024 bytes (on typical platforms). Larger values result in possibly better signing or key generation performance at the cost of a larger table. Valid choices are 2, 22, 86. The default value is a reasonable setting for desktop machines (currently 22). [default=22]")
+set_property(CACHE SECP256K1_ECMULT_GEN_KB PROPERTY STRINGS 2 22 86)
check_string_option_value(SECP256K1_ECMULT_GEN_KB)
-if(SECP256K1_ECMULT_GEN_KB STREQUAL "AUTO")
- set(SECP256K1_ECMULT_GEN_KB 22)
-endif()
if(SECP256K1_ECMULT_GEN_KB EQUAL 2)
add_compile_definitions(COMB_BLOCKS=2)
add_compile_definitions(COMB_TEETH=5)
@@ -214,23 +207,25 @@ mark_as_advanced(
CMAKE_SHARED_LINKER_FLAGS_COVERAGE
)
-get_property(is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
-set(default_build_type "RelWithDebInfo")
-if(is_multi_config)
- set(CMAKE_CONFIGURATION_TYPES "${default_build_type}" "Release" "Debug" "MinSizeRel" "Coverage" CACHE STRING
- "Supported configuration types."
- FORCE
- )
-else()
- set_property(CACHE CMAKE_BUILD_TYPE PROPERTY
- STRINGS "${default_build_type}" "Release" "Debug" "MinSizeRel" "Coverage"
- )
- if(NOT CMAKE_BUILD_TYPE)
- message(STATUS "Setting build type to \"${default_build_type}\" as none was specified")
- set(CMAKE_BUILD_TYPE "${default_build_type}" CACHE STRING
- "Choose the type of build."
+if(PROJECT_IS_TOP_LEVEL)
+ get_property(is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG)
+ set(default_build_type "RelWithDebInfo")
+ if(is_multi_config)
+ set(CMAKE_CONFIGURATION_TYPES "${default_build_type}" "Release" "Debug" "MinSizeRel" "Coverage" CACHE STRING
+ "Supported configuration types."
FORCE
)
+ else()
+ set_property(CACHE CMAKE_BUILD_TYPE PROPERTY
+ STRINGS "${default_build_type}" "Release" "Debug" "MinSizeRel" "Coverage"
+ )
+ if(NOT CMAKE_BUILD_TYPE)
+ message(STATUS "Setting build type to \"${default_build_type}\" as none was specified")
+ set(CMAKE_BUILD_TYPE "${default_build_type}" CACHE STRING
+ "Choose the type of build."
+ FORCE
+ )
+ endif()
endif()
endif()
@@ -263,10 +258,17 @@ endif()
set(CMAKE_C_VISIBILITY_PRESET hidden)
-# Ask CTest to create a "check" target (e.g., make check) as alias for the "test" target.
-# CTEST_TEST_TARGET_ALIAS is not documented but supposed to be user-facing.
-# See: https://gitlab.kitware.com/cmake/cmake/-/commit/816c9d1aa1f2b42d40c81a991b68c96eb12b6d2
-set(CTEST_TEST_TARGET_ALIAS check)
+set(print_msan_notice)
+if(SECP256K1_BUILD_CTIME_TESTS)
+ include(CheckMemorySanitizer)
+ check_memory_sanitizer(msan_enabled)
+ if(msan_enabled)
+ try_append_c_flags(-fno-sanitize-memory-param-retval)
+ set(print_msan_notice YES)
+ endif()
+ unset(msan_enabled)
+endif()
+
include(CTest)
# We do not use CTest's BUILD_TESTING because a single toggle for all tests is too coarse for our needs.
mark_as_advanced(BUILD_TESTING)
@@ -274,14 +276,16 @@ if(SECP256K1_BUILD_BENCHMARK OR SECP256K1_BUILD_TESTS OR SECP256K1_BUILD_EXHAUST
enable_testing()
endif()
-set(SECP256K1_LATE_CFLAGS "" CACHE STRING "Compiler flags that are added to the command line after all other flags added by the build system.")
-include(AllTargetsCompileOptions)
+set(SECP256K1_APPEND_CFLAGS "" CACHE STRING "Compiler flags that are appended to the command line after all other flags added by the build system. This variable is intended for debugging and special builds.")
+if(SECP256K1_APPEND_CFLAGS)
+ # Appending to this low-level rule variable is the only way to
+ # guarantee that the flags appear at the end of the command line.
+ string(APPEND CMAKE_C_COMPILE_OBJECT " ${SECP256K1_APPEND_CFLAGS}")
+endif()
add_subdirectory(src)
-all_targets_compile_options(src "${SECP256K1_LATE_CFLAGS}")
if(SECP256K1_BUILD_EXAMPLES)
add_subdirectory(examples)
- all_targets_compile_options(examples "${SECP256K1_LATE_CFLAGS}")
endif()
message("\n")
@@ -332,7 +336,7 @@ message("Valgrind .............................. ${SECP256K1_VALGRIND}")
get_directory_property(definitions COMPILE_DEFINITIONS)
string(REPLACE ";" " " definitions "${definitions}")
message("Preprocessor defined macros ........... ${definitions}")
-message("C compiler ............................ ${CMAKE_C_COMPILER}")
+message("C compiler ............................ ${CMAKE_C_COMPILER_ID} ${CMAKE_C_COMPILER_VERSION}, ${CMAKE_C_COMPILER}")
message("CFLAGS ................................ ${CMAKE_C_FLAGS}")
get_directory_property(compile_options COMPILE_OPTIONS)
string(REPLACE ";" " " compile_options "${compile_options}")
@@ -355,10 +359,17 @@ else()
message(" - LDFLAGS for executables ............ ${CMAKE_EXE_LINKER_FLAGS_DEBUG}")
message(" - LDFLAGS for shared libraries ....... ${CMAKE_SHARED_LINKER_FLAGS_DEBUG}")
endif()
-if(SECP256K1_LATE_CFLAGS)
- message("SECP256K1_LATE_CFLAGS ................. ${SECP256K1_LATE_CFLAGS}")
+if(SECP256K1_APPEND_CFLAGS)
+ message("SECP256K1_APPEND_CFLAGS ............... ${SECP256K1_APPEND_CFLAGS}")
+endif()
+message("")
+if(print_msan_notice)
+ message(
+ "Note:\n"
+ " MemorySanitizer detected, tried to add -fno-sanitize-memory-param-retval to compile options\n"
+ " to avoid false positives in ctime_tests. Pass -DSECP256K1_BUILD_CTIME_TESTS=OFF to avoid this.\n"
+ )
endif()
-message("\n")
if(SECP256K1_EXPERIMENTAL)
message(
" ******\n"
diff --git a/src/secp256k1/README.md b/src/secp256k1/README.md
index 6e88eb4ecb..e8d4123ab9 100644
--- a/src/secp256k1/README.md
+++ b/src/secp256k1/README.md
@@ -20,6 +20,7 @@ Features:
* Optional module for public key recovery.
* Optional module for ECDH key exchange.
* Optional module for Schnorr signatures according to [BIP-340](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki).
+* Optional module for ElligatorSwift key exchange according to [BIP-324](https://github.com/bitcoin/bips/blob/master/bip-0324.mediawiki).
Implementation details
----------------------
diff --git a/src/secp256k1/build-aux/m4/bitcoin_secp.m4 b/src/secp256k1/build-aux/m4/bitcoin_secp.m4
index 11adef4f22..048267fa6e 100644
--- a/src/secp256k1/build-aux/m4/bitcoin_secp.m4
+++ b/src/secp256k1/build-aux/m4/bitcoin_secp.m4
@@ -45,6 +45,22 @@ fi
AC_MSG_RESULT($has_valgrind)
])
+AC_DEFUN([SECP_MSAN_CHECK], [
+AC_MSG_CHECKING(whether MemorySanitizer is enabled)
+AC_COMPILE_IFELSE([AC_LANG_SOURCE([[
+ #if defined(__has_feature)
+ # if __has_feature(memory_sanitizer)
+ /* MemorySanitizer is enabled. */
+ # elif
+ # error "MemorySanitizer is disabled."
+ # endif
+ #else
+ # error "__has_feature is not defined."
+ #endif
+ ]])], [msan_enabled=yes], [msan_enabled=no])
+AC_MSG_RESULT([$msan_enabled])
+])
+
dnl SECP_TRY_APPEND_CFLAGS(flags, VAR)
dnl Append flags to VAR if CC accepts them.
AC_DEFUN([SECP_TRY_APPEND_CFLAGS], [
diff --git a/src/secp256k1/cmake/AllTargetsCompileOptions.cmake b/src/secp256k1/cmake/AllTargetsCompileOptions.cmake
deleted file mode 100644
index 6e420e0fde..0000000000
--- a/src/secp256k1/cmake/AllTargetsCompileOptions.cmake
+++ /dev/null
@@ -1,12 +0,0 @@
-# Add compile options to all targets added in the subdirectory.
-function(all_targets_compile_options dir options)
- get_directory_property(targets DIRECTORY ${dir} BUILDSYSTEM_TARGETS)
- separate_arguments(options)
- set(compiled_target_types STATIC_LIBRARY SHARED_LIBRARY OBJECT_LIBRARY EXECUTABLE)
- foreach(target ${targets})
- get_target_property(type ${target} TYPE)
- if(type IN_LIST compiled_target_types)
- target_compile_options(${target} PRIVATE ${options})
- endif()
- endforeach()
-endfunction()
diff --git a/src/secp256k1/cmake/CheckMemorySanitizer.cmake b/src/secp256k1/cmake/CheckMemorySanitizer.cmake
new file mode 100644
index 0000000000..d9ef681e65
--- /dev/null
+++ b/src/secp256k1/cmake/CheckMemorySanitizer.cmake
@@ -0,0 +1,18 @@
+include_guard(GLOBAL)
+include(CheckCSourceCompiles)
+
+function(check_memory_sanitizer output)
+ set(CMAKE_TRY_COMPILE_TARGET_TYPE STATIC_LIBRARY)
+ check_c_source_compiles("
+ #if defined(__has_feature)
+ # if __has_feature(memory_sanitizer)
+ /* MemorySanitizer is enabled. */
+ # elif
+ # error \"MemorySanitizer is disabled.\"
+ # endif
+ #else
+ # error \"__has_feature is not defined.\"
+ #endif
+ " HAVE_MSAN)
+ set(${output} ${HAVE_MSAN} PARENT_SCOPE)
+endfunction()
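The new CMake module compiles the probe above to decide whether MemorySanitizer is active. The same probe can be exercised as a standalone program; this sketch is illustrative only:

    #include <cstdio>

    int main()
    {
    #if defined(__has_feature)
    #  if __has_feature(memory_sanitizer)
        std::puts("MemorySanitizer is enabled");
    #  else
        std::puts("MemorySanitizer is disabled");
    #  endif
    #else
        std::puts("__has_feature is not defined by this compiler");
    #endif
    }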
diff --git a/src/secp256k1/configure.ac b/src/secp256k1/configure.ac
index 8b62e1dbfd..adae189787 100644
--- a/src/secp256k1/configure.ac
+++ b/src/secp256k1/configure.ac
@@ -203,22 +203,22 @@ AC_ARG_WITH([test-override-wide-multiply], [] ,[set_widemul=$withval], [set_wide
AC_ARG_WITH([asm], [AS_HELP_STRING([--with-asm=x86_64|arm32|no|auto],
[assembly to use (experimental: arm32) [default=auto]])],[req_asm=$withval], [req_asm=auto])
-AC_ARG_WITH([ecmult-window], [AS_HELP_STRING([--with-ecmult-window=SIZE|auto],
+AC_ARG_WITH([ecmult-window], [AS_HELP_STRING([--with-ecmult-window=SIZE],
[window size for ecmult precomputation for verification, specified as integer in range [2..24].]
[Larger values result in possibly better performance at the cost of an exponentially larger precomputed table.]
[The table will store 2^(SIZE-1) * 64 bytes of data but can be larger in memory due to platform-specific padding and alignment.]
[A window size larger than 15 will require you to delete the prebuilt precomputed_ecmult.c file so that it can be rebuilt.]
[For very large window sizes, use "make -j 1" to reduce memory use during compilation.]
-["auto" is a reasonable setting for desktop machines (currently 15). [default=auto]]
+[The default value is a reasonable setting for desktop machines (currently 15). [default=15]]
)],
-[req_ecmult_window=$withval], [req_ecmult_window=auto])
+[set_ecmult_window=$withval], [set_ecmult_window=15])
-AC_ARG_WITH([ecmult-gen-kb], [AS_HELP_STRING([--with-ecmult-gen-kb=2|22|86|auto],
+AC_ARG_WITH([ecmult-gen-kb], [AS_HELP_STRING([--with-ecmult-gen-kb=2|22|86],
[The size of the precomputed table for signing in multiples of 1024 bytes (on typical platforms).]
[Larger values result in possibly better signing/key generation performance at the cost of a larger table.]
-["auto" is a reasonable setting for desktop machines (currently 22). [default=auto]]
+[The default value is a reasonable setting for desktop machines (currently 22). [default=22]]
)],
-[req_ecmult_gen_kb=$withval], [req_ecmult_gen_kb=auto])
+[set_ecmult_gen_kb=$withval], [set_ecmult_gen_kb=22])
AC_ARG_WITH([valgrind], [AS_HELP_STRING([--with-valgrind=yes|no|auto],
[Build with extra checks for running inside Valgrind [default=auto]]
@@ -247,6 +247,20 @@ if test x"$enable_ctime_tests" = x"auto"; then
enable_ctime_tests=$enable_valgrind
fi
+print_msan_notice=no
+if test x"$enable_ctime_tests" = x"yes"; then
+ SECP_MSAN_CHECK
+ # MSan on Clang >=16 reports uninitialized memory in function parameters and return values, even if
+ # the uninitialized variable is never actually "used". This is called "eager" checking, and it
+ # sounds like a good idea for normal use of MSan. However, it yields many false positives in the
+ # ctime_tests because many return values depend on secret (i.e., "uninitialized") values, and
+ # we're only interested in detecting branches (which count as "uses") on secret data.
+ if test x"$msan_enabled" = x"yes"; then
+ SECP_TRY_APPEND_CFLAGS([-fno-sanitize-memory-param-retval], SECP_CFLAGS)
+ print_msan_notice=yes
+ fi
+fi
+
if test x"$enable_coverage" = x"yes"; then
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DCOVERAGE=1"
SECP_CFLAGS="-O0 --coverage $SECP_CFLAGS"
@@ -335,14 +349,7 @@ auto)
;;
esac
-# Set ecmult window size
-if test x"$req_ecmult_window" = x"auto"; then
- set_ecmult_window=15
-else
- set_ecmult_window=$req_ecmult_window
-fi
-
-error_window_size=['window size for ecmult precomputation not an integer in range [2..24] or "auto"']
+error_window_size=['window size for ecmult precomputation not an integer in range [2..24]']
case $set_ecmult_window in
''|*[[!0-9]]*)
# no valid integer
@@ -357,13 +364,6 @@ case $set_ecmult_window in
;;
esac
-# Set ecmult gen kb
-if test x"$req_ecmult_gen_kb" = x"auto"; then
- set_ecmult_gen_kb=22
-else
- set_ecmult_gen_kb=$req_ecmult_gen_kb
-fi
-
case $set_ecmult_gen_kb in
2)
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DCOMB_BLOCKS=2 -DCOMB_TEETH=5"
@@ -375,7 +375,7 @@ case $set_ecmult_gen_kb in
SECP_CONFIG_DEFINES="$SECP_CONFIG_DEFINES -DCOMB_BLOCKS=43 -DCOMB_TEETH=6"
;;
*)
- AC_MSG_ERROR(['ecmult gen table size not 2, 22, 86 or "auto"'])
+ AC_MSG_ERROR(['ecmult gen table size not 2, 22 or 86'])
;;
esac
@@ -426,12 +426,7 @@ fi
### Check for --enable-experimental if necessary
###
-if test x"$enable_experimental" = x"yes"; then
- AC_MSG_NOTICE([******])
- AC_MSG_NOTICE([WARNING: experimental build])
- AC_MSG_NOTICE([Experimental features do not have stable APIs or properties, and may not be safe for production use.])
- AC_MSG_NOTICE([******])
-else
+if test x"$enable_experimental" = x"no"; then
if test x"$set_asm" = x"arm32"; then
AC_MSG_ERROR([ARM32 assembly is experimental. Use --enable-experimental to allow.])
fi
@@ -492,3 +487,17 @@ echo " CPPFLAGS = $CPPFLAGS"
echo " SECP_CFLAGS = $SECP_CFLAGS"
echo " CFLAGS = $CFLAGS"
echo " LDFLAGS = $LDFLAGS"
+
+if test x"$print_msan_notice" = x"yes"; then
+ echo
+ echo "Note:"
+ echo " MemorySanitizer detected, tried to add -fno-sanitize-memory-param-retval to SECP_CFLAGS"
+ echo " to avoid false positives in ctime_tests. Pass --disable-ctime-tests to avoid this."
+fi
+
+if test x"$enable_experimental" = x"yes"; then
+ echo
+ echo "WARNING: Experimental build"
+ echo " Experimental features do not have stable APIs or properties, and may not be safe for"
+ echo " production use."
+fi
diff --git a/src/secp256k1/src/modules/ecdh/tests_impl.h b/src/secp256k1/src/modules/ecdh/tests_impl.h
index 6be96eacbe..3c3acdaf8c 100644
--- a/src/secp256k1/src/modules/ecdh/tests_impl.h
+++ b/src/secp256k1/src/modules/ecdh/tests_impl.h
@@ -56,7 +56,7 @@ static void test_ecdh_generator_basepoint(void) {
size_t point_ser_len = sizeof(point_ser);
secp256k1_scalar s;
- random_scalar_order(&s);
+ testutil_random_scalar_order(&s);
secp256k1_scalar_get_b32(s_b32, &s);
CHECK(secp256k1_ec_pubkey_create(CTX, &point[0], s_one) == 1);
@@ -95,7 +95,7 @@ static void test_bad_scalar(void) {
secp256k1_pubkey point;
/* Create random point */
- random_scalar_order(&rand);
+ testutil_random_scalar_order(&rand);
secp256k1_scalar_get_b32(s_rand, &rand);
CHECK(secp256k1_ec_pubkey_create(CTX, &point, s_rand) == 1);
@@ -127,7 +127,7 @@ static void test_result_basepoint(void) {
CHECK(secp256k1_ecdh(CTX, out_base, &point, s_one, NULL, NULL) == 1);
for (i = 0; i < 2 * COUNT; i++) {
- random_scalar_order(&rand);
+ testutil_random_scalar_order(&rand);
secp256k1_scalar_get_b32(s, &rand);
secp256k1_scalar_inverse(&rand, &rand);
secp256k1_scalar_get_b32(s_inv, &rand);
diff --git a/src/secp256k1/src/modules/ellswift/tests_impl.h b/src/secp256k1/src/modules/ellswift/tests_impl.h
index f96e3a1268..ed5658f34c 100644
--- a/src/secp256k1/src/modules/ellswift/tests_impl.h
+++ b/src/secp256k1/src/modules/ellswift/tests_impl.h
@@ -229,9 +229,9 @@ void run_ellswift_tests(void) {
secp256k1_ge g, g2;
secp256k1_pubkey pubkey, pubkey2;
/* Generate random public key and random randomizer. */
- random_group_element_test(&g);
+ testutil_random_ge_test(&g);
secp256k1_pubkey_save(&pubkey, &g);
- secp256k1_testrand256(rnd32);
+ testrand256(rnd32);
/* Convert the public key to ElligatorSwift and back. */
secp256k1_ellswift_encode(CTX, ell64, &pubkey, rnd32);
secp256k1_ellswift_decode(CTX, &pubkey2, ell64);
@@ -249,8 +249,8 @@ void run_ellswift_tests(void) {
unsigned char ell64[64];
int ret;
/* Generate random secret key and random randomizer. */
- if (i & 1) secp256k1_testrand256_test(auxrnd32);
- random_scalar_order_test(&sec);
+ if (i & 1) testrand256_test(auxrnd32);
+ testutil_random_scalar_order_test(&sec);
secp256k1_scalar_get_b32(sec32, &sec);
/* Construct ElligatorSwift-encoded public keys for that key. */
ret = secp256k1_ellswift_create(CTX, ell64, sec32, (i & 1) ? auxrnd32 : NULL);
@@ -271,11 +271,11 @@ void run_ellswift_tests(void) {
secp256k1_pubkey pub;
int ret;
/* Generate random secret key. */
- random_scalar_order_test(&sec);
+ testutil_random_scalar_order_test(&sec);
secp256k1_scalar_get_b32(sec32, &sec);
/* Generate random ElligatorSwift encoding for the remote key and decode it. */
- secp256k1_testrand256_test(ell64);
- secp256k1_testrand256_test(ell64 + 32);
+ testrand256_test(ell64);
+ testrand256_test(ell64 + 32);
secp256k1_ellswift_decode(CTX, &pub, ell64);
secp256k1_pubkey_load(CTX, &dec, &pub);
secp256k1_gej_set_ge(&decj, &dec);
@@ -313,18 +313,18 @@ void run_ellswift_tests(void) {
data = NULL;
} else {
hash_function = secp256k1_ellswift_xdh_hash_function_prefix;
- secp256k1_testrand256_test(prefix64);
- secp256k1_testrand256_test(prefix64 + 32);
+ testrand256_test(prefix64);
+ testrand256_test(prefix64 + 32);
data = prefix64;
}
/* Generate random secret keys and random randomizers. */
- secp256k1_testrand256_test(auxrnd32a);
- secp256k1_testrand256_test(auxrnd32b);
- random_scalar_order_test(&seca);
+ testrand256_test(auxrnd32a);
+ testrand256_test(auxrnd32b);
+ testutil_random_scalar_order_test(&seca);
/* Draw secb uniformly at random to make sure that the secret keys
* differ */
- random_scalar_order(&secb);
+ testutil_random_scalar_order(&secb);
secp256k1_scalar_get_b32(sec32a, &seca);
secp256k1_scalar_get_b32(sec32b, &secb);
@@ -349,13 +349,13 @@ void run_ellswift_tests(void) {
/* Verify that the shared secret doesn't match if other side's public key is incorrect. */
/* For A (using a bad public key for B): */
memcpy(ell64b_bad, ell64b, sizeof(ell64a_bad));
- secp256k1_testrand_flip(ell64b_bad, sizeof(ell64b_bad));
+ testrand_flip(ell64b_bad, sizeof(ell64b_bad));
ret = secp256k1_ellswift_xdh(CTX, share32_bad, ell64a, ell64b_bad, sec32a, 0, hash_function, data);
CHECK(ret); /* Mismatching encodings don't get detected by secp256k1_ellswift_xdh. */
CHECK(secp256k1_memcmp_var(share32_bad, share32a, 32) != 0);
/* For B (using a bad public key for A): */
memcpy(ell64a_bad, ell64a, sizeof(ell64a_bad));
- secp256k1_testrand_flip(ell64a_bad, sizeof(ell64a_bad));
+ testrand_flip(ell64a_bad, sizeof(ell64a_bad));
ret = secp256k1_ellswift_xdh(CTX, share32_bad, ell64a_bad, ell64b, sec32b, 1, hash_function, data);
CHECK(ret);
CHECK(secp256k1_memcmp_var(share32_bad, share32b, 32) != 0);
@@ -363,12 +363,12 @@ void run_ellswift_tests(void) {
/* Verify that the shared secret doesn't match if the private key is incorrect. */
/* For A: */
memcpy(sec32a_bad, sec32a, sizeof(sec32a_bad));
- secp256k1_testrand_flip(sec32a_bad, sizeof(sec32a_bad));
+ testrand_flip(sec32a_bad, sizeof(sec32a_bad));
ret = secp256k1_ellswift_xdh(CTX, share32_bad, ell64a, ell64b, sec32a_bad, 0, hash_function, data);
CHECK(!ret || secp256k1_memcmp_var(share32_bad, share32a, 32) != 0);
/* For B: */
memcpy(sec32b_bad, sec32b, sizeof(sec32b_bad));
- secp256k1_testrand_flip(sec32b_bad, sizeof(sec32b_bad));
+ testrand_flip(sec32b_bad, sizeof(sec32b_bad));
ret = secp256k1_ellswift_xdh(CTX, share32_bad, ell64a, ell64b, sec32b_bad, 1, hash_function, data);
CHECK(!ret || secp256k1_memcmp_var(share32_bad, share32b, 32) != 0);
@@ -376,7 +376,7 @@ void run_ellswift_tests(void) {
/* Verify that the shared secret doesn't match when a different encoding of the same public key is used. */
/* For A (changing B's public key): */
memcpy(auxrnd32b_bad, auxrnd32b, sizeof(auxrnd32b_bad));
- secp256k1_testrand_flip(auxrnd32b_bad, sizeof(auxrnd32b_bad));
+ testrand_flip(auxrnd32b_bad, sizeof(auxrnd32b_bad));
ret = secp256k1_ellswift_create(CTX, ell64b_bad, sec32b, auxrnd32b_bad);
CHECK(ret);
ret = secp256k1_ellswift_xdh(CTX, share32_bad, ell64a, ell64b_bad, sec32a, 0, hash_function, data);
@@ -384,7 +384,7 @@ void run_ellswift_tests(void) {
CHECK(secp256k1_memcmp_var(share32_bad, share32a, 32) != 0);
/* For B (changing A's public key): */
memcpy(auxrnd32a_bad, auxrnd32a, sizeof(auxrnd32a_bad));
- secp256k1_testrand_flip(auxrnd32a_bad, sizeof(auxrnd32a_bad));
+ testrand_flip(auxrnd32a_bad, sizeof(auxrnd32a_bad));
ret = secp256k1_ellswift_create(CTX, ell64a_bad, sec32a, auxrnd32a_bad);
CHECK(ret);
ret = secp256k1_ellswift_xdh(CTX, share32_bad, ell64a_bad, ell64b, sec32b, 1, hash_function, data);
diff --git a/src/secp256k1/src/modules/extrakeys/tests_impl.h b/src/secp256k1/src/modules/extrakeys/tests_impl.h
index 45521d1742..ab4ef4a74b 100644
--- a/src/secp256k1/src/modules/extrakeys/tests_impl.h
+++ b/src/secp256k1/src/modules/extrakeys/tests_impl.h
@@ -23,9 +23,9 @@ static void test_xonly_pubkey(void) {
int pk_parity;
int i;
- secp256k1_testrand256(sk);
+ testrand256(sk);
memset(ones32, 0xFF, 32);
- secp256k1_testrand256(xy_sk);
+ testrand256(xy_sk);
CHECK(secp256k1_ec_pubkey_create(CTX, &pk, sk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(CTX, &xonly_pk, &pk_parity, &pk) == 1);
@@ -95,7 +95,7 @@ static void test_xonly_pubkey(void) {
* the curve) then xonly_pubkey_parse should fail as well. */
for (i = 0; i < COUNT; i++) {
unsigned char rand33[33];
- secp256k1_testrand256(&rand33[1]);
+ testrand256(&rand33[1]);
rand33[0] = SECP256K1_TAG_PUBKEY_EVEN;
if (!secp256k1_ec_pubkey_parse(CTX, &pk, rand33, 33)) {
memset(&xonly_pk, 1, sizeof(xonly_pk));
@@ -152,8 +152,8 @@ static void test_xonly_pubkey_tweak(void) {
int i;
memset(overflows, 0xff, sizeof(overflows));
- secp256k1_testrand256(tweak);
- secp256k1_testrand256(sk);
+ testrand256(tweak);
+ testrand256(sk);
CHECK(secp256k1_ec_pubkey_create(CTX, &internal_pk, sk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(CTX, &internal_xonly_pk, &pk_parity, &internal_pk) == 1);
@@ -190,7 +190,7 @@ static void test_xonly_pubkey_tweak(void) {
/* Invalid pk with a valid tweak */
memset(&internal_xonly_pk, 0, sizeof(internal_xonly_pk));
- secp256k1_testrand256(tweak);
+ testrand256(tweak);
CHECK_ILLEGAL(CTX, secp256k1_xonly_pubkey_tweak_add(CTX, &output_pk, &internal_xonly_pk, tweak));
CHECK(secp256k1_memcmp_var(&output_pk, zeros64, sizeof(output_pk)) == 0);
}
@@ -209,8 +209,8 @@ static void test_xonly_pubkey_tweak_check(void) {
unsigned char tweak[32];
memset(overflows, 0xff, sizeof(overflows));
- secp256k1_testrand256(tweak);
- secp256k1_testrand256(sk);
+ testrand256(tweak);
+ testrand256(sk);
CHECK(secp256k1_ec_pubkey_create(CTX, &internal_pk, sk) == 1);
CHECK(secp256k1_xonly_pubkey_from_pubkey(CTX, &internal_xonly_pk, &pk_parity, &internal_pk) == 1);
@@ -256,7 +256,7 @@ static void test_xonly_pubkey_tweak_recursive(void) {
unsigned char tweak[N_PUBKEYS - 1][32];
int i;
- secp256k1_testrand256(sk);
+ testrand256(sk);
CHECK(secp256k1_ec_pubkey_create(CTX, &pk[0], sk) == 1);
/* Add tweaks */
for (i = 0; i < N_PUBKEYS - 1; i++) {
@@ -292,7 +292,7 @@ static void test_keypair(void) {
memset(overflows, 0xFF, sizeof(overflows));
/* Test keypair_create */
- secp256k1_testrand256(sk);
+ testrand256(sk);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) != 0);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
@@ -311,7 +311,7 @@ static void test_keypair(void) {
CHECK(secp256k1_memcmp_var(zeros96, &keypair, sizeof(keypair)) == 0);
/* Test keypair_pub */
- secp256k1_testrand256(sk);
+ testrand256(sk);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
CHECK(secp256k1_keypair_pub(CTX, &pk, &keypair) == 1);
CHECK_ILLEGAL(CTX, secp256k1_keypair_pub(CTX, NULL, &keypair));
@@ -330,7 +330,7 @@ static void test_keypair(void) {
CHECK(secp256k1_memcmp_var(&pk, &pk_tmp, sizeof(pk)) == 0);
/** Test keypair_xonly_pub **/
- secp256k1_testrand256(sk);
+ testrand256(sk);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
CHECK(secp256k1_keypair_xonly_pub(CTX, &xonly_pk, &pk_parity, &keypair) == 1);
CHECK_ILLEGAL(CTX, secp256k1_keypair_xonly_pub(CTX, NULL, &pk_parity, &keypair));
@@ -353,7 +353,7 @@ static void test_keypair(void) {
CHECK(pk_parity == pk_parity_tmp);
/* Test keypair_seckey */
- secp256k1_testrand256(sk);
+ testrand256(sk);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
CHECK(secp256k1_keypair_sec(CTX, sk_tmp, &keypair) == 1);
CHECK_ILLEGAL(CTX, secp256k1_keypair_sec(CTX, NULL, &keypair));
@@ -381,8 +381,8 @@ static void test_keypair_add(void) {
int i;
CHECK(sizeof(zeros96) == sizeof(keypair));
- secp256k1_testrand256(sk);
- secp256k1_testrand256(tweak);
+ testrand256(sk);
+ testrand256(tweak);
memset(overflows, 0xFF, 32);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
@@ -407,7 +407,7 @@ static void test_keypair_add(void) {
for (i = 0; i < COUNT; i++) {
secp256k1_scalar scalar_tweak;
secp256k1_keypair keypair_tmp;
- secp256k1_testrand256(sk);
+ testrand256(sk);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
memcpy(&keypair_tmp, &keypair, sizeof(keypair));
/* Because sk may be negated before adding, we need to try with tweak =
@@ -423,7 +423,7 @@ static void test_keypair_add(void) {
/* Invalid keypair with a valid tweak */
memset(&keypair, 0, sizeof(keypair));
- secp256k1_testrand256(tweak);
+ testrand256(tweak);
CHECK_ILLEGAL(CTX, secp256k1_keypair_xonly_tweak_add(CTX, &keypair, tweak));
CHECK(secp256k1_memcmp_var(&keypair, zeros96, sizeof(keypair)) == 0);
/* Only seckey part of keypair invalid */
@@ -446,7 +446,7 @@ static void test_keypair_add(void) {
unsigned char sk32[32];
int pk_parity;
- secp256k1_testrand256(tweak);
+ testrand256(tweak);
CHECK(secp256k1_keypair_xonly_pub(CTX, &internal_pk, NULL, &keypair) == 1);
CHECK(secp256k1_keypair_xonly_tweak_add(CTX, &keypair, tweak) == 1);
CHECK(secp256k1_keypair_xonly_pub(CTX, &output_pk, &pk_parity, &keypair) == 1);
diff --git a/src/secp256k1/src/modules/recovery/tests_impl.h b/src/secp256k1/src/modules/recovery/tests_impl.h
index 728ccfed8d..7a28a3ce65 100644
--- a/src/secp256k1/src/modules/recovery/tests_impl.h
+++ b/src/secp256k1/src/modules/recovery/tests_impl.h
@@ -25,7 +25,7 @@ static int recovery_test_nonce_function(unsigned char *nonce32, const unsigned c
}
/* On the next run, return a valid nonce, but flip a coin as to whether or not to fail signing. */
memset(nonce32, 1, 32);
- return secp256k1_testrand_bits(1);
+ return testrand_bits(1);
}
static void test_ecdsa_recovery_api(void) {
@@ -106,8 +106,8 @@ static void test_ecdsa_recovery_end_to_end(void) {
/* Generate a random key and message. */
{
secp256k1_scalar msg, key;
- random_scalar_order_test(&msg);
- random_scalar_order_test(&key);
+ testutil_random_scalar_order_test(&msg);
+ testutil_random_scalar_order_test(&key);
secp256k1_scalar_get_b32(privkey, &key);
secp256k1_scalar_get_b32(message, &msg);
}
@@ -141,7 +141,7 @@ static void test_ecdsa_recovery_end_to_end(void) {
CHECK(secp256k1_memcmp_var(&pubkey, &recpubkey, sizeof(pubkey)) == 0);
/* Serialize/destroy/parse signature and verify again. */
CHECK(secp256k1_ecdsa_recoverable_signature_serialize_compact(CTX, sig, &recid, &rsignature[4]) == 1);
- sig[secp256k1_testrand_bits(6)] += 1 + secp256k1_testrand_int(255);
+ sig[testrand_bits(6)] += 1 + testrand_int(255);
CHECK(secp256k1_ecdsa_recoverable_signature_parse_compact(CTX, &rsignature[4], sig, recid) == 1);
CHECK(secp256k1_ecdsa_recoverable_signature_convert(CTX, &signature[4], &rsignature[4]) == 1);
CHECK(secp256k1_ecdsa_verify(CTX, &signature[4], message, &pubkey) == 0);
diff --git a/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h b/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h
index bc31d81107..601b54975d 100644
--- a/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h
+++ b/src/secp256k1/src/modules/schnorrsig/tests_exhaustive_impl.h
@@ -104,7 +104,7 @@ static void test_exhaustive_schnorrsig_verify(const secp256k1_context *ctx, cons
while (e_count_done < EXHAUSTIVE_TEST_ORDER) {
secp256k1_scalar e;
unsigned char msg32[32];
- secp256k1_testrand256(msg32);
+ testrand256(msg32);
secp256k1_schnorrsig_challenge(&e, sig64, msg32, sizeof(msg32), pk32);
/* Only do work if we hit a challenge we haven't tried before. */
if (!e_done[e]) {
@@ -120,7 +120,7 @@ static void test_exhaustive_schnorrsig_verify(const secp256k1_context *ctx, cons
expect_valid = actual_k != -1 && s != EXHAUSTIVE_TEST_ORDER &&
(s == (actual_k + actual_d * e) % EXHAUSTIVE_TEST_ORDER);
} else {
- secp256k1_testrand256(sig64 + 32);
+ testrand256(sig64 + 32);
expect_valid = 0;
}
valid = secp256k1_schnorrsig_verify(ctx, sig64, msg32, sizeof(msg32), &pubkeys[d - 1]);
@@ -161,7 +161,7 @@ static void test_exhaustive_schnorrsig_sign(const secp256k1_context *ctx, unsign
/* Generate random messages until all challenges have been tried. */
while (e_count_done < EXHAUSTIVE_TEST_ORDER) {
secp256k1_scalar e;
- secp256k1_testrand256(msg32);
+ testrand256(msg32);
secp256k1_schnorrsig_challenge(&e, xonly_pubkey_bytes[k - 1], msg32, sizeof(msg32), xonly_pubkey_bytes[d - 1]);
/* Only do work if we hit a challenge we haven't tried before. */
if (!e_done[e]) {
diff --git a/src/secp256k1/src/modules/schnorrsig/tests_impl.h b/src/secp256k1/src/modules/schnorrsig/tests_impl.h
index 8ada90a87b..aa4fc38270 100644
--- a/src/secp256k1/src/modules/schnorrsig/tests_impl.h
+++ b/src/secp256k1/src/modules/schnorrsig/tests_impl.h
@@ -15,7 +15,7 @@
static void nonce_function_bip340_bitflip(unsigned char **args, size_t n_flip, size_t n_bytes, size_t msglen, size_t algolen) {
unsigned char nonces[2][32];
CHECK(nonce_function_bip340(nonces[0], args[0], msglen, args[1], args[2], args[3], algolen, args[4]) == 1);
- secp256k1_testrand_flip(args[n_flip], n_bytes);
+ testrand_flip(args[n_flip], n_bytes);
CHECK(nonce_function_bip340(nonces[1], args[0], msglen, args[1], args[2], args[3], algolen, args[4]) == 1);
CHECK(secp256k1_memcmp_var(nonces[0], nonces[1], 32) != 0);
}
@@ -50,10 +50,10 @@ static void run_nonce_function_bip340_tests(void) {
secp256k1_nonce_function_bip340_sha256_tagged_aux(&sha_optimized);
test_sha256_eq(&sha, &sha_optimized);
- secp256k1_testrand256(msg);
- secp256k1_testrand256(key);
- secp256k1_testrand256(pk);
- secp256k1_testrand256(aux_rand);
+ testrand256(msg);
+ testrand256(key);
+ testrand256(pk);
+ testrand256(aux_rand);
/* Check that a bitflip in an argument results in different nonces. */
args[0] = msg;
@@ -76,12 +76,12 @@ static void run_nonce_function_bip340_tests(void) {
CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, NULL, 0, NULL) == 0);
CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1);
/* Other algo is fine */
- secp256k1_testrand_bytes_test(algo, algolen);
+ testrand_bytes_test(algo, algolen);
CHECK(nonce_function_bip340(nonce, msg, msglen, key, pk, algo, algolen, NULL) == 1);
for (i = 0; i < COUNT; i++) {
unsigned char nonce2[32];
- uint32_t offset = secp256k1_testrand_int(msglen - 1);
+ uint32_t offset = testrand_int(msglen - 1);
size_t msglen_tmp = (msglen + offset) % msglen;
size_t algolen_tmp;
@@ -90,7 +90,7 @@ static void run_nonce_function_bip340_tests(void) {
CHECK(secp256k1_memcmp_var(nonce, nonce2, 32) != 0);
/* Different algolen gives different nonce */
- offset = secp256k1_testrand_int(algolen - 1);
+ offset = testrand_int(algolen - 1);
algolen_tmp = (algolen + offset) % algolen;
CHECK(nonce_function_bip340(nonce2, msg, msglen, key, pk, algo, algolen_tmp, NULL) == 1);
CHECK(secp256k1_memcmp_var(nonce, nonce2, 32) != 0);
@@ -116,10 +116,10 @@ static void test_schnorrsig_api(void) {
secp256k1_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT;
secp256k1_schnorrsig_extraparams invalid_extraparams = {{ 0 }, NULL, NULL};
- secp256k1_testrand256(sk1);
- secp256k1_testrand256(sk2);
- secp256k1_testrand256(sk3);
- secp256k1_testrand256(msg);
+ testrand256(sk1);
+ testrand256(sk2);
+ testrand256(sk3);
+ testrand256(msg);
CHECK(secp256k1_keypair_create(CTX, &keypairs[0], sk1) == 1);
CHECK(secp256k1_keypair_create(CTX, &keypairs[1], sk2) == 1);
CHECK(secp256k1_keypair_create(CTX, &keypairs[2], sk3) == 1);
@@ -813,8 +813,8 @@ static void test_schnorrsig_sign(void) {
secp256k1_schnorrsig_extraparams extraparams = SECP256K1_SCHNORRSIG_EXTRAPARAMS_INIT;
unsigned char aux_rand[32];
- secp256k1_testrand256(sk);
- secp256k1_testrand256(aux_rand);
+ testrand256(sk);
+ testrand256(aux_rand);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk));
CHECK(secp256k1_keypair_xonly_pub(CTX, &pk, NULL, &keypair));
CHECK(secp256k1_schnorrsig_sign32(CTX, sig, msg, &keypair, NULL) == 1);
@@ -861,12 +861,12 @@ static void test_schnorrsig_sign_verify(void) {
secp256k1_xonly_pubkey pk;
secp256k1_scalar s;
- secp256k1_testrand256(sk);
+ testrand256(sk);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk));
CHECK(secp256k1_keypair_xonly_pub(CTX, &pk, NULL, &keypair));
for (i = 0; i < N_SIGS; i++) {
- secp256k1_testrand256(msg[i]);
+ testrand256(msg[i]);
CHECK(secp256k1_schnorrsig_sign32(CTX, sig[i], msg[i], &keypair, NULL));
CHECK(secp256k1_schnorrsig_verify(CTX, sig[i], msg[i], sizeof(msg[i]), &pk));
}
@@ -874,19 +874,19 @@ static void test_schnorrsig_sign_verify(void) {
{
/* Flip a few bits in the signature and in the message and check that
* verify and verify_batch (TODO) fail */
- size_t sig_idx = secp256k1_testrand_int(N_SIGS);
- size_t byte_idx = secp256k1_testrand_bits(5);
- unsigned char xorbyte = secp256k1_testrand_int(254)+1;
+ size_t sig_idx = testrand_int(N_SIGS);
+ size_t byte_idx = testrand_bits(5);
+ unsigned char xorbyte = testrand_int(254)+1;
sig[sig_idx][byte_idx] ^= xorbyte;
CHECK(!secp256k1_schnorrsig_verify(CTX, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk));
sig[sig_idx][byte_idx] ^= xorbyte;
- byte_idx = secp256k1_testrand_bits(5);
+ byte_idx = testrand_bits(5);
sig[sig_idx][32+byte_idx] ^= xorbyte;
CHECK(!secp256k1_schnorrsig_verify(CTX, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk));
sig[sig_idx][32+byte_idx] ^= xorbyte;
- byte_idx = secp256k1_testrand_bits(5);
+ byte_idx = testrand_bits(5);
msg[sig_idx][byte_idx] ^= xorbyte;
CHECK(!secp256k1_schnorrsig_verify(CTX, sig[sig_idx], msg[sig_idx], sizeof(msg[sig_idx]), &pk));
msg[sig_idx][byte_idx] ^= xorbyte;
@@ -916,9 +916,9 @@ static void test_schnorrsig_sign_verify(void) {
{
/* Test varying message lengths */
unsigned char msg_large[32 * 8];
- uint32_t msglen = secp256k1_testrand_int(sizeof(msg_large));
+ uint32_t msglen = testrand_int(sizeof(msg_large));
for (i = 0; i < sizeof(msg_large); i += 32) {
- secp256k1_testrand256(&msg_large[i]);
+ testrand256(&msg_large[i]);
}
CHECK(secp256k1_schnorrsig_sign_custom(CTX, sig[0], msg_large, msglen, &keypair, NULL) == 1);
CHECK(secp256k1_schnorrsig_verify(CTX, sig[0], msg_large, msglen, &pk) == 1);
@@ -942,7 +942,7 @@ static void test_schnorrsig_taproot(void) {
unsigned char sig[64];
/* Create output key */
- secp256k1_testrand256(sk);
+ testrand256(sk);
CHECK(secp256k1_keypair_create(CTX, &keypair, sk) == 1);
CHECK(secp256k1_keypair_xonly_pub(CTX, &internal_pk, NULL, &keypair) == 1);
/* In actual taproot the tweak would be hash of internal_pk */
@@ -952,7 +952,7 @@ static void test_schnorrsig_taproot(void) {
CHECK(secp256k1_xonly_pubkey_serialize(CTX, output_pk_bytes, &output_pk) == 1);
/* Key spend */
- secp256k1_testrand256(msg);
+ testrand256(msg);
CHECK(secp256k1_schnorrsig_sign32(CTX, sig, msg, &keypair, NULL) == 1);
/* Verify key spend */
CHECK(secp256k1_xonly_pubkey_parse(CTX, &output_pk, output_pk_bytes) == 1);
diff --git a/src/secp256k1/src/secp256k1.c b/src/secp256k1/src/secp256k1.c
index 7a32769408..72d725a74e 100644
--- a/src/secp256k1/src/secp256k1.c
+++ b/src/secp256k1/src/secp256k1.c
@@ -76,7 +76,7 @@ const secp256k1_context *secp256k1_context_no_precomp = &secp256k1_context_stati
/* Helper function that determines if a context is proper, i.e., is not the static context or a copy thereof.
*
- * This is intended for "context" functions such as secp256k1_context_clone. Function which need specific
+ * This is intended for "context" functions such as secp256k1_context_clone. Functions that need specific
* features of a context should still check for these features directly. For example, a function that needs
* ecmult_gen should directly check for the existence of the ecmult_gen context. */
static int secp256k1_context_is_proper(const secp256k1_context* ctx) {
@@ -544,7 +544,7 @@ static int secp256k1_ecdsa_sign_inner(const secp256k1_context* ctx, secp256k1_sc
break;
}
is_nonce_valid = secp256k1_scalar_set_b32_seckey(&non, nonce32);
- /* The nonce is still secret here, but it being invalid is is less likely than 1:2^255. */
+ /* The nonce is still secret here, but it being invalid is less likely than 1:2^255. */
secp256k1_declassify(ctx, &is_nonce_valid, sizeof(is_nonce_valid));
if (is_nonce_valid) {
ret = secp256k1_ecdsa_sig_sign(&ctx->ecmult_gen_ctx, r, s, &sec, &msg, &non, recid);
diff --git a/src/secp256k1/src/testrand.h b/src/secp256k1/src/testrand.h
index 721099d039..3c1ed3d45d 100644
--- a/src/secp256k1/src/testrand.h
+++ b/src/secp256k1/src/testrand.h
@@ -12,37 +12,37 @@
/* A non-cryptographic RNG used only for test infrastructure. */
/** Seed the pseudorandom number generator for testing. */
-SECP256K1_INLINE static void secp256k1_testrand_seed(const unsigned char *seed16);
+SECP256K1_INLINE static void testrand_seed(const unsigned char *seed16);
/** Generate a pseudorandom number in the range [0..2**32-1]. */
-SECP256K1_INLINE static uint32_t secp256k1_testrand32(void);
+SECP256K1_INLINE static uint32_t testrand32(void);
/** Generate a pseudorandom number in the range [0..2**64-1]. */
-SECP256K1_INLINE static uint64_t secp256k1_testrand64(void);
+SECP256K1_INLINE static uint64_t testrand64(void);
/** Generate a pseudorandom number in the range [0..2**bits-1]. Bits must be 1 or
* more. */
-SECP256K1_INLINE static uint64_t secp256k1_testrand_bits(int bits);
+SECP256K1_INLINE static uint64_t testrand_bits(int bits);
/** Generate a pseudorandom number in the range [0..range-1]. */
-static uint32_t secp256k1_testrand_int(uint32_t range);
+static uint32_t testrand_int(uint32_t range);
/** Generate a pseudorandom 32-byte array. */
-static void secp256k1_testrand256(unsigned char *b32);
+static void testrand256(unsigned char *b32);
/** Generate a pseudorandom 32-byte array with long sequences of zero and one bits. */
-static void secp256k1_testrand256_test(unsigned char *b32);
+static void testrand256_test(unsigned char *b32);
/** Generate pseudorandom bytes with long sequences of zero and one bits. */
-static void secp256k1_testrand_bytes_test(unsigned char *bytes, size_t len);
+static void testrand_bytes_test(unsigned char *bytes, size_t len);
/** Flip a single random bit in a byte array */
-static void secp256k1_testrand_flip(unsigned char *b, size_t len);
+static void testrand_flip(unsigned char *b, size_t len);
/** Initialize the test RNG using (hex encoded) array up to 16 bytes, or randomly if hexseed is NULL. */
-static void secp256k1_testrand_init(const char* hexseed);
+static void testrand_init(const char* hexseed);
/** Print final test information. */
-static void secp256k1_testrand_finish(void);
+static void testrand_finish(void);
#endif /* SECP256K1_TESTRAND_H */
diff --git a/src/secp256k1/src/testrand_impl.h b/src/secp256k1/src/testrand_impl.h
index fe97620435..07564f7f3f 100644
--- a/src/secp256k1/src/testrand_impl.h
+++ b/src/secp256k1/src/testrand_impl.h
@@ -17,7 +17,7 @@
static uint64_t secp256k1_test_state[4];
-SECP256K1_INLINE static void secp256k1_testrand_seed(const unsigned char *seed16) {
+SECP256K1_INLINE static void testrand_seed(const unsigned char *seed16) {
static const unsigned char PREFIX[19] = "secp256k1 test init";
unsigned char out32[32];
secp256k1_sha256 hash;
@@ -40,7 +40,7 @@ SECP256K1_INLINE static uint64_t rotl(const uint64_t x, int k) {
return (x << k) | (x >> (64 - k));
}
-SECP256K1_INLINE static uint64_t secp256k1_testrand64(void) {
+SECP256K1_INLINE static uint64_t testrand64(void) {
/* Test-only Xoshiro256++ RNG. See https://prng.di.unimi.it/ */
const uint64_t result = rotl(secp256k1_test_state[0] + secp256k1_test_state[3], 23) + secp256k1_test_state[0];
const uint64_t t = secp256k1_test_state[1] << 17;
@@ -53,16 +53,16 @@ SECP256K1_INLINE static uint64_t secp256k1_testrand64(void) {
return result;
}
-SECP256K1_INLINE static uint64_t secp256k1_testrand_bits(int bits) {
+SECP256K1_INLINE static uint64_t testrand_bits(int bits) {
if (bits == 0) return 0;
- return secp256k1_testrand64() >> (64 - bits);
+ return testrand64() >> (64 - bits);
}
-SECP256K1_INLINE static uint32_t secp256k1_testrand32(void) {
- return secp256k1_testrand64() >> 32;
+SECP256K1_INLINE static uint32_t testrand32(void) {
+ return testrand64() >> 32;
}
-static uint32_t secp256k1_testrand_int(uint32_t range) {
+static uint32_t testrand_int(uint32_t range) {
uint32_t mask = 0;
uint32_t range_copy;
/* Reduce range by 1, changing its meaning to "maximum value". */
@@ -76,15 +76,15 @@ static uint32_t secp256k1_testrand_int(uint32_t range) {
}
/* Generation loop. */
while (1) {
- uint32_t val = secp256k1_testrand64() & mask;
+ uint32_t val = testrand64() & mask;
if (val <= range) return val;
}
}
-static void secp256k1_testrand256(unsigned char *b32) {
+static void testrand256(unsigned char *b32) {
int i;
for (i = 0; i < 4; ++i) {
- uint64_t val = secp256k1_testrand64();
+ uint64_t val = testrand64();
b32[0] = val;
b32[1] = val >> 8;
b32[2] = val >> 16;
@@ -97,14 +97,14 @@ static void secp256k1_testrand256(unsigned char *b32) {
}
}
-static void secp256k1_testrand_bytes_test(unsigned char *bytes, size_t len) {
+static void testrand_bytes_test(unsigned char *bytes, size_t len) {
size_t bits = 0;
memset(bytes, 0, len);
while (bits < len * 8) {
int now;
uint32_t val;
- now = 1 + (secp256k1_testrand_bits(6) * secp256k1_testrand_bits(5) + 16) / 31;
- val = secp256k1_testrand_bits(1);
+ now = 1 + (testrand_bits(6) * testrand_bits(5) + 16) / 31;
+ val = testrand_bits(1);
while (now > 0 && bits < len * 8) {
bytes[bits / 8] |= val << (bits % 8);
now--;
@@ -113,15 +113,15 @@ static void secp256k1_testrand_bytes_test(unsigned char *bytes, size_t len) {
}
}
-static void secp256k1_testrand256_test(unsigned char *b32) {
- secp256k1_testrand_bytes_test(b32, 32);
+static void testrand256_test(unsigned char *b32) {
+ testrand_bytes_test(b32, 32);
}
-static void secp256k1_testrand_flip(unsigned char *b, size_t len) {
- b[secp256k1_testrand_int(len)] ^= (1 << secp256k1_testrand_bits(3));
+static void testrand_flip(unsigned char *b, size_t len) {
+ b[testrand_int(len)] ^= (1 << testrand_bits(3));
}
-static void secp256k1_testrand_init(const char* hexseed) {
+static void testrand_init(const char* hexseed) {
unsigned char seed16[16] = {0};
if (hexseed && strlen(hexseed) != 0) {
int pos = 0;
@@ -155,12 +155,12 @@ static void secp256k1_testrand_init(const char* hexseed) {
}
printf("random seed = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", seed16[0], seed16[1], seed16[2], seed16[3], seed16[4], seed16[5], seed16[6], seed16[7], seed16[8], seed16[9], seed16[10], seed16[11], seed16[12], seed16[13], seed16[14], seed16[15]);
- secp256k1_testrand_seed(seed16);
+ testrand_seed(seed16);
}
-static void secp256k1_testrand_finish(void) {
+static void testrand_finish(void) {
unsigned char run32[32];
- secp256k1_testrand256(run32);
+ testrand256(run32);
printf("random run = %02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", run32[0], run32[1], run32[2], run32[3], run32[4], run32[5], run32[6], run32[7], run32[8], run32[9], run32[10], run32[11], run32[12], run32[13], run32[14], run32[15]);
}
diff --git a/src/secp256k1/src/tests.c b/src/secp256k1/src/tests.c
index dab47608c2..6b401e52c0 100644
--- a/src/secp256k1/src/tests.c
+++ b/src/secp256k1/src/tests.c
@@ -96,122 +96,6 @@ static void uncounting_illegal_callback_fn(const char* str, void* data) {
(*p)--;
}
-static void random_field_element_magnitude(secp256k1_fe *fe, int m) {
- secp256k1_fe zero;
- int n = secp256k1_testrand_int(m + 1);
- secp256k1_fe_normalize(fe);
- if (n == 0) {
- return;
- }
- secp256k1_fe_clear(&zero);
- secp256k1_fe_negate(&zero, &zero, 0);
- secp256k1_fe_mul_int_unchecked(&zero, n - 1);
- secp256k1_fe_add(fe, &zero);
-#ifdef VERIFY
- CHECK(fe->magnitude == n);
-#endif
-}
-
-static void random_fe_test(secp256k1_fe *x) {
- unsigned char bin[32];
- do {
- secp256k1_testrand256_test(bin);
- if (secp256k1_fe_set_b32_limit(x, bin)) {
- return;
- }
- } while(1);
-}
-
-static void random_fe_non_zero_test(secp256k1_fe *fe) {
- do {
- random_fe_test(fe);
- } while(secp256k1_fe_is_zero(fe));
-}
-
-static void random_fe_magnitude(secp256k1_fe *fe) {
- random_field_element_magnitude(fe, 8);
-}
-
-static void random_ge_x_magnitude(secp256k1_ge *ge) {
- random_field_element_magnitude(&ge->x, SECP256K1_GE_X_MAGNITUDE_MAX);
-}
-
-static void random_ge_y_magnitude(secp256k1_ge *ge) {
- random_field_element_magnitude(&ge->y, SECP256K1_GE_Y_MAGNITUDE_MAX);
-}
-
-static void random_gej_x_magnitude(secp256k1_gej *gej) {
- random_field_element_magnitude(&gej->x, SECP256K1_GEJ_X_MAGNITUDE_MAX);
-}
-
-static void random_gej_y_magnitude(secp256k1_gej *gej) {
- random_field_element_magnitude(&gej->y, SECP256K1_GEJ_Y_MAGNITUDE_MAX);
-}
-
-static void random_gej_z_magnitude(secp256k1_gej *gej) {
- random_field_element_magnitude(&gej->z, SECP256K1_GEJ_Z_MAGNITUDE_MAX);
-}
-
-static void random_group_element_test(secp256k1_ge *ge) {
- secp256k1_fe fe;
- do {
- random_fe_test(&fe);
- if (secp256k1_ge_set_xo_var(ge, &fe, secp256k1_testrand_bits(1))) {
- secp256k1_fe_normalize(&ge->y);
- break;
- }
- } while(1);
- ge->infinity = 0;
-}
-
-static void random_group_element_jacobian_test(secp256k1_gej *gej, const secp256k1_ge *ge) {
- secp256k1_fe z2, z3;
- random_fe_non_zero_test(&gej->z);
- secp256k1_fe_sqr(&z2, &gej->z);
- secp256k1_fe_mul(&z3, &z2, &gej->z);
- secp256k1_fe_mul(&gej->x, &ge->x, &z2);
- secp256k1_fe_mul(&gej->y, &ge->y, &z3);
- gej->infinity = ge->infinity;
-}
-
-static void random_gej_test(secp256k1_gej *gej) {
- secp256k1_ge ge;
- random_group_element_test(&ge);
- random_group_element_jacobian_test(gej, &ge);
-}
-
-static void random_scalar_order_test(secp256k1_scalar *num) {
- do {
- unsigned char b32[32];
- int overflow = 0;
- secp256k1_testrand256_test(b32);
- secp256k1_scalar_set_b32(num, b32, &overflow);
- if (overflow || secp256k1_scalar_is_zero(num)) {
- continue;
- }
- break;
- } while(1);
-}
-
-static void random_scalar_order(secp256k1_scalar *num) {
- do {
- unsigned char b32[32];
- int overflow = 0;
- secp256k1_testrand256(b32);
- secp256k1_scalar_set_b32(num, b32, &overflow);
- if (overflow || secp256k1_scalar_is_zero(num)) {
- continue;
- }
- break;
- } while(1);
-}
-
-static void random_scalar_order_b32(unsigned char *b32) {
- secp256k1_scalar num;
- random_scalar_order(&num);
- secp256k1_scalar_get_b32(b32, &num);
-}
-
static void run_xoshiro256pp_tests(void) {
{
size_t i;
@@ -233,9 +117,9 @@ static void run_xoshiro256pp_tests(void) {
0x4C, 0xCC, 0xC1, 0x18, 0xB2, 0xD8, 0x8F, 0xEF,
0x43, 0x26, 0x15, 0x57, 0x37, 0x00, 0xEF, 0x30,
};
- secp256k1_testrand_seed(seed16);
+ testrand_seed(seed16);
for (i = 0; i < 17; i++) {
- secp256k1_testrand256(buf32);
+ testrand256(buf32);
}
CHECK(secp256k1_memcmp_var(buf32, buf32_expected, sizeof(buf32)) == 0);
}
@@ -444,14 +328,14 @@ static void run_proper_context_tests(int use_prealloc) {
CHECK(context_eq(my_ctx, my_ctx_fresh));
/*** attempt to use them ***/
- random_scalar_order_test(&msg);
- random_scalar_order_test(&key);
+ testutil_random_scalar_order_test(&msg);
+ testutil_random_scalar_order_test(&key);
secp256k1_ecmult_gen(&my_ctx->ecmult_gen_ctx, &pubj, &key);
secp256k1_ge_set_gej(&pub, &pubj);
/* obtain a working nonce */
do {
- random_scalar_order_test(&nonce);
+ testutil_random_scalar_order_test(&nonce);
} while(!secp256k1_ecdsa_sig_sign(&my_ctx->ecmult_gen_ctx, &sigr, &sigs, &key, &msg, &nonce, NULL));
/* try signing */
@@ -608,7 +492,7 @@ static void run_sha256_known_output_tests(void) {
CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0);
/* 2. Run: split the input bytestrings randomly before writing */
if (strlen(inputs[i]) > 0) {
- int split = secp256k1_testrand_int(strlen(inputs[i]));
+ int split = testrand_int(strlen(inputs[i]));
secp256k1_sha256_initialize(&hasher);
j = repeat[i];
while (j > 0) {
@@ -769,7 +653,7 @@ static void run_hmac_sha256_tests(void) {
secp256k1_hmac_sha256_finalize(&hasher, out);
CHECK(secp256k1_memcmp_var(out, outputs[i], 32) == 0);
if (strlen(inputs[i]) > 0) {
- int split = secp256k1_testrand_int(strlen(inputs[i]));
+ int split = testrand_int(strlen(inputs[i]));
secp256k1_hmac_sha256_initialize(&hasher, (const unsigned char*)(keys[i]), strlen(keys[i]));
secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i]), split);
secp256k1_hmac_sha256_write(&hasher, (const unsigned char*)(inputs[i] + split), strlen(inputs[i]) - split);
@@ -969,7 +853,7 @@ static void signed30_to_uint16(uint16_t* out, const secp256k1_modinv32_signed30*
static void mutate_sign_signed30(secp256k1_modinv32_signed30* x) {
int i;
for (i = 0; i < 16; ++i) {
- int pos = secp256k1_testrand_bits(3);
+ int pos = testrand_bits(3);
if (x->v[pos] > 0 && x->v[pos + 1] <= 0x3fffffff) {
x->v[pos] -= 0x40000000;
x->v[pos + 1] += 1;
@@ -1061,7 +945,7 @@ static void mutate_sign_signed62(secp256k1_modinv64_signed62* x) {
static const int64_t M62 = (int64_t)(UINT64_MAX >> 2);
int i;
for (i = 0; i < 8; ++i) {
- int pos = secp256k1_testrand_bits(2);
+ int pos = testrand_bits(2);
if (x->v[pos] > 0 && x->v[pos + 1] <= M62) {
x->v[pos] -= (M62 + 1);
x->v[pos + 1] += 1;
@@ -1774,8 +1658,8 @@ static void run_modinv_tests(void) {
/* generate random xd and md, so that md is odd, md>1, xd<md, and gcd(xd,md)=1 */
do {
/* generate random xd and md (with many subsequent 0s and 1s) */
- secp256k1_testrand256_test((unsigned char*)xd);
- secp256k1_testrand256_test((unsigned char*)md);
+ testrand256_test((unsigned char*)xd);
+ testrand256_test((unsigned char*)md);
md[0] |= 1; /* modulus must be odd */
/* If modulus is 1, find another one. */
ok = md[0] != 1;
@@ -1907,7 +1791,7 @@ static void run_int128_test_case(void) {
int i;
/* Generate 32-byte random value. */
- secp256k1_testrand256_test(buf);
+ testrand256_test(buf);
/* Convert into 4 64-bit integers. */
for (i = 0; i < 4; ++i) {
uint64_t vi = 0;
@@ -2133,13 +2017,13 @@ static void scalar_test(void) {
unsigned char c[32];
/* Set 's' to a random scalar, with value 'snum'. */
- random_scalar_order_test(&s);
+ testutil_random_scalar_order_test(&s);
/* Set 's1' to a random scalar, with value 's1num'. */
- random_scalar_order_test(&s1);
+ testutil_random_scalar_order_test(&s1);
/* Set 's2' to a random scalar, with value 'snum2', and byte array representation 'c'. */
- random_scalar_order_test(&s2);
+ testutil_random_scalar_order_test(&s2);
secp256k1_scalar_get_b32(c, &s2);
{
@@ -2167,7 +2051,7 @@ static void scalar_test(void) {
while (i < 256) {
secp256k1_scalar t;
int j;
- int now = secp256k1_testrand_int(15) + 1;
+ int now = testrand_int(15) + 1;
if (now + i > 256) {
now = 256 - i;
}
@@ -2194,7 +2078,7 @@ static void scalar_test(void) {
secp256k1_scalar b;
int i;
/* Test add_bit. */
- int bit = secp256k1_testrand_bits(8);
+ int bit = testrand_bits(8);
secp256k1_scalar_set_int(&b, 1);
CHECK(secp256k1_scalar_is_one(&b));
for (i = 0; i < bit; i++) {
@@ -2287,7 +2171,7 @@ static void run_scalar_set_b32_seckey_tests(void) {
secp256k1_scalar s2;
/* Usually set_b32 and set_b32_seckey give the same result */
- random_scalar_order_b32(b32);
+ testutil_random_scalar_order_b32(b32);
secp256k1_scalar_set_b32(&s1, b32, NULL);
CHECK(secp256k1_scalar_set_b32_seckey(&s2, b32) == 1);
CHECK(secp256k1_scalar_eq(&s1, &s2) == 1);
@@ -2948,7 +2832,7 @@ static void run_scalar_tests(void) {
static void random_fe_non_square(secp256k1_fe *ns) {
secp256k1_fe r;
- random_fe_non_zero(ns);
+ testutil_random_fe_non_zero(ns);
if (secp256k1_fe_sqrt(&r, ns)) {
secp256k1_fe_negate(ns, ns, 1);
}
@@ -3125,12 +3009,12 @@ static void run_field_misc(void) {
for (i = 0; i < 1000 * COUNT; i++) {
secp256k1_fe_storage xs, ys, zs;
if (i & 1) {
- random_fe(&x);
+ testutil_random_fe(&x);
} else {
- random_fe_test(&x);
+ testutil_random_fe_test(&x);
}
- random_fe_non_zero(&y);
- v = secp256k1_testrand_bits(15);
+ testutil_random_fe_non_zero(&y);
+ v = testrand_bits(15);
/* Test that fe_add_int is equivalent to fe_set_int + fe_add. */
secp256k1_fe_set_int(&q, v); /* q = v */
z = x; /* z = x */
@@ -3268,14 +3152,14 @@ static void run_fe_mul(void) {
int i;
for (i = 0; i < 100 * COUNT; ++i) {
secp256k1_fe a, b, c, d;
- random_fe(&a);
- random_fe_magnitude(&a);
- random_fe(&b);
- random_fe_magnitude(&b);
- random_fe_test(&c);
- random_fe_magnitude(&c);
- random_fe_test(&d);
- random_fe_magnitude(&d);
+ testutil_random_fe(&a);
+ testutil_random_fe_magnitude(&a, 8);
+ testutil_random_fe(&b);
+ testutil_random_fe_magnitude(&b, 8);
+ testutil_random_fe_test(&c);
+ testutil_random_fe_magnitude(&c, 8);
+ testutil_random_fe_test(&d);
+ testutil_random_fe_magnitude(&d, 8);
test_fe_mul(&a, &a, 1);
test_fe_mul(&c, &c, 1);
test_fe_mul(&a, &b, 0);
@@ -3297,7 +3181,7 @@ static void run_sqr(void) {
secp256k1_fe_normalize(&x);
/* Check that (x+y)*(x-y) = x^2 - y^2 for some random values y */
- random_fe_test(&y);
+ testutil_random_fe_test(&y);
lhs = x;
secp256k1_fe_add(&lhs, &y); /* lhs = x+y */
@@ -3351,7 +3235,7 @@ static void run_sqrt(void) {
int j;
random_fe_non_square(&ns);
for (j = 0; j < COUNT; j++) {
- random_fe(&x);
+ testutil_random_fe(&x);
secp256k1_fe_sqr(&s, &x);
CHECK(secp256k1_fe_is_square_var(&s));
test_sqrt(&s, &x);
@@ -3665,7 +3549,7 @@ static void run_inverse_tests(void)
/* test 128*count random inputs; half with testrand256_test, half with testrand256 */
for (testrand = 0; testrand <= 1; ++testrand) {
for (i = 0; i < 64 * COUNT; ++i) {
- (testrand ? secp256k1_testrand256_test : secp256k1_testrand256)(b32);
+ (testrand ? testrand256_test : testrand256)(b32);
secp256k1_scalar_set_b32(&x_scalar, b32, NULL);
secp256k1_fe_set_b32_mod(&x_fe, b32);
for (var = 0; var <= 1; ++var) {
@@ -3731,8 +3615,8 @@ static void test_hsort(size_t element_len) {
/* Test hsort with array of random length n */
for (i = 0; i < COUNT; i++) {
- int n = secp256k1_testrand_int(NUM);
- secp256k1_testrand_bytes_test(elements, n*element_len);
+ int n = testrand_int(NUM);
+ testrand_bytes_test(elements, n*element_len);
secp256k1_hsort(elements, n, element_len, test_hsort_cmp, &data);
test_hsort_is_sorted(elements, n, element_len);
}
@@ -3792,7 +3676,7 @@ static void test_ge(void) {
for (i = 0; i < runs; i++) {
int j, k;
secp256k1_ge g;
- random_group_element_test(&g);
+ testutil_random_ge_test(&g);
if (i >= runs - 2) {
secp256k1_ge_mul_lambda(&g, &ge[1]);
CHECK(!secp256k1_ge_eq_var(&g, &ge[1]));
@@ -3805,15 +3689,15 @@ static void test_ge(void) {
secp256k1_ge_neg(&ge[3 + 4 * i], &g);
secp256k1_ge_neg(&ge[4 + 4 * i], &g);
secp256k1_gej_set_ge(&gej[1 + 4 * i], &ge[1 + 4 * i]);
- random_group_element_jacobian_test(&gej[2 + 4 * i], &ge[2 + 4 * i]);
+ testutil_random_ge_jacobian_test(&gej[2 + 4 * i], &ge[2 + 4 * i]);
secp256k1_gej_set_ge(&gej[3 + 4 * i], &ge[3 + 4 * i]);
- random_group_element_jacobian_test(&gej[4 + 4 * i], &ge[4 + 4 * i]);
+ testutil_random_ge_jacobian_test(&gej[4 + 4 * i], &ge[4 + 4 * i]);
for (j = 0; j < 4; j++) {
- random_ge_x_magnitude(&ge[1 + j + 4 * i]);
- random_ge_y_magnitude(&ge[1 + j + 4 * i]);
- random_gej_x_magnitude(&gej[1 + j + 4 * i]);
- random_gej_y_magnitude(&gej[1 + j + 4 * i]);
- random_gej_z_magnitude(&gej[1 + j + 4 * i]);
+ testutil_random_ge_x_magnitude(&ge[1 + j + 4 * i]);
+ testutil_random_ge_y_magnitude(&ge[1 + j + 4 * i]);
+ testutil_random_gej_x_magnitude(&gej[1 + j + 4 * i]);
+ testutil_random_gej_y_magnitude(&gej[1 + j + 4 * i]);
+ testutil_random_gej_z_magnitude(&gej[1 + j + 4 * i]);
}
for (j = 0; j < 4; ++j) {
@@ -3828,14 +3712,14 @@ static void test_ge(void) {
}
/* Generate random zf, and zfi2 = 1/zf^2, zfi3 = 1/zf^3 */
- random_fe_non_zero_test(&zf);
- random_fe_magnitude(&zf);
+ testutil_random_fe_non_zero_test(&zf);
+ testutil_random_fe_magnitude(&zf, 8);
secp256k1_fe_inv_var(&zfi3, &zf);
secp256k1_fe_sqr(&zfi2, &zfi3);
secp256k1_fe_mul(&zfi3, &zfi3, &zfi2);
/* Generate random r */
- random_fe_non_zero_test(&r);
+ testutil_random_fe_non_zero_test(&r);
for (i1 = 0; i1 < 1 + 4 * runs; i1++) {
int i2;
@@ -3865,8 +3749,8 @@ static void test_ge(void) {
secp256k1_ge ge2_zfi = ge[i2]; /* the second term with x and y rescaled for z = 1/zf */
secp256k1_fe_mul(&ge2_zfi.x, &ge2_zfi.x, &zfi2);
secp256k1_fe_mul(&ge2_zfi.y, &ge2_zfi.y, &zfi3);
- random_ge_x_magnitude(&ge2_zfi);
- random_ge_y_magnitude(&ge2_zfi);
+ testutil_random_ge_x_magnitude(&ge2_zfi);
+ testutil_random_ge_y_magnitude(&ge2_zfi);
secp256k1_gej_add_zinv_var(&resj, &gej[i1], &ge2_zfi, &zf);
CHECK(secp256k1_gej_eq_ge_var(&resj, &ref));
}
@@ -3922,7 +3806,7 @@ static void test_ge(void) {
gej_shuffled[i] = gej[i];
}
for (i = 0; i < 4 * runs + 1; i++) {
- int swap = i + secp256k1_testrand_int(4 * runs + 1 - i);
+ int swap = i + testrand_int(4 * runs + 1 - i);
if (swap != i) {
secp256k1_gej t = gej_shuffled[i];
gej_shuffled[i] = gej_shuffled[swap];
@@ -3942,7 +3826,7 @@ static void test_ge(void) {
secp256k1_ge_set_all_gej_var(ge_set_all, gej, 4 * runs + 1);
for (i = 0; i < 4 * runs + 1; i++) {
secp256k1_fe s;
- random_fe_non_zero(&s);
+ testutil_random_fe_non_zero(&s);
secp256k1_gej_rescale(&gej[i], &s);
CHECK(secp256k1_gej_eq_ge_var(&gej[i], &ge_set_all[i]));
}
@@ -3975,7 +3859,7 @@ static void test_ge(void) {
/* Test batch gej -> ge conversion with many infinities. */
for (i = 0; i < 4 * runs + 1; i++) {
int odd;
- random_group_element_test(&ge[i]);
+ testutil_random_ge_test(&ge[i]);
odd = secp256k1_fe_is_odd(&ge[i].x);
CHECK(odd == 0 || odd == 1);
/* randomly set half the points to infinity */
@@ -4012,7 +3896,7 @@ static void test_intialized_inf(void) {
secp256k1_fe zinv;
/* Test that adding P+(-P) results in a fully initialized infinity*/
- random_group_element_test(&p);
+ testutil_random_ge_test(&p);
secp256k1_gej_set_ge(&pj, &p);
secp256k1_gej_neg(&npj, &pj);
@@ -4125,14 +4009,14 @@ static void run_gej(void) {
secp256k1_gej_set_infinity(&b);
test_gej_cmov(&a, &b);
- random_gej_test(&a);
+ testutil_random_gej_test(&a);
test_gej_cmov(&a, &b);
test_gej_cmov(&b, &a);
b = a;
test_gej_cmov(&a, &b);
- random_gej_test(&b);
+ testutil_random_gej_test(&b);
test_gej_cmov(&a, &b);
test_gej_cmov(&b, &a);
}
@@ -4140,12 +4024,12 @@ static void run_gej(void) {
/* Tests for secp256k1_gej_eq_var */
for (i = 0; i < COUNT; i++) {
secp256k1_fe fe;
- random_gej_test(&a);
- random_gej_test(&b);
+ testutil_random_gej_test(&a);
+ testutil_random_gej_test(&b);
CHECK(!secp256k1_gej_eq_var(&a, &b));
b = a;
- random_fe_non_zero_test(&fe);
+ testutil_random_fe_non_zero_test(&fe);
secp256k1_gej_rescale(&a, &fe);
CHECK(secp256k1_gej_eq_var(&a, &b));
}
@@ -4162,7 +4046,7 @@ static void test_ec_combine(void) {
int i;
for (i = 1; i <= 6; i++) {
secp256k1_scalar s;
- random_scalar_order_test(&s);
+ testutil_random_scalar_order_test(&s);
secp256k1_scalar_add(&sum, &sum, &s);
secp256k1_ecmult_gen(&CTX->ecmult_gen_ctx, &Qj, &s);
secp256k1_ge_set_gej(&Q, &Qj);
@@ -4222,7 +4106,7 @@ static void run_group_decompress(void) {
int i;
for (i = 0; i < COUNT * 4; i++) {
secp256k1_fe fe;
- random_fe_test(&fe);
+ testutil_random_fe_test(&fe);
test_group_decompress(&fe);
}
}
@@ -4370,7 +4254,7 @@ static void test_point_times_order(const secp256k1_gej *point) {
secp256k1_ge res3;
unsigned char pub[65];
size_t psize = 65;
- random_scalar_order_test(&x);
+ testutil_random_scalar_order_test(&x);
secp256k1_scalar_negate(&nx, &x);
secp256k1_ecmult(&res1, point, &x, &x); /* calc res1 = x * point + x * G; */
secp256k1_ecmult(&res2, point, &nx, &nx); /* calc res2 = (order - x) * point + (order - x) * G; */
@@ -4431,13 +4315,13 @@ static void test_ecmult_target(const secp256k1_scalar* target, int mode) {
secp256k1_gej pj, p1j, p2j, ptj;
/* Generate random n1,n2 such that n1+n2 = -target. */
- random_scalar_order_test(&n1);
+ testutil_random_scalar_order_test(&n1);
secp256k1_scalar_add(&n2, &n1, target);
secp256k1_scalar_negate(&n2, &n2);
/* Generate a random input point. */
if (mode != 0) {
- random_group_element_test(&p);
+ testutil_random_ge_test(&p);
secp256k1_gej_set_ge(&pj, &p);
}
@@ -4529,8 +4413,8 @@ static void ecmult_const_commutativity(void) {
secp256k1_gej res2;
secp256k1_ge mid1;
secp256k1_ge mid2;
- random_scalar_order_test(&a);
- random_scalar_order_test(&b);
+ testutil_random_scalar_order_test(&a);
+ testutil_random_scalar_order_test(&b);
secp256k1_ecmult_const(&res1, &secp256k1_ge_const_g, &a);
secp256k1_ecmult_const(&res2, &secp256k1_ge_const_g, &b);
@@ -4551,9 +4435,9 @@ static void ecmult_const_mult_zero_one(void) {
secp256k1_ge point;
secp256k1_ge inf;
- random_scalar_order_test(&s);
+ testutil_random_scalar_order_test(&s);
secp256k1_scalar_negate(&negone, &secp256k1_scalar_one);
- random_group_element_test(&point);
+ testutil_random_ge_test(&point);
secp256k1_ge_set_infinity(&inf);
/* 0*point */
@@ -4606,7 +4490,7 @@ static void ecmult_const_edges(void) {
secp256k1_scalar_add(&q, &q, &scalars_near_split_bounds[i - 1]);
secp256k1_scalar_add(&q, &q, &scalars_near_split_bounds[i - 1]);
}
- random_group_element_test(&point);
+ testutil_random_ge_test(&point);
secp256k1_ecmult_const(&res, &point, &q);
ecmult_const_check_result(&point, &q, &res);
}
@@ -4623,12 +4507,12 @@ static void ecmult_const_mult_xonly(void) {
secp256k1_scalar q;
int res;
/* Random base point. */
- random_group_element_test(&base);
+ testutil_random_ge_test(&base);
/* Random scalar to multiply it with. */
- random_scalar_order_test(&q);
+ testutil_random_scalar_order_test(&q);
/* If i is odd, n=d*base.x for random non-zero d */
if (i & 1) {
- random_fe_non_zero_test(&d);
+ testutil_random_fe_non_zero_test(&d);
secp256k1_fe_mul(&n, &base.x, &d);
} else {
n = base.x;
@@ -4650,14 +4534,14 @@ static void ecmult_const_mult_xonly(void) {
secp256k1_fe x, n, d, r;
int res;
secp256k1_scalar q;
- random_scalar_order_test(&q);
+ testutil_random_scalar_order_test(&q);
/* Generate random X coordinate not on the curve. */
do {
- random_fe_test(&x);
+ testutil_random_fe_test(&x);
} while (secp256k1_ge_x_on_curve_var(&x));
/* If i is odd, n=d*x for random non-zero d. */
if (i & 1) {
- random_fe_non_zero_test(&d);
+ testutil_random_fe_non_zero_test(&d);
secp256k1_fe_mul(&n, &x, &d);
} else {
n = x;
@@ -4740,10 +4624,10 @@ static void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi
for (ncount = 0; ncount < COUNT; ncount++) {
secp256k1_ge ptg;
secp256k1_gej ptgj;
- random_scalar_order(&sc[0]);
- random_scalar_order(&sc[1]);
+ testutil_random_scalar_order(&sc[0]);
+ testutil_random_scalar_order(&sc[1]);
- random_group_element_test(&ptg);
+ testutil_random_ge_test(&ptg);
secp256k1_gej_set_ge(&ptgj, &ptg);
pt[0] = ptg;
pt[1] = secp256k1_ge_const_g;
@@ -4780,7 +4664,7 @@ static void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi
for (j = 0; j < 3; j++) {
for (i = 0; i < 32; i++) {
- random_scalar_order(&sc[i]);
+ testutil_random_scalar_order(&sc[i]);
secp256k1_ge_set_infinity(&pt[i]);
}
CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &secp256k1_scalar_zero, ecmult_multi_callback, &data, sizes[j]));
@@ -4789,7 +4673,7 @@ static void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi
for (j = 0; j < 3; j++) {
for (i = 0; i < 32; i++) {
- random_group_element_test(&ptg);
+ testutil_random_ge_test(&ptg);
pt[i] = ptg;
secp256k1_scalar_set_int(&sc[i], 0);
}
@@ -4798,9 +4682,9 @@ static void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi
}
for (j = 0; j < 3; j++) {
- random_group_element_test(&ptg);
+ testutil_random_ge_test(&ptg);
for (i = 0; i < 16; i++) {
- random_scalar_order(&sc[2*i]);
+ testutil_random_scalar_order(&sc[2*i]);
secp256k1_scalar_negate(&sc[2*i + 1], &sc[2*i]);
pt[2 * i] = ptg;
pt[2 * i + 1] = ptg;
@@ -4809,9 +4693,9 @@ static void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi
CHECK(ecmult_multi(&CTX->error_callback, scratch, &r, &secp256k1_scalar_zero, ecmult_multi_callback, &data, sizes[j]));
CHECK(secp256k1_gej_is_infinity(&r));
- random_scalar_order(&sc[0]);
+ testutil_random_scalar_order(&sc[0]);
for (i = 0; i < 16; i++) {
- random_group_element_test(&ptg);
+ testutil_random_ge_test(&ptg);
sc[2*i] = sc[0];
sc[2*i+1] = sc[0];
@@ -4823,13 +4707,13 @@ static void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi
CHECK(secp256k1_gej_is_infinity(&r));
}
- random_group_element_test(&ptg);
+ testutil_random_ge_test(&ptg);
secp256k1_scalar_set_int(&sc[0], 0);
pt[0] = ptg;
for (i = 1; i < 32; i++) {
pt[i] = ptg;
- random_scalar_order(&sc[i]);
+ testutil_random_scalar_order(&sc[i]);
secp256k1_scalar_add(&sc[0], &sc[0], &sc[i]);
secp256k1_scalar_negate(&sc[i], &sc[i]);
}
@@ -4843,11 +4727,11 @@ static void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi
size_t i;
secp256k1_gej_set_infinity(&r);
- random_scalar_order(&sc[0]);
+ testutil_random_scalar_order(&sc[0]);
for (i = 0; i < 20; i++) {
secp256k1_ge ptg;
sc[i] = sc[0];
- random_group_element_test(&ptg);
+ testutil_random_ge_test(&ptg);
pt[i] = ptg;
secp256k1_gej_add_ge_var(&r, &r, &pt[i], NULL);
}
@@ -4865,9 +4749,9 @@ static void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi
secp256k1_scalar rs;
secp256k1_scalar_set_int(&rs, 0);
- random_group_element_test(&ptg);
+ testutil_random_ge_test(&ptg);
for (i = 0; i < 20; i++) {
- random_scalar_order(&sc[i]);
+ testutil_random_scalar_order(&sc[i]);
pt[i] = ptg;
secp256k1_scalar_add(&rs, &rs, &sc[i]);
}
@@ -4880,8 +4764,8 @@ static void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi
/* Sanity check that zero scalars don't cause problems */
for (ncount = 0; ncount < 20; ncount++) {
- random_scalar_order(&sc[ncount]);
- random_group_element_test(&pt[ncount]);
+ testutil_random_scalar_order(&sc[ncount]);
+ testutil_random_ge_test(&pt[ncount]);
}
secp256k1_scalar_clear(&sc[0]);
@@ -4902,7 +4786,7 @@ static void test_ecmult_multi(secp256k1_scratch *scratch, secp256k1_ecmult_multi
secp256k1_ge ptg;
secp256k1_gej ptgj;
- random_group_element_test(&ptg);
+ testutil_random_ge_test(&ptg);
secp256k1_gej_set_ge(&ptgj, &ptg);
for(t0i = 0; t0i < TOP; t0i++) {
@@ -4973,29 +4857,29 @@ static int test_ecmult_multi_random(secp256k1_scratch *scratch) {
int i;
/* Which multiplication function to use */
- int fn = secp256k1_testrand_int(3);
+ int fn = testrand_int(3);
secp256k1_ecmult_multi_func ecmult_multi = fn == 0 ? secp256k1_ecmult_multi_var :
fn == 1 ? secp256k1_ecmult_strauss_batch_single :
secp256k1_ecmult_pippenger_batch_single;
/* Simulate exponentially distributed num. */
- int num_bits = 2 + secp256k1_testrand_int(6);
+ int num_bits = 2 + testrand_int(6);
/* Number of (scalar, point) inputs (excluding g). */
- int num = secp256k1_testrand_int((1 << num_bits) + 1);
+ int num = testrand_int((1 << num_bits) + 1);
/* Number of those which are nonzero. */
- int num_nonzero = secp256k1_testrand_int(num + 1);
+ int num_nonzero = testrand_int(num + 1);
/* Whether we're aiming to create an input with nonzero expected result. */
- int nonzero_result = secp256k1_testrand_bits(1);
+ int nonzero_result = testrand_bits(1);
/* Whether we will provide nonzero g multiplicand. In some cases our hand
* is forced here based on num_nonzero and nonzero_result. */
int g_nonzero = num_nonzero == 0 ? nonzero_result :
num_nonzero == 1 && !nonzero_result ? 1 :
- (int)secp256k1_testrand_bits(1);
+ (int)testrand_bits(1);
/* Which g_scalar pointer to pass into ecmult_multi(). */
- const secp256k1_scalar* g_scalar_ptr = (g_nonzero || secp256k1_testrand_bits(1)) ? &g_scalar : NULL;
+ const secp256k1_scalar* g_scalar_ptr = (g_nonzero || testrand_bits(1)) ? &g_scalar : NULL;
/* How many EC multiplications were performed in this function. */
int mults = 0;
/* How many randomization steps to apply to the input list. */
- int rands = (int)secp256k1_testrand_bits(3);
+ int rands = (int)testrand_bits(3);
if (rands > num_nonzero) rands = num_nonzero;
secp256k1_gej_set_infinity(&expected);
@@ -5004,11 +4888,11 @@ static int test_ecmult_multi_random(secp256k1_scratch *scratch) {
if (g_nonzero) {
/* If g_nonzero, set g_scalar to nonzero value r. */
- random_scalar_order_test(&g_scalar);
+ testutil_random_scalar_order_test(&g_scalar);
if (!nonzero_result) {
/* If expected=0 is desired, add a (a*r, -(1/a)*g) term to compensate. */
CHECK(num_nonzero > filled);
- random_scalar_order_test(&sc_tmp);
+ testutil_random_scalar_order_test(&sc_tmp);
secp256k1_scalar_mul(&scalars[filled], &sc_tmp, &g_scalar);
secp256k1_scalar_inverse_var(&sc_tmp, &sc_tmp);
secp256k1_scalar_negate(&sc_tmp, &sc_tmp);
@@ -5020,8 +4904,8 @@ static int test_ecmult_multi_random(secp256k1_scratch *scratch) {
if (nonzero_result && filled < num_nonzero) {
/* If a nonzero result is desired, and there is space, add a random nonzero term. */
- random_scalar_order_test(&scalars[filled]);
- random_group_element_test(&ge_tmp);
+ testutil_random_scalar_order_test(&scalars[filled]);
+ testutil_random_ge_test(&ge_tmp);
secp256k1_gej_set_ge(&gejs[filled], &ge_tmp);
++filled;
}
@@ -5040,12 +4924,12 @@ static int test_ecmult_multi_random(secp256k1_scratch *scratch) {
/* Add entries to scalars,gejs so that there are num of them. All the added entries
* either have scalar=0 or point=infinity, so these do not change the expected result. */
while (filled < num) {
- if (secp256k1_testrand_bits(1)) {
+ if (testrand_bits(1)) {
secp256k1_gej_set_infinity(&gejs[filled]);
- random_scalar_order_test(&scalars[filled]);
+ testutil_random_scalar_order_test(&scalars[filled]);
} else {
secp256k1_scalar_set_int(&scalars[filled], 0);
- random_group_element_test(&ge_tmp);
+ testutil_random_ge_test(&ge_tmp);
secp256k1_gej_set_ge(&gejs[filled], &ge_tmp);
}
++filled;
@@ -5059,7 +4943,7 @@ static int test_ecmult_multi_random(secp256k1_scratch *scratch) {
secp256k1_scalar v, iv;
/* Shuffle the entries. */
for (j = 0; j < num_nonzero; ++j) {
- int k = secp256k1_testrand_int(num_nonzero - j);
+ int k = testrand_int(num_nonzero - j);
if (k != 0) {
secp256k1_gej gej = gejs[j];
secp256k1_scalar sc = scalars[j];
@@ -5079,7 +4963,7 @@ static int test_ecmult_multi_random(secp256k1_scratch *scratch) {
}
/* Transform the last input: a*P -> (v*a) * ((1/v)*P). */
CHECK(num_nonzero >= 1);
- random_scalar_order_test(&v);
+ testutil_random_scalar_order_test(&v);
secp256k1_scalar_inverse(&iv, &v);
secp256k1_scalar_mul(&scalars[num_nonzero - 1], &scalars[num_nonzero - 1], &v);
secp256k1_ecmult(&gejs[num_nonzero - 1], &gejs[num_nonzero - 1], &iv, NULL);
@@ -5088,7 +4972,7 @@ static int test_ecmult_multi_random(secp256k1_scratch *scratch) {
/* Shuffle all entries (0..num-1). */
for (i = 0; i < num; ++i) {
- int j = secp256k1_testrand_int(num - i);
+ int j = testrand_int(num - i);
if (j != 0) {
secp256k1_gej gej = gejs[i];
secp256k1_scalar sc = scalars[i];
@@ -5118,8 +5002,8 @@ static void test_ecmult_multi_batch_single(secp256k1_ecmult_multi_func ecmult_mu
ecmult_multi_data data;
secp256k1_scratch *scratch_empty;
- random_group_element_test(&pt);
- random_scalar_order(&sc);
+ testutil_random_ge_test(&pt);
+ testutil_random_scalar_order(&sc);
data.sc = &sc;
data.pt = &pt;
@@ -5150,7 +5034,7 @@ static void test_secp256k1_pippenger_bucket_window_inv(void) {
* for a given scratch space.
*/
static void test_ecmult_multi_pippenger_max_points(void) {
- size_t scratch_size = secp256k1_testrand_bits(8);
+ size_t scratch_size = testrand_bits(8);
size_t max_size = secp256k1_pippenger_scratch_size(secp256k1_pippenger_bucket_window_inv(PIPPENGER_MAX_BUCKET_WINDOW-1)+512, 12);
secp256k1_scratch *scratch;
size_t n_points_supported;
@@ -5244,15 +5128,15 @@ static void test_ecmult_multi_batching(void) {
secp256k1_gej_set_infinity(&r2);
/* Get random scalars and group elements and compute result */
- random_scalar_order(&scG);
+ testutil_random_scalar_order(&scG);
secp256k1_ecmult(&r2, &r2, &secp256k1_scalar_zero, &scG);
for(i = 0; i < n_points; i++) {
secp256k1_ge ptg;
secp256k1_gej ptgj;
- random_group_element_test(&ptg);
+ testutil_random_ge_test(&ptg);
secp256k1_gej_set_ge(&ptgj, &ptg);
pt[i] = ptg;
- random_scalar_order(&sc[i]);
+ testutil_random_scalar_order(&sc[i]);
secp256k1_ecmult(&ptgj, &ptgj, &sc[i], NULL);
secp256k1_gej_add_var(&r2, &r2, &ptgj, NULL);
}
@@ -5464,7 +5348,7 @@ static void run_wnaf(void) {
test_fixed_wnaf_small();
/* Random tests */
for (i = 0; i < COUNT; i++) {
- random_scalar_order(&n);
+ testutil_random_scalar_order(&n);
test_wnaf(&n, 4+(i%10));
test_fixed_wnaf(&n, 4 + (i % 10));
}
@@ -5645,9 +5529,9 @@ static void test_ecmult_gen_blind(void) {
secp256k1_gej pgej2;
secp256k1_ge p;
secp256k1_ge pge;
- random_scalar_order_test(&key);
+ testutil_random_scalar_order_test(&key);
secp256k1_ecmult_gen(&CTX->ecmult_gen_ctx, &pgej, &key);
- secp256k1_testrand256(seed32);
+ testrand256(seed32);
b = CTX->ecmult_gen_ctx.scalar_offset;
p = CTX->ecmult_gen_ctx.ge_offset;
secp256k1_ecmult_gen_blind(&CTX->ecmult_gen_ctx, seed32);
@@ -5741,7 +5625,7 @@ static void run_endomorphism_tests(void) {
for (i = 0; i < 100U * COUNT; ++i) {
secp256k1_scalar full;
- random_scalar_order_test(&full);
+ testutil_random_scalar_order_test(&full);
test_scalar_split(&full);
}
for (i = 0; i < sizeof(scalars_near_split_bounds) / sizeof(scalars_near_split_bounds[0]); ++i) {
@@ -6327,7 +6211,7 @@ static void run_eckey_negate_test(void) {
unsigned char seckey[32];
unsigned char seckey_tmp[32];
- random_scalar_order_b32(seckey);
+ testutil_random_scalar_order_b32(seckey);
memcpy(seckey_tmp, seckey, 32);
/* Verify negation changes the key and changes it back */
@@ -6351,7 +6235,7 @@ static void run_eckey_negate_test(void) {
/* Negating an overflowing seckey fails and the seckey is zeroed. In this
* test, the seckey has 16 random bytes to ensure that ec_seckey_negate
* doesn't just set seckey to a constant value in case of failure. */
- random_scalar_order_b32(seckey);
+ testutil_random_scalar_order_b32(seckey);
memset(seckey, 0xFF, 16);
memset(seckey_tmp, 0, 32);
CHECK(secp256k1_ec_seckey_negate(CTX, seckey) == 0);
@@ -6361,7 +6245,7 @@ static void run_eckey_negate_test(void) {
static void random_sign(secp256k1_scalar *sigr, secp256k1_scalar *sigs, const secp256k1_scalar *key, const secp256k1_scalar *msg, int *recid) {
secp256k1_scalar nonce;
do {
- random_scalar_order_test(&nonce);
+ testutil_random_scalar_order_test(&nonce);
} while(!secp256k1_ecdsa_sig_sign(&CTX->ecmult_gen_ctx, sigr, sigs, key, msg, &nonce, recid));
}
@@ -6373,11 +6257,11 @@ static void test_ecdsa_sign_verify(void) {
secp256k1_scalar sigr, sigs;
int getrec;
int recid;
- random_scalar_order_test(&msg);
- random_scalar_order_test(&key);
+ testutil_random_scalar_order_test(&msg);
+ testutil_random_scalar_order_test(&key);
secp256k1_ecmult_gen(&CTX->ecmult_gen_ctx, &pubj, &key);
secp256k1_ge_set_gej(&pub, &pubj);
- getrec = secp256k1_testrand_bits(1);
+ getrec = testrand_bits(1);
/* The specific way in which this conditional is written sidesteps a potential bug in clang.
See the commit messages of the commit that introduced this comment for details. */
if (getrec) {
@@ -6470,8 +6354,8 @@ static void test_ecdsa_end_to_end(void) {
/* Generate a random key and message. */
{
secp256k1_scalar msg, key;
- random_scalar_order_test(&msg);
- random_scalar_order_test(&key);
+ testutil_random_scalar_order_test(&msg);
+ testutil_random_scalar_order_test(&key);
secp256k1_scalar_get_b32(privkey, &key);
secp256k1_scalar_get_b32(message, &msg);
}
@@ -6481,7 +6365,7 @@ static void test_ecdsa_end_to_end(void) {
CHECK(secp256k1_ec_pubkey_create(CTX, &pubkey, privkey) == 1);
/* Verify exporting and importing public key. */
- CHECK(secp256k1_ec_pubkey_serialize(CTX, pubkeyc, &pubkeyclen, &pubkey, secp256k1_testrand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED));
+ CHECK(secp256k1_ec_pubkey_serialize(CTX, pubkeyc, &pubkeyclen, &pubkey, testrand_bits(1) == 1 ? SECP256K1_EC_COMPRESSED : SECP256K1_EC_UNCOMPRESSED));
memset(&pubkey, 0, sizeof(pubkey));
CHECK(secp256k1_ec_pubkey_parse(CTX, &pubkey, pubkeyc, pubkeyclen) == 1);
@@ -6493,19 +6377,19 @@ static void test_ecdsa_end_to_end(void) {
CHECK(secp256k1_memcmp_var(&pubkey_tmp, &pubkey, sizeof(pubkey)) == 0);
/* Verify private key import and export. */
- CHECK(ec_privkey_export_der(CTX, seckey, &seckeylen, privkey, secp256k1_testrand_bits(1) == 1));
+ CHECK(ec_privkey_export_der(CTX, seckey, &seckeylen, privkey, testrand_bits(1) == 1));
CHECK(ec_privkey_import_der(CTX, privkey2, seckey, seckeylen) == 1);
CHECK(secp256k1_memcmp_var(privkey, privkey2, 32) == 0);
/* Optionally tweak the keys using addition. */
- if (secp256k1_testrand_int(3) == 0) {
+ if (testrand_int(3) == 0) {
int ret1;
int ret2;
int ret3;
unsigned char rnd[32];
unsigned char privkey_tmp[32];
secp256k1_pubkey pubkey2;
- secp256k1_testrand256_test(rnd);
+ testrand256_test(rnd);
memcpy(privkey_tmp, privkey, 32);
ret1 = secp256k1_ec_seckey_tweak_add(CTX, privkey, rnd);
ret2 = secp256k1_ec_pubkey_tweak_add(CTX, &pubkey, rnd);
@@ -6522,14 +6406,14 @@ static void test_ecdsa_end_to_end(void) {
}
/* Optionally tweak the keys using multiplication. */
- if (secp256k1_testrand_int(3) == 0) {
+ if (testrand_int(3) == 0) {
int ret1;
int ret2;
int ret3;
unsigned char rnd[32];
unsigned char privkey_tmp[32];
secp256k1_pubkey pubkey2;
- secp256k1_testrand256_test(rnd);
+ testrand256_test(rnd);
memcpy(privkey_tmp, privkey, 32);
ret1 = secp256k1_ec_seckey_tweak_mul(CTX, privkey, rnd);
ret2 = secp256k1_ec_pubkey_tweak_mul(CTX, &pubkey, rnd);
@@ -6591,7 +6475,7 @@ static void test_ecdsa_end_to_end(void) {
/* Serialize/destroy/parse DER and verify again. */
siglen = 74;
CHECK(secp256k1_ecdsa_signature_serialize_der(CTX, sig, &siglen, &signature[0]) == 1);
- sig[secp256k1_testrand_int(siglen)] += 1 + secp256k1_testrand_int(255);
+ sig[testrand_int(siglen)] += 1 + testrand_int(255);
CHECK(secp256k1_ecdsa_signature_parse_der(CTX, &signature[0], sig, siglen) == 0 ||
secp256k1_ecdsa_verify(CTX, &signature[0], message, &pubkey) == 0);
}
@@ -6601,23 +6485,23 @@ static void test_random_pubkeys(void) {
secp256k1_ge elem2;
unsigned char in[65];
/* Generate some randomly sized pubkeys. */
- size_t len = secp256k1_testrand_bits(2) == 0 ? 65 : 33;
- if (secp256k1_testrand_bits(2) == 0) {
- len = secp256k1_testrand_bits(6);
+ size_t len = testrand_bits(2) == 0 ? 65 : 33;
+ if (testrand_bits(2) == 0) {
+ len = testrand_bits(6);
}
if (len == 65) {
- in[0] = secp256k1_testrand_bits(1) ? 4 : (secp256k1_testrand_bits(1) ? 6 : 7);
+ in[0] = testrand_bits(1) ? 4 : (testrand_bits(1) ? 6 : 7);
} else {
- in[0] = secp256k1_testrand_bits(1) ? 2 : 3;
+ in[0] = testrand_bits(1) ? 2 : 3;
}
- if (secp256k1_testrand_bits(3) == 0) {
- in[0] = secp256k1_testrand_bits(8);
+ if (testrand_bits(3) == 0) {
+ in[0] = testrand_bits(8);
}
if (len > 1) {
- secp256k1_testrand256(&in[1]);
+ testrand256(&in[1]);
}
if (len > 33) {
- secp256k1_testrand256(&in[33]);
+ testrand256(&in[33]);
}
if (secp256k1_eckey_pubkey_parse(&elem, in, len)) {
unsigned char out[65];
@@ -6639,7 +6523,7 @@ static void test_random_pubkeys(void) {
CHECK(secp256k1_eckey_pubkey_parse(&elem2, in, size));
CHECK(secp256k1_ge_eq_var(&elem2, &elem));
/* Check that the X9.62 hybrid type is checked. */
- in[0] = secp256k1_testrand_bits(1) ? 6 : 7;
+ in[0] = testrand_bits(1) ? 6 : 7;
res = secp256k1_eckey_pubkey_parse(&elem2, in, size);
if (firstb == 2 || firstb == 3) {
if (in[0] == firstb + 4) {
@@ -6718,7 +6602,7 @@ static void permute(size_t *arr, size_t n) {
size_t i;
for (i = n - 1; i >= 1; i--) {
size_t tmp, j;
- j = secp256k1_testrand_int(i + 1);
+ j = testrand_int(i + 1);
tmp = arr[i];
arr[i] = arr[j];
arr[j] = tmp;
@@ -6728,7 +6612,7 @@ static void permute(size_t *arr, size_t n) {
static void rand_pk(secp256k1_pubkey *pk) {
unsigned char seckey[32];
secp256k1_keypair keypair;
- secp256k1_testrand256(seckey);
+ testrand256(seckey);
CHECK(secp256k1_keypair_create(CTX, &keypair, seckey) == 1);
CHECK(secp256k1_keypair_pub(CTX, pk, &keypair) == 1);
}
@@ -6944,27 +6828,27 @@ static void assign_big_endian(unsigned char *ptr, size_t ptrlen, uint32_t val) {
static void damage_array(unsigned char *sig, size_t *len) {
int pos;
- int action = secp256k1_testrand_bits(3);
+ int action = testrand_bits(3);
if (action < 1 && *len > 3) {
/* Delete a byte. */
- pos = secp256k1_testrand_int(*len);
+ pos = testrand_int(*len);
memmove(sig + pos, sig + pos + 1, *len - pos - 1);
(*len)--;
return;
} else if (action < 2 && *len < 2048) {
/* Insert a byte. */
- pos = secp256k1_testrand_int(1 + *len);
+ pos = testrand_int(1 + *len);
memmove(sig + pos + 1, sig + pos, *len - pos);
- sig[pos] = secp256k1_testrand_bits(8);
+ sig[pos] = testrand_bits(8);
(*len)++;
return;
} else if (action < 4) {
/* Modify a byte. */
- sig[secp256k1_testrand_int(*len)] += 1 + secp256k1_testrand_int(255);
+ sig[testrand_int(*len)] += 1 + testrand_int(255);
return;
} else { /* action < 8 */
/* Modify a bit. */
- sig[secp256k1_testrand_int(*len)] ^= 1 << secp256k1_testrand_bits(3);
+ sig[testrand_int(*len)] ^= 1 << testrand_bits(3);
return;
}
}
@@ -6977,23 +6861,23 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
int n;
*len = 0;
- der = secp256k1_testrand_bits(2) == 0;
+ der = testrand_bits(2) == 0;
*certainly_der = der;
*certainly_not_der = 0;
- indet = der ? 0 : secp256k1_testrand_int(10) == 0;
+ indet = der ? 0 : testrand_int(10) == 0;
for (n = 0; n < 2; n++) {
/* We generate two classes of numbers: nlow==1 "low" ones (up to 32 bytes), nlow==0 "high" ones (32 bytes with 129 top bits set, or larger than 32 bytes) */
- nlow[n] = der ? 1 : (secp256k1_testrand_bits(3) != 0);
+ nlow[n] = der ? 1 : (testrand_bits(3) != 0);
/* The length of the number in bytes (the first byte of which will always be nonzero) */
- nlen[n] = nlow[n] ? secp256k1_testrand_int(33) : 32 + secp256k1_testrand_int(200) * secp256k1_testrand_bits(3) / 8;
+ nlen[n] = nlow[n] ? testrand_int(33) : 32 + testrand_int(200) * testrand_bits(3) / 8;
CHECK(nlen[n] <= 232);
/* The top bit of the number. */
- nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : secp256k1_testrand_bits(1));
+ nhbit[n] = (nlow[n] == 0 && nlen[n] == 32) ? 1 : (nlen[n] == 0 ? 0 : testrand_bits(1));
/* The top byte of the number (after the potential hardcoded 16 0xFF characters for "high" 32 bytes numbers) */
- nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + secp256k1_testrand_bits(7) : 1 + secp256k1_testrand_int(127));
+ nhbyte[n] = nlen[n] == 0 ? 0 : (nhbit[n] ? 128 + testrand_bits(7) : 1 + testrand_int(127));
/* The number of zero bytes in front of the number (which is 0 or 1 in case of DER, otherwise we extend up to 300 bytes) */
- nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? secp256k1_testrand_int(3) : secp256k1_testrand_int(300 - nlen[n]) * secp256k1_testrand_bits(3) / 8);
+ nzlen[n] = der ? ((nlen[n] == 0 || nhbit[n]) ? 1 : 0) : (nlow[n] ? testrand_int(3) : testrand_int(300 - nlen[n]) * testrand_bits(3) / 8);
if (nzlen[n] > ((nlen[n] == 0 || nhbit[n]) ? 1 : 0)) {
*certainly_not_der = 1;
}
@@ -7002,7 +6886,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
nlenlen[n] = nlen[n] + nzlen[n] < 128 ? 0 : (nlen[n] + nzlen[n] < 256 ? 1 : 2);
if (!der) {
/* nlenlen[n] max 127 bytes */
- int add = secp256k1_testrand_int(127 - nlenlen[n]) * secp256k1_testrand_bits(4) * secp256k1_testrand_bits(4) / 256;
+ int add = testrand_int(127 - nlenlen[n]) * testrand_bits(4) * testrand_bits(4) / 256;
nlenlen[n] += add;
if (add != 0) {
*certainly_not_der = 1;
@@ -7016,7 +6900,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
CHECK(tlen <= 856);
/* The length of the garbage inside the tuple. */
- elen = (der || indet) ? 0 : secp256k1_testrand_int(980 - tlen) * secp256k1_testrand_bits(3) / 8;
+ elen = (der || indet) ? 0 : testrand_int(980 - tlen) * testrand_bits(3) / 8;
if (elen != 0) {
*certainly_not_der = 1;
}
@@ -7024,7 +6908,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
CHECK(tlen <= 980);
/* The length of the garbage after the end of the tuple. */
- glen = der ? 0 : secp256k1_testrand_int(990 - tlen) * secp256k1_testrand_bits(3) / 8;
+ glen = der ? 0 : testrand_int(990 - tlen) * testrand_bits(3) / 8;
if (glen != 0) {
*certainly_not_der = 1;
}
@@ -7039,7 +6923,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
} else {
int tlenlen = tlen < 128 ? 0 : (tlen < 256 ? 1 : 2);
if (!der) {
- int add = secp256k1_testrand_int(127 - tlenlen) * secp256k1_testrand_bits(4) * secp256k1_testrand_bits(4) / 256;
+ int add = testrand_int(127 - tlenlen) * testrand_bits(4) * testrand_bits(4) / 256;
tlenlen += add;
if (add != 0) {
*certainly_not_der = 1;
@@ -7090,13 +6974,13 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
nlen[n]--;
}
/* Generate remaining random bytes of number */
- secp256k1_testrand_bytes_test(sig + *len, nlen[n]);
+ testrand_bytes_test(sig + *len, nlen[n]);
*len += nlen[n];
nlen[n] = 0;
}
/* Generate random garbage inside tuple. */
- secp256k1_testrand_bytes_test(sig + *len, elen);
+ testrand_bytes_test(sig + *len, elen);
*len += elen;
/* Generate end-of-contents bytes. */
@@ -7108,7 +6992,7 @@ static void random_ber_signature(unsigned char *sig, size_t *len, int* certainly
CHECK(tlen + glen <= 1121);
/* Generate random garbage outside tuple. */
- secp256k1_testrand_bytes_test(sig + *len, glen);
+ testrand_bytes_test(sig + *len, glen);
*len += glen;
tlen += glen;
CHECK(tlen <= 1121);
@@ -7771,7 +7655,7 @@ int main(int argc, char **argv) {
run_xoshiro256pp_tests();
/* find random seed */
- secp256k1_testrand_init(argc > 2 ? argv[2] : NULL);
+ testrand_init(argc > 2 ? argv[2] : NULL);
/*** Setup test environment ***/
@@ -7780,9 +7664,9 @@ int main(int argc, char **argv) {
/* Randomize the context only with probability 15/16
to make sure we test without context randomization from time to time.
TODO Reconsider this when recalibrating the tests. */
- if (secp256k1_testrand_bits(4)) {
+ if (testrand_bits(4)) {
unsigned char rand32[32];
- secp256k1_testrand256(rand32);
+ testrand256(rand32);
CHECK(secp256k1_context_randomize(CTX, rand32));
}
/* Make a writable copy of secp256k1_context_static in order to test the effect of API functions
@@ -7909,7 +7793,7 @@ int main(int argc, char **argv) {
free(STATIC_CTX);
secp256k1_context_destroy(CTX);
- secp256k1_testrand_finish();
+ testrand_finish();
printf("no problems found\n");
return 0;
diff --git a/src/secp256k1/src/tests_exhaustive.c b/src/secp256k1/src/tests_exhaustive.c
index 5843b3e1f5..6efa88982e 100644
--- a/src/secp256k1/src/tests_exhaustive.c
+++ b/src/secp256k1/src/tests_exhaustive.c
@@ -171,7 +171,7 @@ static void test_exhaustive_ecmult(const secp256k1_ge *group, const secp256k1_ge
CHECK(secp256k1_fe_equal(&tmpf, &group[(i * j) % EXHAUSTIVE_TEST_ORDER].x));
/* Test secp256k1_ecmult_const_xonly with all curve X coordinates, with random xd. */
- random_fe_non_zero(&xd);
+ testutil_random_fe_non_zero(&xd);
secp256k1_fe_mul(&xn, &xd, &group[i].x);
ret = secp256k1_ecmult_const_xonly(&tmpf, &xn, &xd, &ng, 0);
CHECK(ret);
@@ -375,7 +375,7 @@ int main(int argc, char** argv) {
printf("test count = %i\n", count);
/* find random seed */
- secp256k1_testrand_init(argc > 2 ? argv[2] : NULL);
+ testrand_init(argc > 2 ? argv[2] : NULL);
/* set up split processing */
if (argc > 4) {
@@ -395,7 +395,7 @@ int main(int argc, char** argv) {
while (count--) {
/* Build context */
ctx = secp256k1_context_create(SECP256K1_CONTEXT_NONE);
- secp256k1_testrand256(rand32);
+ testrand256(rand32);
CHECK(secp256k1_context_randomize(ctx, rand32));
/* Generate the entire group */
@@ -408,7 +408,7 @@ int main(int argc, char** argv) {
/* Set a different random z-value for each Jacobian point, except z=1
is used in the last iteration. */
secp256k1_fe z;
- random_fe(&z);
+ testutil_random_fe(&z);
secp256k1_gej_rescale(&groupj[i], &z);
}
@@ -459,7 +459,7 @@ int main(int argc, char** argv) {
secp256k1_context_destroy(ctx);
}
- secp256k1_testrand_finish();
+ testrand_finish();
printf("no problems found\n");
return 0;
diff --git a/src/secp256k1/src/testutil.h b/src/secp256k1/src/testutil.h
index 4e2cb7d5b3..8296a5fb99 100644
--- a/src/secp256k1/src/testutil.h
+++ b/src/secp256k1/src/testutil.h
@@ -7,23 +7,136 @@
#define SECP256K1_TESTUTIL_H
#include "field.h"
+#include "group.h"
#include "testrand.h"
#include "util.h"
-static void random_fe(secp256k1_fe *x) {
+static void testutil_random_fe(secp256k1_fe *x) {
unsigned char bin[32];
do {
- secp256k1_testrand256(bin);
+ testrand256(bin);
if (secp256k1_fe_set_b32_limit(x, bin)) {
return;
}
} while(1);
}
-static void random_fe_non_zero(secp256k1_fe *nz) {
+static void testutil_random_fe_non_zero(secp256k1_fe *nz) {
do {
- random_fe(nz);
+ testutil_random_fe(nz);
} while (secp256k1_fe_is_zero(nz));
}
+static void testutil_random_fe_magnitude(secp256k1_fe *fe, int m) {
+ secp256k1_fe zero;
+ int n = testrand_int(m + 1);
+ secp256k1_fe_normalize(fe);
+ if (n == 0) {
+ return;
+ }
+ secp256k1_fe_clear(&zero);
+ secp256k1_fe_negate(&zero, &zero, 0);
+ secp256k1_fe_mul_int_unchecked(&zero, n - 1);
+ secp256k1_fe_add(fe, &zero);
+#ifdef VERIFY
+ CHECK(fe->magnitude == n);
+#endif
+}
+
+static void testutil_random_fe_test(secp256k1_fe *x) {
+ unsigned char bin[32];
+ do {
+ testrand256_test(bin);
+ if (secp256k1_fe_set_b32_limit(x, bin)) {
+ return;
+ }
+ } while(1);
+}
+
+static void testutil_random_fe_non_zero_test(secp256k1_fe *fe) {
+ do {
+ testutil_random_fe_test(fe);
+ } while(secp256k1_fe_is_zero(fe));
+}
+
+static void testutil_random_ge_x_magnitude(secp256k1_ge *ge) {
+ testutil_random_fe_magnitude(&ge->x, SECP256K1_GE_X_MAGNITUDE_MAX);
+}
+
+static void testutil_random_ge_y_magnitude(secp256k1_ge *ge) {
+ testutil_random_fe_magnitude(&ge->y, SECP256K1_GE_Y_MAGNITUDE_MAX);
+}
+
+static void testutil_random_gej_x_magnitude(secp256k1_gej *gej) {
+ testutil_random_fe_magnitude(&gej->x, SECP256K1_GEJ_X_MAGNITUDE_MAX);
+}
+
+static void testutil_random_gej_y_magnitude(secp256k1_gej *gej) {
+ testutil_random_fe_magnitude(&gej->y, SECP256K1_GEJ_Y_MAGNITUDE_MAX);
+}
+
+static void testutil_random_gej_z_magnitude(secp256k1_gej *gej) {
+ testutil_random_fe_magnitude(&gej->z, SECP256K1_GEJ_Z_MAGNITUDE_MAX);
+}
+
+static void testutil_random_ge_test(secp256k1_ge *ge) {
+ secp256k1_fe fe;
+ do {
+ testutil_random_fe_test(&fe);
+ if (secp256k1_ge_set_xo_var(ge, &fe, testrand_bits(1))) {
+ secp256k1_fe_normalize(&ge->y);
+ break;
+ }
+ } while(1);
+ ge->infinity = 0;
+}
+
+static void testutil_random_ge_jacobian_test(secp256k1_gej *gej, const secp256k1_ge *ge) {
+ secp256k1_fe z2, z3;
+ testutil_random_fe_non_zero_test(&gej->z);
+ secp256k1_fe_sqr(&z2, &gej->z);
+ secp256k1_fe_mul(&z3, &z2, &gej->z);
+ secp256k1_fe_mul(&gej->x, &ge->x, &z2);
+ secp256k1_fe_mul(&gej->y, &ge->y, &z3);
+ gej->infinity = ge->infinity;
+}
+
+static void testutil_random_gej_test(secp256k1_gej *gej) {
+ secp256k1_ge ge;
+ testutil_random_ge_test(&ge);
+ testutil_random_ge_jacobian_test(gej, &ge);
+}
+
+static void testutil_random_scalar_order_test(secp256k1_scalar *num) {
+ do {
+ unsigned char b32[32];
+ int overflow = 0;
+ testrand256_test(b32);
+ secp256k1_scalar_set_b32(num, b32, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(num)) {
+ continue;
+ }
+ break;
+ } while(1);
+}
+
+static void testutil_random_scalar_order(secp256k1_scalar *num) {
+ do {
+ unsigned char b32[32];
+ int overflow = 0;
+ testrand256(b32);
+ secp256k1_scalar_set_b32(num, b32, &overflow);
+ if (overflow || secp256k1_scalar_is_zero(num)) {
+ continue;
+ }
+ break;
+ } while(1);
+}
+
+static void testutil_random_scalar_order_b32(unsigned char *b32) {
+ secp256k1_scalar num;
+ testutil_random_scalar_order(&num);
+ secp256k1_scalar_get_b32(b32, &num);
+}
+
#endif /* SECP256K1_TESTUTIL_H */
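(A minimal usage sketch, not part of the patch: exercising the relocated testutil.h helpers. The function name example_testutil_usage is hypothetical; it assumes compilation into the existing test harness where CHECK and the group/scalar types are available, and the rescale check mirrors the secp256k1_gej_eq_var test in tests.c above.)

static void example_testutil_usage(void) {
    secp256k1_gej a, b;
    secp256k1_fe z;
    secp256k1_scalar s;

    testutil_random_scalar_order_test(&s); /* random nonzero scalar below the group order */
    testutil_random_gej_test(&a);          /* random Jacobian point */
    b = a;
    testutil_random_fe_non_zero_test(&z);  /* random nonzero field element */
    secp256k1_gej_rescale(&a, &z);         /* same point, different z representation */
    CHECK(secp256k1_gej_eq_var(&a, &b));   /* equality is independent of the representation */
    (void)s;
}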
diff --git a/src/test/blockfilter_index_tests.cpp b/src/test/blockfilter_index_tests.cpp
index d44d84af93..067a32d6a4 100644
--- a/src/test/blockfilter_index_tests.cpp
+++ b/src/test/blockfilter_index_tests.cpp
@@ -67,7 +67,8 @@ CBlock BuildChainTestingSetup::CreateBlock(const CBlockIndex* prev,
const std::vector<CMutableTransaction>& txns,
const CScript& scriptPubKey)
{
- std::unique_ptr<CBlockTemplate> pblocktemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), m_node.mempool.get()}.CreateNewBlock(scriptPubKey);
+ BlockAssembler::Options options;
+ std::unique_ptr<CBlockTemplate> pblocktemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), m_node.mempool.get(), options}.CreateNewBlock(scriptPubKey);
CBlock& block = pblocktemplate->block;
block.hashPrevBlock = prev->GetBlockHash();
block.nTime = prev->nTime + 1;
diff --git a/src/test/blockmanager_tests.cpp b/src/test/blockmanager_tests.cpp
index efe0983698..9eb7acc3ca 100644
--- a/src/test/blockmanager_tests.cpp
+++ b/src/test/blockmanager_tests.cpp
@@ -27,7 +27,7 @@ BOOST_FIXTURE_TEST_SUITE(blockmanager_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(blockmanager_find_block_pos)
{
const auto params {CreateChainParams(ArgsManager{}, ChainType::MAIN)};
- KernelNotifications notifications{*Assert(m_node.shutdown), m_node.exit_status};
+ KernelNotifications notifications{*Assert(m_node.shutdown), m_node.exit_status, *Assert(m_node.warnings)};
const BlockManager::Options blockman_opts{
.chainparams = *params,
.blocks_dir = m_args.GetBlocksDirPath(),
@@ -134,7 +134,7 @@ BOOST_FIXTURE_TEST_CASE(blockmanager_block_data_availability, TestChain100Setup)
BOOST_AUTO_TEST_CASE(blockmanager_flush_block_file)
{
- KernelNotifications notifications{*Assert(m_node.shutdown), m_node.exit_status};
+ KernelNotifications notifications{*Assert(m_node.shutdown), m_node.exit_status, *Assert(m_node.warnings)};
node::BlockManager::Options blockman_opts{
.chainparams = Params(),
.blocks_dir = m_args.GetBlocksDirPath(),
diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp
index 5e9ae78681..42db28daf5 100644
--- a/src/test/denialofservice_tests.cpp
+++ b/src/test/denialofservice_tests.cpp
@@ -141,7 +141,7 @@ BOOST_AUTO_TEST_CASE(stale_tip_peer_management)
{
NodeId id{0};
auto connman = std::make_unique<ConnmanTestMsg>(0x1337, 0x1337, *m_node.addrman, *m_node.netgroupman, Params());
- auto peerLogic = PeerManager::make(*connman, *m_node.addrman, nullptr, *m_node.chainman, *m_node.mempool, {});
+ auto peerLogic = PeerManager::make(*connman, *m_node.addrman, nullptr, *m_node.chainman, *m_node.mempool, *m_node.warnings, {});
constexpr int max_outbound_full_relay = MAX_OUTBOUND_FULL_RELAY_CONNECTIONS;
CConnman::Options options;
@@ -239,7 +239,7 @@ BOOST_AUTO_TEST_CASE(block_relay_only_eviction)
{
NodeId id{0};
auto connman = std::make_unique<ConnmanTestMsg>(0x1337, 0x1337, *m_node.addrman, *m_node.netgroupman, Params());
- auto peerLogic = PeerManager::make(*connman, *m_node.addrman, nullptr, *m_node.chainman, *m_node.mempool, {});
+ auto peerLogic = PeerManager::make(*connman, *m_node.addrman, nullptr, *m_node.chainman, *m_node.mempool, *m_node.warnings, {});
constexpr int max_outbound_block_relay{MAX_BLOCK_RELAY_ONLY_CONNECTIONS};
constexpr int64_t MINIMUM_CONNECT_TIME{30};
@@ -300,7 +300,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
auto banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = std::make_unique<ConnmanTestMsg>(0x1337, 0x1337, *m_node.addrman, *m_node.netgroupman, Params());
- auto peerLogic = PeerManager::make(*connman, *m_node.addrman, banman.get(), *m_node.chainman, *m_node.mempool, {});
+ auto peerLogic = PeerManager::make(*connman, *m_node.addrman, banman.get(), *m_node.chainman, *m_node.mempool, *m_node.warnings, {});
CNetAddr tor_netaddr;
BOOST_REQUIRE(
@@ -330,7 +330,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
peerLogic->InitializeNode(*nodes[0], NODE_NETWORK);
nodes[0]->fSuccessfullyConnected = true;
connman->AddTestNode(*nodes[0]);
- peerLogic->UnitTestMisbehaving(nodes[0]->GetId(), DISCOURAGEMENT_THRESHOLD); // Should be discouraged
+ peerLogic->UnitTestMisbehaving(nodes[0]->GetId()); // Should be discouraged
BOOST_CHECK(peerLogic->SendMessages(nodes[0]));
BOOST_CHECK(banman->IsDiscouraged(addr[0]));
@@ -350,7 +350,6 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
peerLogic->InitializeNode(*nodes[1], NODE_NETWORK);
nodes[1]->fSuccessfullyConnected = true;
connman->AddTestNode(*nodes[1]);
- peerLogic->UnitTestMisbehaving(nodes[1]->GetId(), DISCOURAGEMENT_THRESHOLD - 1);
BOOST_CHECK(peerLogic->SendMessages(nodes[1]));
// [0] is still discouraged/disconnected.
BOOST_CHECK(banman->IsDiscouraged(addr[0]));
@@ -358,7 +357,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
// [1] is not discouraged/disconnected yet.
BOOST_CHECK(!banman->IsDiscouraged(addr[1]));
BOOST_CHECK(!nodes[1]->fDisconnect);
- peerLogic->UnitTestMisbehaving(nodes[1]->GetId(), 1); // [1] reaches discouragement threshold
+ peerLogic->UnitTestMisbehaving(nodes[1]->GetId());
BOOST_CHECK(peerLogic->SendMessages(nodes[1]));
// Expect both [0] and [1] to be discouraged/disconnected now.
BOOST_CHECK(banman->IsDiscouraged(addr[0]));
@@ -381,7 +380,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
peerLogic->InitializeNode(*nodes[2], NODE_NETWORK);
nodes[2]->fSuccessfullyConnected = true;
connman->AddTestNode(*nodes[2]);
- peerLogic->UnitTestMisbehaving(nodes[2]->GetId(), DISCOURAGEMENT_THRESHOLD);
+ peerLogic->UnitTestMisbehaving(nodes[2]->GetId());
BOOST_CHECK(peerLogic->SendMessages(nodes[2]));
BOOST_CHECK(banman->IsDiscouraged(addr[0]));
BOOST_CHECK(banman->IsDiscouraged(addr[1]));
@@ -402,7 +401,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
auto banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = std::make_unique<CConnman>(0x1337, 0x1337, *m_node.addrman, *m_node.netgroupman, Params());
- auto peerLogic = PeerManager::make(*connman, *m_node.addrman, banman.get(), *m_node.chainman, *m_node.mempool, {});
+ auto peerLogic = PeerManager::make(*connman, *m_node.addrman, banman.get(), *m_node.chainman, *m_node.mempool, *m_node.warnings, {});
banman->ClearBanned();
int64_t nStartTime = GetTime();
@@ -423,7 +422,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
peerLogic->InitializeNode(dummyNode, NODE_NETWORK);
dummyNode.fSuccessfullyConnected = true;
- peerLogic->UnitTestMisbehaving(dummyNode.GetId(), DISCOURAGEMENT_THRESHOLD);
+ peerLogic->UnitTestMisbehaving(dummyNode.GetId());
BOOST_CHECK(peerLogic->SendMessages(&dummyNode));
BOOST_CHECK(banman->IsDiscouraged(addr));
diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp
index 9a54a44bd3..c1c9945a04 100644
--- a/src/test/fuzz/fuzz.cpp
+++ b/src/test/fuzz/fuzz.cpp
@@ -101,8 +101,9 @@ void ResetCoverageCounters() {}
void initialize()
{
- // Terminate immediately if a fuzzing harness ever tries to create a TCP socket.
- CreateSock = [](const sa_family_t&) -> std::unique_ptr<Sock> { std::terminate(); };
+ // Terminate immediately if a fuzzing harness ever tries to create a socket.
+ // Individual tests can override this by pointing CreateSock to a mocked alternative.
+ CreateSock = [](int, int, int) -> std::unique_ptr<Sock> { std::terminate(); };
// Terminate immediately if a fuzzing harness ever tries to perform a DNS lookup.
g_dns_lookup = [](const std::string& name, bool allow_lookup) {
diff --git a/src/test/fuzz/i2p.cpp b/src/test/fuzz/i2p.cpp
index 3af5bed30a..51517187a0 100644
--- a/src/test/fuzz/i2p.cpp
+++ b/src/test/fuzz/i2p.cpp
@@ -27,7 +27,7 @@ FUZZ_TARGET(i2p, .init = initialize_i2p)
// Mock CreateSock() to create FuzzedSock.
auto CreateSockOrig = CreateSock;
- CreateSock = [&fuzzed_data_provider](const sa_family_t&) {
+ CreateSock = [&fuzzed_data_provider](int, int, int) {
return std::make_unique<FuzzedSock>(fuzzed_data_provider);
};
diff --git a/src/test/fuzz/package_eval.cpp b/src/test/fuzz/package_eval.cpp
index 122543ebad..53aedf23ea 100644
--- a/src/test/fuzz/package_eval.cpp
+++ b/src/test/fuzz/package_eval.cpp
@@ -314,7 +314,7 @@ FUZZ_TARGET(tx_package_eval, .init = initialize_tx_pool)
// just use result_package.m_state here. This makes the expect_valid check meaningless, but
// we can still verify that the contents of m_tx_results are consistent with m_state.
const bool expect_valid{result_package.m_state.IsValid()};
- Assert(!CheckPackageMempoolAcceptResult(txs, result_package, expect_valid, nullptr));
+ Assert(!CheckPackageMempoolAcceptResult(txs, result_package, expect_valid, &tx_pool));
} else {
// This is empty if it fails early checks, or "full" if transactions are looked at deeper
Assert(result_package.m_tx_results.size() == txs.size() || result_package.m_tx_results.empty());
diff --git a/src/test/fuzz/rbf.cpp b/src/test/fuzz/rbf.cpp
index 4c7e70e3b0..eb981352ec 100644
--- a/src/test/fuzz/rbf.cpp
+++ b/src/test/fuzz/rbf.cpp
@@ -91,8 +91,10 @@ FUZZ_TARGET(package_rbf, .init = initialize_package_rbf)
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
SetMockTime(ConsumeTime(fuzzed_data_provider));
- std::optional<CMutableTransaction> child = ConsumeDeserializable<CMutableTransaction>(fuzzed_data_provider, TX_WITH_WITNESS);
- if (!child) return;
+ // The "real" virtual size is not important for this test, since ConsumeTxMemPoolEntry generates its own virtual size values,
+ // so we construct small transactions for performance. The child only needs an input so it can later (optionally) be connected to a parent.
+ CMutableTransaction child;
+ child.vin.resize(1);
bilingual_str error;
CTxMemPool pool{MemPoolOptionsForTest(g_setup->m_node), error};
@@ -113,15 +115,13 @@ FUZZ_TARGET(package_rbf, .init = initialize_package_rbf)
LIMITED_WHILE(fuzzed_data_provider.ConsumeBool(), NUM_ITERS)
{
// Make sure txns only have one input, and that a unique input is given to avoid circular references
- std::optional<CMutableTransaction> parent = ConsumeDeserializable<CMutableTransaction>(fuzzed_data_provider, TX_WITH_WITNESS);
- if (!parent) {
- return;
- }
+ CMutableTransaction parent;
assert(iter <= g_outpoints.size());
- parent->vin.resize(1);
- parent->vin[0].prevout = g_outpoints[iter++];
+ parent.vin.resize(1);
+ parent.vin[0].prevout = g_outpoints[iter++];
+ parent.vout.emplace_back(0, CScript());
- mempool_txs.emplace_back(*parent);
+ mempool_txs.emplace_back(parent);
const auto parent_entry = ConsumeTxMemPoolEntry(fuzzed_data_provider, mempool_txs.back());
running_vsize_total += parent_entry.GetTxSize();
if (running_vsize_total > std::numeric_limits<int32_t>::max()) {
@@ -130,10 +130,10 @@ FUZZ_TARGET(package_rbf, .init = initialize_package_rbf)
break;
}
pool.addUnchecked(parent_entry);
- if (fuzzed_data_provider.ConsumeBool() && !child->vin.empty()) {
- child->vin[0].prevout = COutPoint{mempool_txs.back().GetHash(), 0};
+ if (fuzzed_data_provider.ConsumeBool()) {
+ child.vin[0].prevout = COutPoint{mempool_txs.back().GetHash(), 0};
}
- mempool_txs.emplace_back(*child);
+ mempool_txs.emplace_back(child);
const auto child_entry = ConsumeTxMemPoolEntry(fuzzed_data_provider, mempool_txs.back());
running_vsize_total += child_entry.GetTxSize();
if (running_vsize_total > std::numeric_limits<int32_t>::max()) {
diff --git a/src/test/fuzz/timeoffsets.cpp b/src/test/fuzz/timeoffsets.cpp
index 019337a94a..dfa4dd705d 100644
--- a/src/test/fuzz/timeoffsets.cpp
+++ b/src/test/fuzz/timeoffsets.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <node/timeoffsets.h>
+#include <node/warnings.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/util/setup_common.h>
@@ -19,7 +20,8 @@ void initialize_timeoffsets()
FUZZ_TARGET(timeoffsets, .init = initialize_timeoffsets)
{
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
- TimeOffsets offsets{};
+ node::Warnings warnings{};
+ TimeOffsets offsets{warnings};
LIMITED_WHILE(fuzzed_data_provider.remaining_bytes() > 0, 4'000) {
(void)offsets.Median();
offsets.Add(std::chrono::seconds{fuzzed_data_provider.ConsumeIntegral<std::chrono::seconds::rep>()});
diff --git a/src/test/i2p_tests.cpp b/src/test/i2p_tests.cpp
index d7249d88f4..0512c6134f 100644
--- a/src/test/i2p_tests.cpp
+++ b/src/test/i2p_tests.cpp
@@ -39,15 +39,14 @@ public:
private:
const BCLog::Level m_prev_log_level;
- const std::function<std::unique_ptr<Sock>(const sa_family_t&)> m_create_sock_orig;
+ const decltype(CreateSock) m_create_sock_orig;
};
BOOST_FIXTURE_TEST_SUITE(i2p_tests, EnvTestingSetup)
BOOST_AUTO_TEST_CASE(unlimited_recv)
{
- // Mock CreateSock() to create MockSock.
- CreateSock = [](const sa_family_t&) {
+ CreateSock = [](int, int, int) {
return std::make_unique<StaticContentsSock>(std::string(i2p::sam::MAX_MSG_SIZE + 1, 'a'));
};
@@ -69,7 +68,7 @@ BOOST_AUTO_TEST_CASE(unlimited_recv)
BOOST_AUTO_TEST_CASE(listen_ok_accept_fail)
{
size_t num_sockets{0};
- CreateSock = [&num_sockets](const sa_family_t&) {
+ CreateSock = [&num_sockets](int, int, int) {
// clang-format off
++num_sockets;
// First socket is the control socket for creating the session.
@@ -133,9 +132,7 @@ BOOST_AUTO_TEST_CASE(listen_ok_accept_fail)
BOOST_AUTO_TEST_CASE(damaged_private_key)
{
- const auto CreateSockOrig = CreateSock;
-
- CreateSock = [](const sa_family_t&) {
+ CreateSock = [](int, int, int) {
return std::make_unique<StaticContentsSock>("HELLO REPLY RESULT=OK VERSION=3.1\n"
"SESSION STATUS RESULT=OK DESTINATION=\n");
};
@@ -172,8 +169,6 @@ BOOST_AUTO_TEST_CASE(damaged_private_key)
BOOST_CHECK(!session.Connect(CService{}, conn, proxy_error));
}
}
-
- CreateSock = CreateSockOrig;
}
BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/net_peer_connection_tests.cpp b/src/test/net_peer_connection_tests.cpp
index 00bc1fdb6a..5f38ce112c 100644
--- a/src/test/net_peer_connection_tests.cpp
+++ b/src/test/net_peer_connection_tests.cpp
@@ -84,7 +84,7 @@ static void AddPeer(NodeId& id, std::vector<CNode*>& nodes, PeerManager& peerman
BOOST_AUTO_TEST_CASE(test_addnode_getaddednodeinfo_and_connection_detection)
{
auto connman = std::make_unique<ConnmanTestMsg>(0x1337, 0x1337, *m_node.addrman, *m_node.netgroupman, Params());
- auto peerman = PeerManager::make(*connman, *m_node.addrman, nullptr, *m_node.chainman, *m_node.mempool, {});
+ auto peerman = PeerManager::make(*connman, *m_node.addrman, nullptr, *m_node.chainman, *m_node.mempool, *m_node.warnings, {});
NodeId id{0};
std::vector<CNode*> nodes;
diff --git a/src/test/node_warnings_tests.cpp b/src/test/node_warnings_tests.cpp
new file mode 100644
index 0000000000..2bcc2c95ed
--- /dev/null
+++ b/src/test/node_warnings_tests.cpp
@@ -0,0 +1,52 @@
+// Copyright (c) 2024-present The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+//
+
+#include <node/warnings.h>
+#include <util/translation.h>
+
+#include <test/util/setup_common.h>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(node_warnings_tests, BasicTestingSetup)
+
+BOOST_AUTO_TEST_CASE(warnings)
+{
+ node::Warnings warnings;
+ // On pre-release builds, a warning is generated automatically
+ warnings.Unset(node::Warning::PRE_RELEASE_TEST_BUILD);
+
+ // For these tests, we don't care what the exact warnings are, so
+ // just refer to them as warning_1 and warning_2
+ const auto warning_1{node::Warning::CLOCK_OUT_OF_SYNC};
+ const auto warning_2{node::Warning::FATAL_INTERNAL_ERROR};
+
+ // Ensure we start without any warnings
+ BOOST_CHECK(warnings.GetMessages().size() == 0);
+ // Add two warnings
+ BOOST_CHECK(warnings.Set(warning_1, _("warning 1")));
+ BOOST_CHECK(warnings.Set(warning_2, _("warning 2")));
+ // Unset the second one
+ BOOST_CHECK(warnings.Unset(warning_2));
+ // Since it's already been unset, this should return false
+ BOOST_CHECK(!warnings.Unset(warning_2));
+ // We should now be able to set warning_2 again
+ BOOST_CHECK(warnings.Set(warning_2, _("warning 2 - revision 1")));
+ // Setting warning_2 again should return false since it's already set
+ BOOST_CHECK(!warnings.Set(warning_2, _("warning 2 - revision 2")));
+
+ // Verify messages are correct
+ const auto messages{warnings.GetMessages()};
+ BOOST_CHECK(messages.size() == 2);
+ BOOST_CHECK(messages[0].original == "warning 1");
+ BOOST_CHECK(messages[1].original == "warning 2 - revision 1");
+
+ // Clearing all warnings should also clear all messages
+ BOOST_CHECK(warnings.Unset(warning_1));
+ BOOST_CHECK(warnings.Unset(warning_2));
+ BOOST_CHECK(warnings.GetMessages().size() == 0);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
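The pattern exercised by this new test is the same one the TimeOffsets and kernel-notification changes elsewhere in this diff rely on: a component keeps a reference to the shared node::Warnings instance and toggles a warning identifier that belongs to it. A minimal sketch under that assumption (the component name and warning text are hypothetical; the Set/Unset semantics are those shown in the test above):

    #include <node/warnings.h>
    #include <util/translation.h>

    // Hypothetical component that raises and clears a single warning.
    class ClockMonitor
    {
        node::Warnings& m_warnings;
    public:
        explicit ClockMonitor(node::Warnings& warnings) : m_warnings{warnings} {}

        void OnClockDrift()
        {
            // Set() returns false if the warning is already active.
            m_warnings.Set(node::Warning::CLOCK_OUT_OF_SYNC, _("example: clock appears to be out of sync"));
        }

        void OnClockRecovered()
        {
            // Unset() returns false if the warning was not active.
            m_warnings.Unset(node::Warning::CLOCK_OUT_OF_SYNC);
        }
    };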
diff --git a/src/test/peerman_tests.cpp b/src/test/peerman_tests.cpp
index 28866695bc..6de373eef2 100644
--- a/src/test/peerman_tests.cpp
+++ b/src/test/peerman_tests.cpp
@@ -20,7 +20,8 @@ static void mineBlock(const node::NodeContext& node, std::chrono::seconds block_
{
auto curr_time = GetTime<std::chrono::seconds>();
SetMockTime(block_time); // update time so the block is created with it
- CBlock block = node::BlockAssembler{node.chainman->ActiveChainstate(), nullptr}.CreateNewBlock(CScript() << OP_TRUE)->block;
+ node::BlockAssembler::Options options;
+ CBlock block = node::BlockAssembler{node.chainman->ActiveChainstate(), nullptr, options}.CreateNewBlock(CScript() << OP_TRUE)->block;
while (!CheckProofOfWork(block.GetHash(), block.nBits, node.chainman->GetConsensus())) ++block.nNonce;
block.fChecked = true; // little speedup
SetMockTime(curr_time); // process block at current time
@@ -31,7 +32,7 @@ static void mineBlock(const node::NodeContext& node, std::chrono::seconds block_
// Verifying when network-limited peer connections are desirable based on the node's proximity to the tip
BOOST_AUTO_TEST_CASE(connections_desirable_service_flags)
{
- std::unique_ptr<PeerManager> peerman = PeerManager::make(*m_node.connman, *m_node.addrman, nullptr, *m_node.chainman, *m_node.mempool, {});
+ std::unique_ptr<PeerManager> peerman = PeerManager::make(*m_node.connman, *m_node.addrman, nullptr, *m_node.chainman, *m_node.mempool, *m_node.warnings, {});
auto consensus = m_node.chainman->GetParams().GetConsensus();
// Check we start connecting to full nodes
diff --git a/src/test/timeoffsets_tests.cpp b/src/test/timeoffsets_tests.cpp
index 008f1a9db9..5f85a5feeb 100644
--- a/src/test/timeoffsets_tests.cpp
+++ b/src/test/timeoffsets_tests.cpp
@@ -4,6 +4,7 @@
//
#include <node/timeoffsets.h>
+#include <node/warnings.h>
#include <test/util/setup_common.h>
#include <boost/test/unit_test.hpp>
@@ -24,7 +25,8 @@ BOOST_FIXTURE_TEST_SUITE(timeoffsets_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(timeoffsets)
{
- TimeOffsets offsets{};
+ node::Warnings warnings{};
+ TimeOffsets offsets{warnings};
BOOST_CHECK(offsets.Median() == 0s);
AddMulti(offsets, {{0s, -1s, -2s, -3s}});
@@ -50,7 +52,8 @@ BOOST_AUTO_TEST_CASE(timeoffsets)
static bool IsWarningRaised(const std::vector<std::chrono::seconds>& check_offsets)
{
- TimeOffsets offsets{};
+ node::Warnings warnings{};
+ TimeOffsets offsets{warnings};
AddMulti(offsets, check_offsets);
return offsets.WarnIfOutOfSync();
}
diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp
index 11d5a8ae55..34176626f0 100644
--- a/src/test/transaction_tests.cpp
+++ b/src/test/transaction_tests.cpp
@@ -72,17 +72,16 @@ static std::map<std::string, unsigned int> mapFlagNames = {
unsigned int ParseScriptFlags(std::string strFlags)
{
- if (strFlags.empty() || strFlags == "NONE") return 0;
- unsigned int flags = 0;
- std::vector<std::string> words = SplitString(strFlags, ',');
+ unsigned int flags = SCRIPT_VERIFY_NONE;
+ if (strFlags.empty() || strFlags == "NONE") return flags;
+ std::vector<std::string> words = SplitString(strFlags, ',');
for (const std::string& word : words)
{
if (!mapFlagNames.count(word))
BOOST_ERROR("Bad test: unknown verification flag '" << word << "'");
flags |= mapFlagNames[word];
}
-
return flags;
}
@@ -98,7 +97,7 @@ bool CheckMapFlagNames()
std::string FormatScriptFlags(unsigned int flags)
{
- if (flags == 0) {
+ if (flags == SCRIPT_VERIFY_NONE) {
return "";
}
std::string ret;
@@ -370,6 +369,41 @@ BOOST_AUTO_TEST_CASE(tx_invalid)
}
}
+BOOST_AUTO_TEST_CASE(tx_no_inputs)
+{
+ CMutableTransaction empty;
+
+ TxValidationState state;
+ BOOST_CHECK_MESSAGE(!CheckTransaction(CTransaction(empty), state), "Transaction with no inputs should be invalid.");
+ BOOST_CHECK(state.GetRejectReason() == "bad-txns-vin-empty");
+}
+
+BOOST_AUTO_TEST_CASE(tx_oversized)
+{
+ auto createTransaction = [](size_t payloadSize) {
+ CMutableTransaction tx;
+ tx.vin.resize(1);
+ tx.vout.emplace_back(1, CScript() << OP_RETURN << std::vector<unsigned char>(payloadSize));
+ return CTransaction(tx);
+ };
+ const auto maxTransactionSize = MAX_BLOCK_WEIGHT / WITNESS_SCALE_FACTOR;
+ const auto oversizedTransactionBaseSize = ::GetSerializeSize(TX_NO_WITNESS(createTransaction(maxTransactionSize))) - maxTransactionSize;
+
+ auto maxPayloadSize = maxTransactionSize - oversizedTransactionBaseSize;
+ {
+ TxValidationState state;
+ CheckTransaction(createTransaction(maxPayloadSize), state);
+ BOOST_CHECK(state.GetRejectReason() != "bad-txns-oversize");
+ }
+
+ maxPayloadSize += 1;
+ {
+ TxValidationState state;
+ BOOST_CHECK_MESSAGE(!CheckTransaction(createTransaction(maxPayloadSize), state), "Oversized transaction should be invalid");
+ BOOST_CHECK(state.GetRejectReason() == "bad-txns-oversize");
+ }
+}
+
BOOST_AUTO_TEST_CASE(basic_transaction_tests)
{
// Random real transaction (e2769b09e784f32f62ef849763d4f45b98e07ba658647343b915ff832b110436)
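For orientation, the size boundary probed by tx_oversized follows from the constants (assuming the usual Bitcoin Core values MAX_BLOCK_WEIGHT = 4,000,000 and WITNESS_SCALE_FACTOR = 4): maxTransactionSize = 4,000,000 / 4 = 1,000,000 bytes of non-witness serialization. The helper measures the fixed overhead of a one-input, single-OP_RETURN-output transaction and sizes the payload so the whole transaction serializes to exactly 1,000,000 bytes; at that size the test only asserts that the reject reason is not "bad-txns-oversize" (the transaction can still fail other CheckTransaction rules), while one extra payload byte must produce exactly "bad-txns-oversize".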
@@ -615,11 +649,11 @@ BOOST_AUTO_TEST_CASE(test_witness)
// Normal pay-to-compressed-pubkey.
CreateCreditAndSpend(keystore, scriptPubkey1, output1, input1);
CreateCreditAndSpend(keystore, scriptPubkey2, output2, input2);
- CheckWithFlag(output1, input1, 0, true);
+ CheckWithFlag(output1, input1, SCRIPT_VERIFY_NONE, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
- CheckWithFlag(output1, input2, 0, false);
+ CheckWithFlag(output1, input2, SCRIPT_VERIFY_NONE, false);
CheckWithFlag(output1, input2, SCRIPT_VERIFY_P2SH, false);
CheckWithFlag(output1, input2, SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false);
CheckWithFlag(output1, input2, STANDARD_SCRIPT_VERIFY_FLAGS, false);
@@ -628,11 +662,11 @@ BOOST_AUTO_TEST_CASE(test_witness)
CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(scriptPubkey1)), output1, input1);
CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(scriptPubkey2)), output2, input2);
ReplaceRedeemScript(input2.vin[0].scriptSig, scriptPubkey1);
- CheckWithFlag(output1, input1, 0, true);
+ CheckWithFlag(output1, input1, SCRIPT_VERIFY_NONE, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
- CheckWithFlag(output1, input2, 0, true);
+ CheckWithFlag(output1, input2, SCRIPT_VERIFY_NONE, true);
CheckWithFlag(output1, input2, SCRIPT_VERIFY_P2SH, false);
CheckWithFlag(output1, input2, SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false);
CheckWithFlag(output1, input2, STANDARD_SCRIPT_VERIFY_FLAGS, false);
@@ -640,11 +674,11 @@ BOOST_AUTO_TEST_CASE(test_witness)
// Witness pay-to-compressed-pubkey (v0).
CreateCreditAndSpend(keystore, destination_script_1, output1, input1);
CreateCreditAndSpend(keystore, destination_script_2, output2, input2);
- CheckWithFlag(output1, input1, 0, true);
+ CheckWithFlag(output1, input1, SCRIPT_VERIFY_NONE, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
- CheckWithFlag(output1, input2, 0, true);
+ CheckWithFlag(output1, input2, SCRIPT_VERIFY_NONE, true);
CheckWithFlag(output1, input2, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input2, SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false);
CheckWithFlag(output1, input2, STANDARD_SCRIPT_VERIFY_FLAGS, false);
@@ -653,11 +687,11 @@ BOOST_AUTO_TEST_CASE(test_witness)
CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(destination_script_1)), output1, input1);
CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(destination_script_2)), output2, input2);
ReplaceRedeemScript(input2.vin[0].scriptSig, destination_script_1);
- CheckWithFlag(output1, input1, 0, true);
+ CheckWithFlag(output1, input1, SCRIPT_VERIFY_NONE, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
- CheckWithFlag(output1, input2, 0, true);
+ CheckWithFlag(output1, input2, SCRIPT_VERIFY_NONE, true);
CheckWithFlag(output1, input2, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input2, SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false);
CheckWithFlag(output1, input2, STANDARD_SCRIPT_VERIFY_FLAGS, false);
@@ -665,11 +699,11 @@ BOOST_AUTO_TEST_CASE(test_witness)
// Normal pay-to-uncompressed-pubkey.
CreateCreditAndSpend(keystore, scriptPubkey1L, output1, input1);
CreateCreditAndSpend(keystore, scriptPubkey2L, output2, input2);
- CheckWithFlag(output1, input1, 0, true);
+ CheckWithFlag(output1, input1, SCRIPT_VERIFY_NONE, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
- CheckWithFlag(output1, input2, 0, false);
+ CheckWithFlag(output1, input2, SCRIPT_VERIFY_NONE, false);
CheckWithFlag(output1, input2, SCRIPT_VERIFY_P2SH, false);
CheckWithFlag(output1, input2, SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false);
CheckWithFlag(output1, input2, STANDARD_SCRIPT_VERIFY_FLAGS, false);
@@ -678,11 +712,11 @@ BOOST_AUTO_TEST_CASE(test_witness)
CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(scriptPubkey1L)), output1, input1);
CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(scriptPubkey2L)), output2, input2);
ReplaceRedeemScript(input2.vin[0].scriptSig, scriptPubkey1L);
- CheckWithFlag(output1, input1, 0, true);
+ CheckWithFlag(output1, input1, SCRIPT_VERIFY_NONE, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, true);
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
- CheckWithFlag(output1, input2, 0, true);
+ CheckWithFlag(output1, input2, SCRIPT_VERIFY_NONE, true);
CheckWithFlag(output1, input2, SCRIPT_VERIFY_P2SH, false);
CheckWithFlag(output1, input2, SCRIPT_VERIFY_WITNESS | SCRIPT_VERIFY_P2SH, false);
CheckWithFlag(output1, input2, STANDARD_SCRIPT_VERIFY_FLAGS, false);
@@ -697,19 +731,19 @@ BOOST_AUTO_TEST_CASE(test_witness)
// Normal 2-of-2 multisig
CreateCreditAndSpend(keystore, scriptMulti, output1, input1, false);
- CheckWithFlag(output1, input1, 0, false);
+ CheckWithFlag(output1, input1, SCRIPT_VERIFY_NONE, false);
CreateCreditAndSpend(keystore2, scriptMulti, output2, input2, false);
- CheckWithFlag(output2, input2, 0, false);
+ CheckWithFlag(output2, input2, SCRIPT_VERIFY_NONE, false);
BOOST_CHECK(*output1 == *output2);
UpdateInput(input1.vin[0], CombineSignatures(input1, input2, output1));
CheckWithFlag(output1, input1, STANDARD_SCRIPT_VERIFY_FLAGS, true);
// P2SH 2-of-2 multisig
CreateCreditAndSpend(keystore, GetScriptForDestination(ScriptHash(scriptMulti)), output1, input1, false);
- CheckWithFlag(output1, input1, 0, true);
+ CheckWithFlag(output1, input1, SCRIPT_VERIFY_NONE, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH, false);
CreateCreditAndSpend(keystore2, GetScriptForDestination(ScriptHash(scriptMulti)), output2, input2, false);
- CheckWithFlag(output2, input2, 0, true);
+ CheckWithFlag(output2, input2, SCRIPT_VERIFY_NONE, true);
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH, false);
BOOST_CHECK(*output1 == *output2);
UpdateInput(input1.vin[0], CombineSignatures(input1, input2, output1));
@@ -718,10 +752,10 @@ BOOST_AUTO_TEST_CASE(test_witness)
// Witness 2-of-2 multisig
CreateCreditAndSpend(keystore, destination_script_multi, output1, input1, false);
- CheckWithFlag(output1, input1, 0, true);
+ CheckWithFlag(output1, input1, SCRIPT_VERIFY_NONE, true);
CheckWithFlag(output1, input1, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false);
CreateCreditAndSpend(keystore2, destination_script_multi, output2, input2, false);
- CheckWithFlag(output2, input2, 0, true);
+ CheckWithFlag(output2, input2, SCRIPT_VERIFY_NONE, true);
CheckWithFlag(output2, input2, SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, false);
BOOST_CHECK(*output1 == *output2);
UpdateInput(input1.vin[0], CombineSignatures(input1, input2, output1));
diff --git a/src/test/txpackage_tests.cpp b/src/test/txpackage_tests.cpp
index d5f3412aed..478121cc6f 100644
--- a/src/test/txpackage_tests.cpp
+++ b/src/test/txpackage_tests.cpp
@@ -6,6 +6,7 @@
#include <key_io.h>
#include <policy/packages.h>
#include <policy/policy.h>
+#include <policy/rbf.h>
#include <primitives/transaction.h>
#include <script/script.h>
#include <serialize.h>
@@ -938,4 +939,147 @@ BOOST_FIXTURE_TEST_CASE(package_cpfp_tests, TestChain100Setup)
BOOST_CHECK_EQUAL(m_node.mempool->size(), expected_pool_size);
}
}
+
+BOOST_FIXTURE_TEST_CASE(package_rbf_tests, TestChain100Setup)
+{
+ mineBlocks(5);
+ LOCK(::cs_main);
+ size_t expected_pool_size = m_node.mempool->size();
+ CKey child_key{GenerateRandomKey()};
+ CScript parent_spk = GetScriptForDestination(WitnessV0KeyHash(child_key.GetPubKey()));
+ CKey grandchild_key{GenerateRandomKey()};
+ CScript child_spk = GetScriptForDestination(WitnessV0KeyHash(grandchild_key.GetPubKey()));
+
+ const CAmount coinbase_value{50 * COIN};
+ // Test that de-duplication works. This is not actually package rbf.
+ {
+ // 1 parent paying 200sat, 1 child paying 300sat
+ Package package1;
+ // 1 parent paying 200sat, 1 child paying 500sat
+ Package package2;
+ // Package1 and package2 have the same parent. The children conflict.
+ auto mtx_parent = CreateValidMempoolTransaction(/*input_transaction=*/m_coinbase_txns[0], /*input_vout=*/0,
+ /*input_height=*/0, /*input_signing_key=*/coinbaseKey,
+ /*output_destination=*/parent_spk,
+ /*output_amount=*/coinbase_value - low_fee_amt, /*submit=*/false);
+ CTransactionRef tx_parent = MakeTransactionRef(mtx_parent);
+ package1.push_back(tx_parent);
+ package2.push_back(tx_parent);
+
+ CTransactionRef tx_child_1 = MakeTransactionRef(CreateValidMempoolTransaction(tx_parent, 0, 101, child_key, child_spk, coinbase_value - low_fee_amt - 300, false));
+ package1.push_back(tx_child_1);
+ CTransactionRef tx_child_2 = MakeTransactionRef(CreateValidMempoolTransaction(tx_parent, 0, 101, child_key, child_spk, coinbase_value - low_fee_amt - 500, false));
+ package2.push_back(tx_child_2);
+
+ LOCK(m_node.mempool->cs);
+ const auto submit1 = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package1, /*test_accept=*/false, std::nullopt);
+ if (auto err_1{CheckPackageMempoolAcceptResult(package1, submit1, /*expect_valid=*/true, m_node.mempool.get())}) {
+ BOOST_ERROR(err_1.value());
+ }
+
+ // Check precise ResultTypes and mempool size. We know it_parent_1 and it_child_1 exist from the call above
+ auto it_parent_1 = submit1.m_tx_results.find(tx_parent->GetWitnessHash());
+ auto it_child_1 = submit1.m_tx_results.find(tx_child_1->GetWitnessHash());
+ BOOST_CHECK_EQUAL(it_parent_1->second.m_result_type, MempoolAcceptResult::ResultType::VALID);
+ BOOST_CHECK_EQUAL(it_child_1->second.m_result_type, MempoolAcceptResult::ResultType::VALID);
+ expected_pool_size += 2;
+ BOOST_CHECK_EQUAL(m_node.mempool->size(), expected_pool_size);
+
+ const auto submit2 = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package2, /*test_accept=*/false, std::nullopt);
+ if (auto err_2{CheckPackageMempoolAcceptResult(package2, submit2, /*expect_valid=*/true, m_node.mempool.get())}) {
+ BOOST_ERROR(err_2.value());
+ }
+
+ // Check precise ResultTypes and mempool size. We know it_parent_2 and it_child_2 exist from the call above
+ auto it_parent_2 = submit2.m_tx_results.find(tx_parent->GetWitnessHash());
+ auto it_child_2 = submit2.m_tx_results.find(tx_child_2->GetWitnessHash());
+ BOOST_CHECK_EQUAL(it_parent_2->second.m_result_type, MempoolAcceptResult::ResultType::MEMPOOL_ENTRY);
+ BOOST_CHECK_EQUAL(it_child_2->second.m_result_type, MempoolAcceptResult::ResultType::VALID);
+ BOOST_CHECK_EQUAL(m_node.mempool->size(), expected_pool_size);
+
+ // child1 has been replaced
+ BOOST_CHECK(!m_node.mempool->exists(GenTxid::Txid(tx_child_1->GetHash())));
+ }
+
+ // Test package rbf.
+ {
+ CTransactionRef tx_parent_1 = MakeTransactionRef(CreateValidMempoolTransaction(
+ m_coinbase_txns[1], /*input_vout=*/0, /*input_height=*/0,
+ coinbaseKey, parent_spk, coinbase_value - 200, /*submit=*/false));
+ CTransactionRef tx_child_1 = MakeTransactionRef(CreateValidMempoolTransaction(
+ tx_parent_1, /*input_vout=*/0, /*input_height=*/101,
+ child_key, child_spk, coinbase_value - 400, /*submit=*/false));
+
+ CTransactionRef tx_parent_2 = MakeTransactionRef(CreateValidMempoolTransaction(
+ m_coinbase_txns[1], /*input_vout=*/0, /*input_height=*/0,
+ coinbaseKey, parent_spk, coinbase_value - 800, /*submit=*/false));
+ CTransactionRef tx_child_2 = MakeTransactionRef(CreateValidMempoolTransaction(
+ tx_parent_2, /*input_vout=*/0, /*input_height=*/101,
+ child_key, child_spk, coinbase_value - 800 - 200, /*submit=*/false));
+
+ CTransactionRef tx_parent_3 = MakeTransactionRef(CreateValidMempoolTransaction(
+ m_coinbase_txns[1], /*input_vout=*/0, /*input_height=*/0,
+ coinbaseKey, parent_spk, coinbase_value - 199, /*submit=*/false));
+ CTransactionRef tx_child_3 = MakeTransactionRef(CreateValidMempoolTransaction(
+ tx_parent_3, /*input_vout=*/0, /*input_height=*/101,
+ child_key, child_spk, coinbase_value - 199 - 1300, /*submit=*/false));
+
+ // In all packages, the parents conflict with each other
+ BOOST_CHECK(tx_parent_1->GetHash() != tx_parent_2->GetHash() && tx_parent_2->GetHash() != tx_parent_3->GetHash());
+
+ // 1 parent paying 200sat, 1 child paying 200sat.
+ Package package1{tx_parent_1, tx_child_1};
+ // 1 parent paying 800sat, 1 child paying 200sat.
+ Package package2{tx_parent_2, tx_child_2};
+ // 1 parent paying 199sat, 1 child paying 1300sat.
+ Package package3{tx_parent_3, tx_child_3};
+
+ const auto submit1 = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package1, false, std::nullopt);
+ if (auto err_1{CheckPackageMempoolAcceptResult(package1, submit1, /*expect_valid=*/true, m_node.mempool.get())}) {
+ BOOST_ERROR(err_1.value());
+ }
+ auto it_parent_1 = submit1.m_tx_results.find(tx_parent_1->GetWitnessHash());
+ auto it_child_1 = submit1.m_tx_results.find(tx_child_1->GetWitnessHash());
+ BOOST_CHECK_EQUAL(it_parent_1->second.m_result_type, MempoolAcceptResult::ResultType::VALID);
+ BOOST_CHECK_EQUAL(it_child_1->second.m_result_type, MempoolAcceptResult::ResultType::VALID);
+ expected_pool_size += 2;
+ BOOST_CHECK_EQUAL(m_node.mempool->size(), expected_pool_size);
+
+ // This replacement is actually not package rbf; the parent carries enough fees
+ // to replace the entire package on its own.
+ const auto submit2 = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package2, false, std::nullopt);
+ if (auto err_2{CheckPackageMempoolAcceptResult(package2, submit2, /*expect_valid=*/true, m_node.mempool.get())}) {
+ BOOST_ERROR(err_2.value());
+ }
+ auto it_parent_2 = submit2.m_tx_results.find(tx_parent_2->GetWitnessHash());
+ auto it_child_2 = submit2.m_tx_results.find(tx_child_2->GetWitnessHash());
+ BOOST_CHECK_EQUAL(it_parent_2->second.m_result_type, MempoolAcceptResult::ResultType::VALID);
+ BOOST_CHECK_EQUAL(it_child_2->second.m_result_type, MempoolAcceptResult::ResultType::VALID);
+ BOOST_CHECK_EQUAL(m_node.mempool->size(), expected_pool_size);
+
+ // Package RBF, in which the replacement transaction's child sponsors the fees to meet RBF feerate rules
+ const auto submit3 = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package3, false, std::nullopt);
+ if (auto err_3{CheckPackageMempoolAcceptResult(package3, submit3, /*expect_valid=*/true, m_node.mempool.get())}) {
+ BOOST_ERROR(err_3.value());
+ }
+ auto it_parent_3 = submit3.m_tx_results.find(tx_parent_3->GetWitnessHash());
+ auto it_child_3 = submit3.m_tx_results.find(tx_child_3->GetWitnessHash());
+ BOOST_CHECK_EQUAL(it_parent_3->second.m_result_type, MempoolAcceptResult::ResultType::VALID);
+ BOOST_CHECK_EQUAL(it_child_3->second.m_result_type, MempoolAcceptResult::ResultType::VALID);
+
+ // package3 as a whole was considered the replacement for both package2 transactions
+ BOOST_CHECK(it_parent_3->second.m_replaced_transactions.size() == 2);
+ BOOST_CHECK(it_child_3->second.m_replaced_transactions.empty());
+
+ std::vector<Wtxid> expected_package3_wtxids({tx_parent_3->GetWitnessHash(), tx_child_3->GetWitnessHash()});
+ const auto package3_total_vsize{GetVirtualTransactionSize(*tx_parent_3) + GetVirtualTransactionSize(*tx_child_3)};
+ BOOST_CHECK(it_parent_3->second.m_wtxids_fee_calculations.value() == expected_package3_wtxids);
+ BOOST_CHECK(it_child_3->second.m_wtxids_fee_calculations.value() == expected_package3_wtxids);
+ BOOST_CHECK_EQUAL(it_parent_3->second.m_effective_feerate.value().GetFee(package3_total_vsize), 199 + 1300);
+ BOOST_CHECK_EQUAL(it_child_3->second.m_effective_feerate.value().GetFee(package3_total_vsize), 199 + 1300);
+
+ BOOST_CHECK_EQUAL(m_node.mempool->size(), expected_pool_size);
+ }
+
+}
BOOST_AUTO_TEST_SUITE_END()
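A worked check of the fee arithmetic in the package RBF case above: package2 leaves the mempool holding 800 sat (parent) + 200 sat (child) = 1,000 sat of fees, while package3 offers 199 sat + 1,300 sat = 1,499 sat, which is what the final effective-feerate assertions confirm. Assuming the default incremental relay feerate of 1,000 sat/kvB and two small transactions totalling a few hundred vbytes, the anti-DoS rule requires roughly the 1,000 sat of evicted fees plus a few hundred sat of incremental relay fee over the replacement's vsize, so the 1,499 sat total clears it. The 199 sat parent could never do this on its own; the child's fees must be counted alongside it, with the package feerate above the parent's own feerate, which is precisely the 1-parent-1-child package RBF path.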
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 9e9db32351..cc7b2d6546 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -31,6 +31,7 @@
#include <node/miner.h>
#include <node/peerman_args.h>
#include <node/validation_cache_args.h>
+#include <node/warnings.h>
#include <noui.h>
#include <policy/fees.h>
#include <policy/fees_args.h>
@@ -182,6 +183,7 @@ BasicTestingSetup::BasicTestingSetup(const ChainType chainType, const std::vecto
InitLogging(*m_node.args);
AppInitParameterInteraction(*m_node.args);
LogInstance().StartLogging();
+ m_node.warnings = std::make_unique<node::Warnings>();
m_node.kernel = std::make_unique<kernel::Context>();
m_node.ecc_context = std::make_unique<ECC_Context>();
SetupEnvironment();
@@ -230,10 +232,11 @@ ChainTestingSetup::ChainTestingSetup(const ChainType chainType, const std::vecto
bilingual_str error{};
m_node.mempool = std::make_unique<CTxMemPool>(MemPoolOptionsForTest(m_node), error);
Assert(error.empty());
+ m_node.warnings = std::make_unique<node::Warnings>();
m_cache_sizes = CalculateCacheSizes(m_args);
- m_node.notifications = std::make_unique<KernelNotifications>(*Assert(m_node.shutdown), m_node.exit_status);
+ m_node.notifications = std::make_unique<KernelNotifications>(*Assert(m_node.shutdown), m_node.exit_status, *Assert(m_node.warnings));
const ChainstateManager::Options chainman_opts{
.chainparams = chainparams,
@@ -322,7 +325,8 @@ TestingSetup::TestingSetup(
peerman_opts.deterministic_rng = true;
m_node.peerman = PeerManager::make(*m_node.connman, *m_node.addrman,
m_node.banman.get(), *m_node.chainman,
- *m_node.mempool, peerman_opts);
+ *m_node.mempool, *m_node.warnings,
+ peerman_opts);
{
CConnman::Options options;
@@ -370,7 +374,8 @@ CBlock TestChain100Setup::CreateBlock(
const CScript& scriptPubKey,
Chainstate& chainstate)
{
- CBlock block = BlockAssembler{chainstate, nullptr}.CreateNewBlock(scriptPubKey)->block;
+ BlockAssembler::Options options;
+ CBlock block = BlockAssembler{chainstate, nullptr, options}.CreateNewBlock(scriptPubKey)->block;
Assert(block.vtx.size() == 1);
for (const CMutableTransaction& tx : txns) {
diff --git a/src/test/util/txmempool.cpp b/src/test/util/txmempool.cpp
index 249ce9503c..94d50bba50 100644
--- a/src/test/util/txmempool.cpp
+++ b/src/test/util/txmempool.cpp
@@ -7,6 +7,7 @@
#include <chainparams.h>
#include <node/context.h>
#include <node/mempool_args.h>
+#include <policy/rbf.h>
#include <policy/v3_policy.h>
#include <txmempool.h>
#include <util/check.h>
@@ -68,6 +69,28 @@ std::optional<std::string> CheckPackageMempoolAcceptResult(const Package& txns,
return strprintf("tx %s unexpectedly failed: %s", wtxid.ToString(), atmp_result.m_state.ToString());
}
+ // Each subpackage is allowed MAX_REPLACEMENT_CANDIDATES replacements (only checking individually here)
+ if (atmp_result.m_replaced_transactions.size() > MAX_REPLACEMENT_CANDIDATES) {
+ return strprintf("tx %s replaced too many transactions",
+ wtxid.ToString());
+ }
+
+ // Replacements can't happen for subpackages larger than 2
+ if (!atmp_result.m_replaced_transactions.empty() &&
+ atmp_result.m_wtxids_fee_calculations.has_value() && atmp_result.m_wtxids_fee_calculations.value().size() > 2) {
+ return strprintf("tx %s was part of a too-large package RBF subpackage",
+ wtxid.ToString());
+ }
+
+ if (!atmp_result.m_replaced_transactions.empty() && mempool) {
+ LOCK(mempool->cs);
+ // If replacements occurred and the fee calculation covered 2 transactions, this was a package RBF and should result in a cluster of size 2
+ if (atmp_result.m_wtxids_fee_calculations.has_value() && atmp_result.m_wtxids_fee_calculations.value().size() == 2) {
+ const auto cluster = mempool->GatherClusters({tx->GetHash()});
+ if (cluster.size() != 2) return strprintf("tx %s has too many ancestors or descendants for a package rbf", wtxid.ToString());
+ }
+ }
+
// m_vsize and m_base_fees should exist iff the result was VALID or MEMPOOL_ENTRY
const bool mempool_entry{atmp_result.m_result_type == MempoolAcceptResult::ResultType::MEMPOOL_ENTRY};
if (atmp_result.m_base_fees.has_value() != (valid || mempool_entry)) {
@@ -108,6 +131,11 @@ std::optional<std::string> CheckPackageMempoolAcceptResult(const Package& txns,
return strprintf("wtxid %s should not be in mempool", wtxid.ToString());
}
}
+ for (const auto& tx_ref : atmp_result.m_replaced_transactions) {
+ if (mempool->exists(GenTxid::Txid(tx_ref->GetHash()))) {
+ return strprintf("tx %s should not be in mempool as it was replaced", tx_ref->GetWitnessHash().ToString());
+ }
+ }
}
}
return std::nullopt;
diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp
index 69f4e305ab..588ac60498 100644
--- a/src/test/validation_block_tests.cpp
+++ b/src/test/validation_block_tests.cpp
@@ -65,7 +65,8 @@ std::shared_ptr<CBlock> MinerTestingSetup::Block(const uint256& prev_hash)
static int i = 0;
static uint64_t time = Params().GenesisBlock().nTime;
- auto ptemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), m_node.mempool.get()}.CreateNewBlock(CScript{} << i++ << OP_TRUE);
+ BlockAssembler::Options options;
+ auto ptemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), m_node.mempool.get(), options}.CreateNewBlock(CScript{} << i++ << OP_TRUE);
auto pblock = std::make_shared<CBlock>(ptemplate->block);
pblock->hashPrevBlock = prev_hash;
pblock->nTime = ++time;
@@ -329,7 +330,8 @@ BOOST_AUTO_TEST_CASE(witness_commitment_index)
LOCK(Assert(m_node.chainman)->GetMutex());
CScript pubKey;
pubKey << 1 << OP_TRUE;
- auto ptemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), m_node.mempool.get()}.CreateNewBlock(pubKey);
+ BlockAssembler::Options options;
+ auto ptemplate = BlockAssembler{m_node.chainman->ActiveChainstate(), m_node.mempool.get(), options}.CreateNewBlock(pubKey);
CBlock pblock = ptemplate->block;
CTxOut witness;
diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp
index 1f6b7368a2..1641c4cd22 100644
--- a/src/test/validation_chainstatemanager_tests.cpp
+++ b/src/test/validation_chainstatemanager_tests.cpp
@@ -378,7 +378,7 @@ struct SnapshotTestSetup : TestChain100Setup {
LOCK(::cs_main);
chainman.ResetChainstates();
BOOST_CHECK_EQUAL(chainman.GetAll().size(), 0);
- m_node.notifications = std::make_unique<KernelNotifications>(*Assert(m_node.shutdown), m_node.exit_status);
+ m_node.notifications = std::make_unique<KernelNotifications>(*Assert(m_node.shutdown), m_node.exit_status, *Assert(m_node.warnings));
const ChainstateManager::Options chainman_opts{
.chainparams = ::Params(),
.datadir = chainman.m_options.datadir,
diff --git a/src/util/fs_helpers.cpp b/src/util/fs_helpers.cpp
index 8952f20f79..41c8fe3b8f 100644
--- a/src/util/fs_helpers.cpp
+++ b/src/util/fs_helpers.cpp
@@ -16,6 +16,7 @@
#include <fstream>
#include <map>
#include <memory>
+#include <optional>
#include <string>
#include <system_error>
#include <utility>
@@ -269,3 +270,42 @@ bool TryCreateDirectories(const fs::path& p)
// create_directories didn't create the directory, it had to have existed already
return false;
}
+
+std::string PermsToSymbolicString(fs::perms p)
+{
+ std::string perm_str(9, '-');
+
+ auto set_perm = [&](size_t pos, fs::perms required_perm, char letter) {
+ if ((p & required_perm) != fs::perms::none) {
+ perm_str[pos] = letter;
+ }
+ };
+
+ set_perm(0, fs::perms::owner_read, 'r');
+ set_perm(1, fs::perms::owner_write, 'w');
+ set_perm(2, fs::perms::owner_exec, 'x');
+ set_perm(3, fs::perms::group_read, 'r');
+ set_perm(4, fs::perms::group_write, 'w');
+ set_perm(5, fs::perms::group_exec, 'x');
+ set_perm(6, fs::perms::others_read, 'r');
+ set_perm(7, fs::perms::others_write, 'w');
+ set_perm(8, fs::perms::others_exec, 'x');
+
+ return perm_str;
+}
+
+std::optional<fs::perms> InterpretPermString(const std::string& s)
+{
+ if (s == "owner") {
+ return fs::perms::owner_read | fs::perms::owner_write;
+ } else if (s == "group") {
+ return fs::perms::owner_read | fs::perms::owner_write |
+ fs::perms::group_read;
+ } else if (s == "all") {
+ return fs::perms::owner_read | fs::perms::owner_write |
+ fs::perms::group_read |
+ fs::perms::others_read;
+ } else {
+ return std::nullopt;
+ }
+}
diff --git a/src/util/fs_helpers.h b/src/util/fs_helpers.h
index ea3778eac3..28dd6d979d 100644
--- a/src/util/fs_helpers.h
+++ b/src/util/fs_helpers.h
@@ -12,6 +12,7 @@
#include <cstdio>
#include <iosfwd>
#include <limits>
+#include <optional>
/**
* Ensure file contents are fully committed to disk, using a platform-specific
@@ -62,6 +63,19 @@ void ReleaseDirectoryLocks();
bool TryCreateDirectories(const fs::path& p);
fs::path GetDefaultDataDir();
+/** Convert fs::perms to a symbolic string of the form 'rwxrwxrwx'
+ *
+ * @param[in] p the perms to be converted
+ * @return Symbolic permissions string
+ */
+std::string PermsToSymbolicString(fs::perms p);
+/** Interpret a custom permissions level string as fs::perms
+ *
+ * @param[in] s Permission level string
+ * @return Permissions as fs::perms
+ */
+std::optional<fs::perms> InterpretPermString(const std::string& s);
+
#ifdef WIN32
fs::path GetSpecialFolderPath(int nFolder, bool fCreate = true);
#endif
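Taken together, the two new helpers round-trip a coarse permission level to fs::perms and then to a human-readable string. A small illustrative sketch (only the mappings defined above are assumed; the level string is arbitrary):

    #include <util/fs_helpers.h>

    #include <cassert>
    #include <optional>
    #include <string>

    void PermsExample()
    {
        // "group" maps to owner read/write plus group read.
        const std::optional<fs::perms> perms{InterpretPermString("group")};
        assert(perms.has_value());
        const std::string symbolic{PermsToSymbolicString(*perms)};
        assert(symbolic == "rw-r-----"); // owner rw, group r, others none
    }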
diff --git a/src/validation.cpp b/src/validation.cpp
index e066ee16cb..3e9ba08bb1 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -27,14 +27,15 @@
#include <kernel/mempool_entry.h>
#include <kernel/messagestartchars.h>
#include <kernel/notifications_interface.h>
+#include <kernel/warning.h>
#include <logging.h>
#include <logging/timer.h>
#include <node/blockstorage.h>
#include <node/utxo_snapshot.h>
-#include <policy/v3_policy.h>
#include <policy/policy.h>
#include <policy/rbf.h>
#include <policy/settings.h>
+#include <policy/v3_policy.h>
#include <pow.h>
#include <primitives/block.h>
#include <primitives/transaction.h>
@@ -57,11 +58,11 @@
#include <util/result.h>
#include <util/signalinterrupt.h>
#include <util/strencodings.h>
+#include <util/string.h>
#include <util/time.h>
#include <util/trace.h>
#include <util/translation.h>
#include <validationinterface.h>
-#include <warnings.h>
#include <algorithm>
#include <cassert>
@@ -524,7 +525,7 @@ public:
/* m_bypass_limits */ false,
/* m_coins_to_uncache */ coins_to_uncache,
/* m_test_accept */ false,
- /* m_allow_replacement */ false,
+ /* m_allow_replacement */ true,
/* m_allow_sibling_eviction */ false,
/* m_package_submission */ true,
/* m_package_feerates */ true,
@@ -602,8 +603,8 @@ public:
/**
* Submission of a subpackage.
* If subpackage size == 1, calls AcceptSingleTransaction() with adjusted ATMPArgs to avoid
- * package policy restrictions like no CPFP carve out (PackageMempoolChecks) and disabled RBF
- * (m_allow_replacement), and creates a PackageMempoolAcceptResult wrapping the result.
+ * package policy restrictions like no CPFP carve out (PackageMempoolChecks)
+ * and creates a PackageMempoolAcceptResult wrapping the result.
*
* If subpackage size > 1, calls AcceptMultipleTransactions() with the provided ATMPArgs.
*
@@ -666,12 +667,13 @@ private:
// only tests that are fast should be done here (to avoid CPU DoS).
bool PreChecks(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
- // Run checks for mempool replace-by-fee.
+ // Run checks for mempool replace-by-fee, only used in AcceptSingleTransaction.
bool ReplacementChecks(Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
// Enforce package mempool ancestor/descendant limits (distinct from individual
- // ancestor/descendant limits done in PreChecks).
+ // ancestor/descendant limits done in PreChecks) and run Package RBF checks.
bool PackageMempoolChecks(const std::vector<CTransactionRef>& txns,
+ std::vector<Workspace>& workspaces,
int64_t total_vsize,
PackageValidationState& package_state) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs);
@@ -949,7 +951,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
ws.m_iters_conflicting = m_pool.GetIterSet(ws.m_conflicts);
// Note that these modifications are only applicable to single transaction scenarios;
- // carve-outs and package RBF are disabled for multi-transaction evaluations.
+ // carve-outs are disabled for multi-transaction evaluations.
CTxMemPool::Limits maybe_rbf_limits = m_pool.m_opts.limits;
// Calculate in-mempool ancestors, up to a limit.
@@ -1088,10 +1090,9 @@ bool MemPoolAccept::ReplacementChecks(Workspace& ws)
// descendant transaction of a direct conflict to pay a higher feerate than the transaction that
// might replace them, under these rules.
if (const auto err_string{PaysMoreThanConflicts(ws.m_iters_conflicting, newFeeRate, hash)}) {
- // Even though this is a fee-related failure, this result is TX_MEMPOOL_POLICY, not
- // TX_RECONSIDERABLE, because it cannot be bypassed using package validation.
- // This must be changed if package RBF is enabled.
- return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
+ // This fee-related failure is TX_RECONSIDERABLE because validating in a package may change
+ // the result.
+ return state.Invalid(TxValidationResult::TX_RECONSIDERABLE,
strprintf("insufficient fee%s", ws.m_sibling_eviction ? " (including sibling eviction)" : ""), *err_string);
}
@@ -1116,16 +1117,15 @@ bool MemPoolAccept::ReplacementChecks(Workspace& ws)
}
if (const auto err_string{PaysForRBF(m_subpackage.m_conflicting_fees, ws.m_modified_fees, ws.m_vsize,
m_pool.m_opts.incremental_relay_feerate, hash)}) {
- // Even though this is a fee-related failure, this result is TX_MEMPOOL_POLICY, not
- // TX_RECONSIDERABLE, because it cannot be bypassed using package validation.
- // This must be changed if package RBF is enabled.
- return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY,
+ // Result may change in a package context
+ return state.Invalid(TxValidationResult::TX_RECONSIDERABLE,
strprintf("insufficient fee%s", ws.m_sibling_eviction ? " (including sibling eviction)" : ""), *err_string);
}
return true;
}
bool MemPoolAccept::PackageMempoolChecks(const std::vector<CTransactionRef>& txns,
+ std::vector<Workspace>& workspaces,
const int64_t total_vsize,
PackageValidationState& package_state)
{
@@ -1136,12 +1136,88 @@ bool MemPoolAccept::PackageMempoolChecks(const std::vector<CTransactionRef>& txn
assert(std::all_of(txns.cbegin(), txns.cend(), [this](const auto& tx)
{ return !m_pool.exists(GenTxid::Txid(tx->GetHash()));}));
+ assert(txns.size() == workspaces.size());
+
auto result = m_pool.CheckPackageLimits(txns, total_vsize);
if (!result) {
// This is a package-wide error, separate from an individual transaction error.
return package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-mempool-limits", util::ErrorString(result).original);
}
- return true;
+
+ // No conflicts means we're finished. Further checks are all RBF-only.
+ if (!m_subpackage.m_rbf) return true;
+
+ // We're in package RBF context; replacement proposal must be size 2
+ if (workspaces.size() != 2 || !Assume(IsChildWithParents(txns))) {
+ return package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package RBF failed: package must be 1-parent-1-child");
+ }
+
+ // If the package has in-mempool ancestors, we won't consider a package RBF
+ // since it would result in a cluster larger than 2.
+ // N.B. To relax this constraint we will need to revisit how CCoinsViewMemPool::PackageAddTransaction
+ // is being used inside AcceptMultipleTransactions to track available inputs while processing a package.
+ for (const auto& ws : workspaces) {
+ if (!ws.m_ancestors.empty()) {
+ return package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package RBF failed: new transaction cannot have mempool ancestors");
+ }
+ }
+
+ // Aggregate all conflicts into one set.
+ CTxMemPool::setEntries direct_conflict_iters;
+ for (Workspace& ws : workspaces) {
+ direct_conflict_iters.merge(ws.m_iters_conflicting);
+ }
+
+ const auto& parent_ws = workspaces[0];
+ const auto& child_ws = workspaces[1];
+
+ // Don't consider replacements that would cause us to remove a large number of mempool entries.
+ // This limit is not increased in a package RBF. Use the aggregate number of transactions.
+ if (const auto err_string{GetEntriesForConflicts(*child_ws.m_ptx, m_pool, direct_conflict_iters,
+ m_subpackage.m_all_conflicts)}) {
+ return package_state.Invalid(PackageValidationResult::PCKG_POLICY,
+ "package RBF failed: too many potential replacements", *err_string);
+ }
+
+ for (CTxMemPool::txiter it : m_subpackage.m_all_conflicts) {
+ m_subpackage.m_conflicting_fees += it->GetModifiedFee();
+ m_subpackage.m_conflicting_size += it->GetTxSize();
+ }
+
+ // Use the child as the transaction for attributing errors to.
+ const Txid& child_hash = child_ws.m_ptx->GetHash();
+ if (const auto err_string{PaysForRBF(/*original_fees=*/m_subpackage.m_conflicting_fees,
+ /*replacement_fees=*/m_subpackage.m_total_modified_fees,
+ /*replacement_vsize=*/m_subpackage.m_total_vsize,
+ m_pool.m_opts.incremental_relay_feerate, child_hash)}) {
+ return package_state.Invalid(PackageValidationResult::PCKG_POLICY,
+ "package RBF failed: insufficient anti-DoS fees", *err_string);
+ }
+
+ // Ensure this two-transaction package is a "chunk" on its own; we don't want the child
+ // to be merely paying anti-DoS fees.
+ const CFeeRate parent_feerate(parent_ws.m_modified_fees, parent_ws.m_vsize);
+ const CFeeRate package_feerate(m_subpackage.m_total_modified_fees, m_subpackage.m_total_vsize);
+ if (package_feerate <= parent_feerate) {
+ return package_state.Invalid(PackageValidationResult::PCKG_POLICY,
+ "package RBF failed: package feerate is less than or equal to parent feerate",
+ strprintf("package feerate %s <= parent feerate %s", package_feerate.ToString(), parent_feerate.ToString()));
+ }
+
+ // Check if it's economically rational to mine this package rather than the ones it replaces.
+ // This takes the place of ReplacementChecks()'s PaysMoreThanConflicts() in the package RBF setting.
+ if (const auto err_tup{ImprovesFeerateDiagram(m_pool, direct_conflict_iters, m_subpackage.m_all_conflicts, m_subpackage.m_total_modified_fees, m_subpackage.m_total_vsize)}) {
+ return package_state.Invalid(PackageValidationResult::PCKG_POLICY,
+ "package RBF failed: " + err_tup.value().second, "");
+ }
+
+ LogPrint(BCLog::TXPACKAGES, "package RBF checks passed: parent %s (wtxid=%s), child %s (wtxid=%s)\n",
+ txns.front()->GetHash().ToString(), txns.front()->GetWitnessHash().ToString(),
+ txns.back()->GetHash().ToString(), txns.back()->GetWitnessHash().ToString());
+
+ return true;
}
bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws)
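In compact form, a 1-parent-1-child package RBF attempt with direct conflicts C must now satisfy, in addition to the cluster-shape constraints above:

    total_modified_fees >= fees(C) + incremental_relay_feerate * total_vsize   (anti-DoS, PaysForRBF)
    total_modified_fees / total_vsize > parent_fees / parent_vsize             (the package forms a single chunk)

plus the ImprovesFeerateDiagram() test, which takes the place of PaysMoreThanConflicts() from the single-transaction path. As a rough worked example, assuming the default incremental relay feerate of 1,000 sat/kvB, a 300 vB package replacing conflicts that paid 1,000 sat must pay at least about 1,300 sat in total, and its overall feerate must strictly exceed the parent's own feerate.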
@@ -1215,16 +1291,19 @@ bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws)
const bool bypass_limits = args.m_bypass_limits;
std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
+ if (!m_subpackage.m_all_conflicts.empty()) Assume(args.m_allow_replacement);
// Remove conflicting transactions from the mempool
for (CTxMemPool::txiter it : m_subpackage.m_all_conflicts)
{
- LogPrint(BCLog::MEMPOOL, "replacing tx %s (wtxid=%s) with %s (wtxid=%s) for %s additional fees, %d delta bytes\n",
+ LogPrint(BCLog::MEMPOOL, "replacing mempool tx %s (wtxid=%s, fees=%s, vsize=%s). New tx %s (wtxid=%s, fees=%s, vsize=%s)\n",
it->GetTx().GetHash().ToString(),
it->GetTx().GetWitnessHash().ToString(),
+ it->GetFee(),
+ it->GetTxSize(),
hash.ToString(),
tx.GetWitnessHash().ToString(),
- FormatMoney(ws.m_modified_fees - m_subpackage.m_conflicting_fees),
- (int)entry->GetTxSize() - (int)m_subpackage.m_conflicting_size);
+ entry->GetFee(),
+ entry->GetTxSize());
TRACE7(mempool, replaced,
it->GetTx().GetHash().data(),
it->GetTxSize(),
@@ -1318,6 +1397,13 @@ bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>&
std::transform(workspaces.cbegin(), workspaces.cend(), std::back_inserter(all_package_wtxids),
[](const auto& ws) { return ws.m_ptx->GetWitnessHash(); });
+ if (!m_subpackage.m_replaced_transactions.empty()) {
+ LogPrint(BCLog::MEMPOOL, "replaced %u mempool transactions with %u new one(s) for %s additional fees, %d delta bytes\n",
+ m_subpackage.m_replaced_transactions.size(), workspaces.size(),
+ m_subpackage.m_total_modified_fees - m_subpackage.m_conflicting_fees,
+ m_subpackage.m_total_vsize - static_cast<int>(m_subpackage.m_conflicting_size));
+ }
+
// Add successful results. The returned results may change later if LimitMempoolSize() evicts them.
for (Workspace& ws : workspaces) {
const auto effective_feerate = args.m_package_feerates ? ws.m_package_feerate :
@@ -1361,7 +1447,13 @@ MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef
return MempoolAcceptResult::Failure(ws.m_state);
}
- if (m_subpackage.m_rbf && !ReplacementChecks(ws)) return MempoolAcceptResult::Failure(ws.m_state);
+ if (m_subpackage.m_rbf && !ReplacementChecks(ws)) {
+ if (ws.m_state.GetResult() == TxValidationResult::TX_RECONSIDERABLE) {
+ // Failed for incentives-based fee reasons. Provide the effective feerate and which tx was included.
+ return MempoolAcceptResult::FeeFailure(ws.m_state, CFeeRate(ws.m_modified_fees, ws.m_vsize), single_wtxid);
+ }
+ return MempoolAcceptResult::Failure(ws.m_state);
+ }
// Perform the inexpensive checks first and avoid hashing and signature verification unless
// those checks pass, to mitigate CPU exhaustion denial-of-service attacks.
@@ -1393,6 +1485,13 @@ MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef
m_pool.m_opts.signals->TransactionAddedToMempool(tx_info, m_pool.GetAndIncrementSequence());
}
+ if (!m_subpackage.m_replaced_transactions.empty()) {
+ LogPrint(BCLog::MEMPOOL, "replaced %u mempool transactions with 1 new transaction for %s additional fees, %d delta bytes\n",
+ m_subpackage.m_replaced_transactions.size(),
+ ws.m_modified_fees - m_subpackage.m_conflicting_fees,
+ ws.m_vsize - static_cast<int>(m_subpackage.m_conflicting_size));
+ }
+
return MempoolAcceptResult::Success(std::move(m_subpackage.m_replaced_transactions), ws.m_vsize, ws.m_base_fees,
effective_feerate, single_wtxid);
}
@@ -1434,11 +1533,14 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::
}
// Make the coins created by this transaction available for subsequent transactions in the
- // package to spend. Since we already checked conflicts in the package and we don't allow
- // replacements, we don't need to track the coins spent. Note that this logic will need to be
- // updated if package replace-by-fee is allowed in the future.
- assert(!args.m_allow_replacement);
- assert(!m_subpackage.m_rbf);
+ // package to spend. If there are no conflicts within the package, no transaction can spend a coin
+ // needed by another transaction in the package. We also need to make sure that no package
+ // tx replaces (or replaces the ancestor of) the parent of another package tx. As long as we
+ // check these two things, we don't need to track the coins spent.
+ // If a package tx conflicts with a mempool tx, PackageMempoolChecks() ensures later that any package RBF attempt
+ // has *no* in-mempool ancestors, so we don't have to worry about subsequent transactions in the
+ // same package spending the same in-mempool outpoints. This needs to be revisited for general
+ // package RBF.
m_viewmempool.PackageAddTransaction(ws.m_ptx);
}
@@ -1479,7 +1581,7 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std::
// Apply package mempool ancestor/descendant limits. Skip if there is only one transaction,
// because it's unnecessary.
- if (txns.size() > 1 && !PackageMempoolChecks(txns, m_subpackage.m_total_vsize, package_state)) {
+ if (txns.size() > 1 && !PackageMempoolChecks(txns, workspaces, m_subpackage.m_total_vsize, package_state)) {
return PackageMempoolAcceptResult(package_state, std::move(results));
}
@@ -1747,7 +1849,6 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package,
MempoolAcceptResult AcceptToMemoryPool(Chainstate& active_chainstate, const CTransactionRef& tx,
int64_t accept_time, bool bypass_limits, bool test_accept)
- EXCLUSIVE_LOCKS_REQUIRED(::cs_main)
{
AssertLockHeld(::cs_main);
const CChainParams& chainparams{active_chainstate.m_chainman.GetParams()};
@@ -1921,9 +2022,11 @@ void Chainstate::CheckForkWarningConditions()
if (m_chainman.m_best_invalid && m_chainman.m_best_invalid->nChainWork > m_chain.Tip()->nChainWork + (GetBlockProof(*m_chain.Tip()) * 6)) {
LogPrintf("%s: Warning: Found invalid chain at least ~6 blocks longer than our best chain.\nChain state database corruption likely.\n", __func__);
- SetfLargeWorkInvalidChainFound(true);
+ m_chainman.GetNotifications().warningSet(
+ kernel::Warning::LARGE_WORK_INVALID_CHAIN,
+ _("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade."));
} else {
- SetfLargeWorkInvalidChainFound(false);
+ m_chainman.GetNotifications().warningUnset(kernel::Warning::LARGE_WORK_INVALID_CHAIN);
}
}
@@ -2847,13 +2950,6 @@ void Chainstate::PruneAndFlush()
}
}
-/** Private helper function that concatenates warning messages. */
-static void AppendWarning(bilingual_str& res, const bilingual_str& warn)
-{
- if (!res.empty()) res += Untranslated(", ");
- res += warn;
-}
-
static void UpdateTipLog(
const CCoinsViewCache& coins_tip,
const CBlockIndex* tip,
@@ -2904,7 +3000,7 @@ void Chainstate::UpdateTip(const CBlockIndex* pindexNew)
g_best_block_cv.notify_all();
}
- bilingual_str warning_messages;
+ std::vector<bilingual_str> warning_messages;
if (!m_chainman.IsInitialBlockDownload()) {
const CBlockIndex* pindex = pindexNew;
for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) {
@@ -2913,14 +3009,15 @@ void Chainstate::UpdateTip(const CBlockIndex* pindexNew)
if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) {
const bilingual_str warning = strprintf(_("Unknown new rules activated (versionbit %i)"), bit);
if (state == ThresholdState::ACTIVE) {
- m_chainman.GetNotifications().warning(warning);
+ m_chainman.GetNotifications().warningSet(kernel::Warning::UNKNOWN_NEW_RULES_ACTIVATED, warning);
} else {
- AppendWarning(warning_messages, warning);
+ warning_messages.push_back(warning);
}
}
}
}
- UpdateTipLog(coins_tip, pindexNew, params, __func__, "", warning_messages.original);
+ UpdateTipLog(coins_tip, pindexNew, params, __func__, "",
+ util::Join(warning_messages, Untranslated(", ")).original);
}
/** Disconnect m_chain's tip.
@@ -5967,8 +6064,8 @@ SnapshotCompletionResult ChainstateManager::MaybeCompleteSnapshotValidation()
PACKAGE_NAME, snapshot_tip_height, snapshot_base_height, snapshot_base_height, PACKAGE_BUGREPORT
);
- LogPrintf("[snapshot] !!! %s\n", user_error.original);
- LogPrintf("[snapshot] deleting snapshot, reverting to validated chain, and stopping node\n");
+ LogError("[snapshot] !!! %s\n", user_error.original);
+ LogError("[snapshot] deleting snapshot, reverting to validated chain, and stopping node\n");
m_active_chainstate = m_ibd_chainstate.get();
m_snapshot_chainstate->m_disabled = true;
@@ -6320,7 +6417,7 @@ bool ChainstateManager::ValidatedSnapshotCleanup()
fs::path p_old,
fs::path p_new,
const fs::filesystem_error& err) {
- LogPrintf("Error renaming path (%s) -> (%s): %s\n",
+ LogError("[snapshot] Error renaming path (%s) -> (%s): %s\n",
fs::PathToString(p_old), fs::PathToString(p_new), err.what());
GetNotifications().fatalError(strprintf(_(
"Rename of '%s' -> '%s' failed. "
diff --git a/src/wallet/feebumper.cpp b/src/wallet/feebumper.cpp
index 1288d3b418..3184d0f3b0 100644
--- a/src/wallet/feebumper.cpp
+++ b/src/wallet/feebumper.cpp
@@ -93,7 +93,7 @@ static feebumper::Result CheckFeeRate(const CWallet& wallet, const CMutableTrans
}
CAmount new_total_fee = newFeerate.GetFee(maxTxSize) + combined_bump_fee.value();
- CFeeRate incrementalRelayFee = std::max(wallet.chain().relayIncrementalFee(), CFeeRate(WALLET_INCREMENTAL_RELAY_FEE));
+ CFeeRate incrementalRelayFee = wallet.chain().relayIncrementalFee();
// Min total fee is old fee + relay fee
CAmount minTotalFee = old_fee + incrementalRelayFee.GetFee(maxTxSize);
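With the wallet-side 5 sat/vB floor removed, the minimum total fee for a fee bump is simply the old fee plus the node's incremental relay fee over the maximum transaction size. A back-of-the-envelope sketch with assumed example numbers (not taken from the diff):

    def min_bump_total_fee(old_fee_sat, incremental_relay_sat_per_kvb, max_tx_vsize):
        # minTotalFee = old_fee + incrementalRelayFee.GetFee(maxTxSize), in satoshis
        return old_fee_sat + incremental_relay_sat_per_kvb * max_tx_vsize // 1000

    # e.g. bumping a 300 vB transaction that paid 1000 sat, with the default
    # 1000 sat/kvB incremental relay fee, requires at least 1300 sat in total
    assert min_bump_total_fee(1000, 1000, 300) == 1300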
diff --git a/src/wallet/migrate.cpp b/src/wallet/migrate.cpp
index 09254a76ad..d7d8577374 100644
--- a/src/wallet/migrate.cpp
+++ b/src/wallet/migrate.cpp
@@ -551,7 +551,7 @@ void BerkeleyRODatabase::Open()
// }
// Check the last page number
- uint32_t expected_last_page = (size / page_size) - 1;
+ uint32_t expected_last_page{uint32_t((size / page_size) - 1)};
if (outer_meta.last_page != expected_last_page) {
throw std::runtime_error("Last page number could not fit in file");
}
diff --git a/src/wallet/rpc/util.cpp b/src/wallet/rpc/util.cpp
index eb23c4555b..67b5ae0fe2 100644
--- a/src/wallet/rpc/util.cpp
+++ b/src/wallet/rpc/util.cpp
@@ -179,7 +179,7 @@ void HandleWalletError(const std::shared_ptr<CWallet> wallet, DatabaseStatus& st
}
}
-void AppendLastProcessedBlock(UniValue& entry, const CWallet& wallet) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet)
+void AppendLastProcessedBlock(UniValue& entry, const CWallet& wallet)
{
AssertLockHeld(wallet.cs_wallet);
UniValue lastprocessedblock{UniValue::VOBJ};
diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp
index 4cbcfdb60f..0a59353052 100644
--- a/src/wallet/spend.cpp
+++ b/src/wallet/spend.cpp
@@ -260,7 +260,7 @@ static OutputType GetOutputType(TxoutType type, bool is_from_p2sh)
// Fetch and validate the coin control selected inputs.
// Coins could be internal (from the wallet) or external.
util::Result<PreSelectedInputs> FetchSelectedInputs(const CWallet& wallet, const CCoinControl& coin_control,
- const CoinSelectionParams& coin_selection_params) EXCLUSIVE_LOCKS_REQUIRED(wallet.cs_wallet)
+ const CoinSelectionParams& coin_selection_params)
{
PreSelectedInputs result;
const bool can_grind_r = wallet.CanGrindR();
@@ -799,6 +799,13 @@ util::Result<SelectionResult> SelectCoins(const CWallet& wallet, CoinsResult& av
op_selection_result->RecalculateWaste(coin_selection_params.min_viable_change,
coin_selection_params.m_cost_of_change,
coin_selection_params.m_change_fee);
+
+ // Verify we haven't exceeded the maximum allowed weight
+ int max_inputs_weight = MAX_STANDARD_TX_WEIGHT - (coin_selection_params.tx_noinputs_size * WITNESS_SCALE_FACTOR);
+ if (op_selection_result->GetWeight() > max_inputs_weight) {
+ return util::Error{_("The combination of the pre-selected inputs and the wallet automatic inputs selection exceeds the transaction maximum weight. "
+ "Please try sending a smaller amount or manually consolidating your wallet's UTXOs")};
+ }
}
return op_selection_result;
}
@@ -976,6 +983,16 @@ static void DiscourageFeeSniping(CMutableTransaction& tx, FastRandomContext& rng
}
}
+size_t GetSerializeSizeForRecipient(const CRecipient& recipient)
+{
+ return ::GetSerializeSize(CTxOut(recipient.nAmount, GetScriptForDestination(recipient.dest)));
+}
+
+bool IsDust(const CRecipient& recipient, const CFeeRate& dustRelayFee)
+{
+ return ::IsDust(CTxOut(recipient.nAmount, GetScriptForDestination(recipient.dest)), dustRelayFee);
+}
+
static util::Result<CreatedTransactionResult> CreateTransactionInternal(
CWallet& wallet,
const std::vector<CRecipient>& vecSend,
@@ -998,12 +1015,20 @@ static util::Result<CreatedTransactionResult> CreateTransactionInternal(
// Set the long term feerate estimate to the wallet's consolidate feerate
coin_selection_params.m_long_term_feerate = wallet.m_consolidate_feerate;
+ // Static vsize overhead + outputs vsize. 4 nVersion, 4 nLocktime, 1 input count, 1 witness overhead (dummy, flag, stack size)
+ coin_selection_params.tx_noinputs_size = 10 + GetSizeOfCompactSize(vecSend.size()); // bytes for output count
CAmount recipients_sum = 0;
const OutputType change_type = wallet.TransactionChangeType(coin_control.m_change_type ? *coin_control.m_change_type : wallet.m_default_change_type, vecSend);
ReserveDestination reservedest(&wallet, change_type);
unsigned int outputs_to_subtract_fee_from = 0; // The number of outputs which we are subtracting the fee from
for (const auto& recipient : vecSend) {
+ if (IsDust(recipient, wallet.chain().relayDustFee())) {
+ return util::Error{_("Transaction amount too small")};
+ }
+
+ // Include the fee cost for outputs.
+ coin_selection_params.tx_noinputs_size += GetSerializeSizeForRecipient(recipient);
recipients_sum += recipient.nAmount;
if (recipient.fSubtractFeeFromAmount) {
@@ -1088,23 +1113,6 @@ static util::Result<CreatedTransactionResult> CreateTransactionInternal(
const auto change_spend_fee = coin_selection_params.m_discard_feerate.GetFee(coin_selection_params.change_spend_size);
coin_selection_params.min_viable_change = std::max(change_spend_fee + 1, dust);
- // Static vsize overhead + outputs vsize. 4 version, 4 nLocktime, 1 input count, 1 witness overhead (dummy, flag, stack size)
- coin_selection_params.tx_noinputs_size = 10 + GetSizeOfCompactSize(vecSend.size()); // bytes for output count
-
- // vouts to the payees
- for (const auto& recipient : vecSend)
- {
- CTxOut txout(recipient.nAmount, GetScriptForDestination(recipient.dest));
-
- // Include the fee cost for outputs.
- coin_selection_params.tx_noinputs_size += ::GetSerializeSize(txout);
-
- if (IsDust(txout, wallet.chain().relayDustFee())) {
- return util::Error{_("Transaction amount too small")};
- }
- txNew.vout.push_back(txout);
- }
-
// Include the fees for things that aren't inputs, excluding the change output
const CAmount not_input_fees = coin_selection_params.m_effective_feerate.GetFee(coin_selection_params.m_subtract_fee_outputs ? 0 : coin_selection_params.tx_noinputs_size);
CAmount selection_target = recipients_sum + not_input_fees;
@@ -1145,6 +1153,11 @@ static util::Result<CreatedTransactionResult> CreateTransactionInternal(
result.GetWaste(),
result.GetSelectedValue());
+ // vouts to the payees
+ for (const auto& recipient : vecSend)
+ {
+ txNew.vout.emplace_back(recipient.nAmount, GetScriptForDestination(recipient.dest));
+ }
const CAmount change_amount = result.GetChange(coin_selection_params.min_viable_change, coin_selection_params.m_change_fee);
if (change_amount > 0) {
CTxOut newTxOut(change_amount, scriptChange);
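The new pre-check rejects selections whose input weight would push the transaction past the standardness limit: tx_noinputs_size (static overhead plus outputs) is computed while validating recipients, and the remaining weight budget is MAX_STANDARD_TX_WEIGHT minus that size scaled by WITNESS_SCALE_FACTOR. A rough worked example, assuming a single P2WPKH recipient:

    MAX_STANDARD_TX_WEIGHT = 400_000
    WITNESS_SCALE_FACTOR = 4

    p2wpkh_txout_size = 8 + 1 + 22                    # amount + script length + OP_0 <20-byte hash>
    tx_noinputs_size = 10 + 1 + p2wpkh_txout_size     # static overhead + compact output count + outputs
    max_inputs_weight = MAX_STANDARD_TX_WEIGHT - tx_noinputs_size * WITNESS_SCALE_FACTOR

    assert tx_noinputs_size == 42
    assert max_inputs_weight == 399_832               # weight budget left for the selected inputs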
diff --git a/src/wallet/test/fuzz/wallet_bdb_parser.cpp b/src/wallet/test/fuzz/wallet_bdb_parser.cpp
index 5216e09769..6fbd695fc5 100644
--- a/src/wallet/test/fuzz/wallet_bdb_parser.cpp
+++ b/src/wallet/test/fuzz/wallet_bdb_parser.cpp
@@ -1,4 +1,4 @@
-// Copyright (c) 2023 The Bitcoin Core developers
+// Copyright (c) 2023-present The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -63,6 +63,7 @@ FUZZ_TARGET(wallet_bdb_parser, .init = initialize_wallet_bdb_parser)
#endif
if (error.original.starts_with("AutoFile::ignore: end of file") ||
error.original.starts_with("AutoFile::read: end of file") ||
+ error.original.starts_with("AutoFile::seek: ") ||
error.original == "Not a BDB file" ||
error.original == "Unexpected page type, should be 9 (BTree Metadata)" ||
error.original == "Unexpected database flags, should only be 0x20 (subdatabases)" ||
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 85cd67dab9..d569c64b43 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -1366,13 +1366,13 @@ void CWallet::MarkConflicted(const uint256& hashBlock, int conflicting_height, c
}
-void CWallet::RecursiveUpdateTxState(const uint256& tx_hash, const TryUpdatingStateFn& try_updating_state) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) {
+void CWallet::RecursiveUpdateTxState(const uint256& tx_hash, const TryUpdatingStateFn& try_updating_state) {
// Do not flush the wallet here for performance reasons
WalletBatch batch(GetDatabase(), false);
RecursiveUpdateTxState(&batch, tx_hash, try_updating_state);
}
-void CWallet::RecursiveUpdateTxState(WalletBatch* batch, const uint256& tx_hash, const TryUpdatingStateFn& try_updating_state) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet) {
+void CWallet::RecursiveUpdateTxState(WalletBatch* batch, const uint256& tx_hash, const TryUpdatingStateFn& try_updating_state) {
std::set<uint256> todo;
std::set<uint256> done;
diff --git a/src/warnings.cpp b/src/warnings.cpp
deleted file mode 100644
index 38c0554cf2..0000000000
--- a/src/warnings.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2022 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#include <config/bitcoin-config.h> // IWYU pragma: keep
-
-#include <warnings.h>
-
-#include <common/system.h>
-#include <sync.h>
-#include <util/translation.h>
-
-#include <optional>
-#include <vector>
-
-static GlobalMutex g_warnings_mutex;
-static bilingual_str g_misc_warnings GUARDED_BY(g_warnings_mutex);
-static bool fLargeWorkInvalidChainFound GUARDED_BY(g_warnings_mutex) = false;
-static std::optional<bilingual_str> g_timeoffset_warning GUARDED_BY(g_warnings_mutex){};
-
-void SetMiscWarning(const bilingual_str& warning)
-{
- LOCK(g_warnings_mutex);
- g_misc_warnings = warning;
-}
-
-void SetfLargeWorkInvalidChainFound(bool flag)
-{
- LOCK(g_warnings_mutex);
- fLargeWorkInvalidChainFound = flag;
-}
-
-void SetMedianTimeOffsetWarning(std::optional<bilingual_str> warning)
-{
- LOCK(g_warnings_mutex);
- g_timeoffset_warning = warning;
-}
-
-std::vector<bilingual_str> GetWarnings()
-{
- std::vector<bilingual_str> warnings;
-
- LOCK(g_warnings_mutex);
-
- // Pre-release build warning
- if (!CLIENT_VERSION_IS_RELEASE) {
- warnings.emplace_back(_("This is a pre-release test build - use at your own risk - do not use for mining or merchant applications"));
- }
-
- // Misc warnings like out of disk space and clock is wrong
- if (!g_misc_warnings.empty()) {
- warnings.emplace_back(g_misc_warnings);
- }
-
- if (fLargeWorkInvalidChainFound) {
- warnings.emplace_back(_("Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade."));
- }
-
- if (g_timeoffset_warning) {
- warnings.emplace_back(g_timeoffset_warning.value());
- }
-
- return warnings;
-}
diff --git a/src/warnings.h b/src/warnings.h
deleted file mode 100644
index 79dc2ffabf..0000000000
--- a/src/warnings.h
+++ /dev/null
@@ -1,22 +0,0 @@
-// Copyright (c) 2009-2010 Satoshi Nakamoto
-// Copyright (c) 2009-2021 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#ifndef BITCOIN_WARNINGS_H
-#define BITCOIN_WARNINGS_H
-
-#include <optional>
-#include <string>
-#include <vector>
-
-struct bilingual_str;
-
-void SetMiscWarning(const bilingual_str& warning);
-void SetfLargeWorkInvalidChainFound(bool flag);
-/** Pass std::nullopt to disable the warning */
-void SetMedianTimeOffsetWarning(std::optional<bilingual_str> warning);
-/** Return potential problems detected by the node. */
-std::vector<bilingual_str> GetWarnings();
-
-#endif // BITCOIN_WARNINGS_H
diff --git a/test/functional/feature_framework_unit_tests.py b/test/functional/feature_framework_unit_tests.py
index f03f084bed..14d83f8a70 100755
--- a/test/functional/feature_framework_unit_tests.py
+++ b/test/functional/feature_framework_unit_tests.py
@@ -27,6 +27,7 @@ TEST_FRAMEWORK_MODULES = [
"crypto.ripemd160",
"crypto.secp256k1",
"script",
+ "script_util",
"segwit_addr",
"wallet_util",
]
diff --git a/test/functional/feature_settings.py b/test/functional/feature_settings.py
index 0214e781de..1cd0aeabd3 100755
--- a/test/functional/feature_settings.py
+++ b/test/functional/feature_settings.py
@@ -25,7 +25,7 @@ class SettingsTest(BitcoinTestFramework):
# Assert default settings file was created
self.stop_node(0)
- default_settings = {"_warning_": "This file is automatically generated and updated by Bitcoin Core. Please do not edit this file while the node is running, as any changes might be ignored or overwritten."}
+ default_settings = {"_warning_": f"This file is automatically generated and updated by {self.config['environment']['PACKAGE_NAME']}. Please do not edit this file while the node is running, as any changes might be ignored or overwritten."}
with settings.open() as fp:
assert_equal(json.load(fp), default_settings)
diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py
index 3d205ffa62..e1cee46839 100755
--- a/test/functional/mempool_accept.py
+++ b/test/functional/mempool_accept.py
@@ -18,6 +18,7 @@ from test_framework.messages import (
CTxInWitness,
CTxOut,
MAX_BLOCK_WEIGHT,
+ WITNESS_SCALE_FACTOR,
MAX_MONEY,
SEQUENCE_FINAL,
tx_from_hex,
@@ -228,7 +229,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework):
self.log.info('A really large transaction')
tx = tx_from_hex(raw_tx_reference)
- tx.vin = [tx.vin[0]] * math.ceil(MAX_BLOCK_WEIGHT // 4 / len(tx.vin[0].serialize()))
+ tx.vin = [tx.vin[0]] * math.ceil((MAX_BLOCK_WEIGHT // WITNESS_SCALE_FACTOR) / len(tx.vin[0].serialize()))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-oversize'}],
rawtxs=[tx.serialize().hex()],
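The oversize-transaction case now derives its input count from MAX_BLOCK_WEIGHT // WITNESS_SCALE_FACTOR instead of a literal 4. A sketch of that arithmetic, assuming a minimal 41-byte serialized input (the test's reference input is larger, so its actual count differs):

    import math

    MAX_BLOCK_WEIGHT = 4_000_000
    WITNESS_SCALE_FACTOR = 4
    input_size = 36 + 1 + 4                           # outpoint + empty scriptSig + nSequence

    num_inputs = math.ceil((MAX_BLOCK_WEIGHT // WITNESS_SCALE_FACTOR) / input_size)
    assert num_inputs == 24_391                       # enough duplicated inputs to exceed the limit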
diff --git a/test/functional/mempool_package_rbf.py b/test/functional/mempool_package_rbf.py
new file mode 100755
index 0000000000..ceb9530394
--- /dev/null
+++ b/test/functional/mempool_package_rbf.py
@@ -0,0 +1,587 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+from decimal import Decimal
+
+from test_framework.messages import (
+ COIN,
+ MAX_BIP125_RBF_SEQUENCE,
+)
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.mempool_util import fill_mempool
+from test_framework.util import (
+ assert_greater_than_or_equal,
+ assert_equal,
+)
+from test_framework.wallet import (
+ DEFAULT_FEE,
+ MiniWallet,
+)
+
+MAX_REPLACEMENT_CANDIDATES = 100
+
+# Value high enough to cause evictions in each subtest
+# for typical cases
+DEFAULT_CHILD_FEE = DEFAULT_FEE * 4
+
+class PackageRBFTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 2
+ self.setup_clean_chain = True
+ # Required for fill_mempool()
+ self.extra_args = [[
+ "-datacarriersize=100000",
+ "-maxmempool=5",
+ ]] * self.num_nodes
+
+ def assert_mempool_contents(self, expected=None):
+ """Assert that all transactions in expected are in the mempool,
+ and no additional ones exist.
+ """
+ if not expected:
+ expected = []
+ mempool = self.nodes[0].getrawmempool(verbose=False)
+ assert_equal(len(mempool), len(expected))
+ for tx in expected:
+ assert tx.rehash() in mempool
+
+ def create_simple_package(self, parent_coin, parent_fee=DEFAULT_FEE, child_fee=DEFAULT_CHILD_FEE, heavy_child=False):
+ """Create a 1 parent 1 child package using the coin passed in as the parent's input. The
+ parent has 1 output, used to fund 1 child transaction.
+ All transactions signal BIP125 replaceability, but nSequence changes based on self.ctr. This
+ prevents identical txids between packages when the parents spend the same coin and have the
+ same fee (e.g. 0 sat).
+
+ returns tuple (hex serialized txns, CTransaction objects)
+ """
+ self.ctr += 1
+ # Use fee_rate=0 because create_self_transfer will use the default fee_rate value otherwise.
+ # Passing in fee>0 overrides fee_rate, so this still works for non-zero parent_fee.
+ parent_result = self.wallet.create_self_transfer(
+ fee=parent_fee,
+ utxo_to_spend=parent_coin,
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ num_child_outputs = 10 if heavy_child else 1
+ child_result = self.wallet.create_self_transfer_multi(
+ utxos_to_spend=[parent_result["new_utxo"]],
+ num_outputs=num_child_outputs,
+ fee_per_output=int(child_fee * COIN // num_child_outputs),
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+ package_hex = [parent_result["hex"], child_result["hex"]]
+ package_txns = [parent_result["tx"], child_result["tx"]]
+ return package_hex, package_txns
+
+ def run_test(self):
+ # Counter for the number of packages we have constructed. Since we're constructing parent transactions that spend the same
+ # coins (to create conflicts), and perhaps giving them the same fee, we might otherwise accidentally recreate the same transaction.
+ # To prevent this, set nSequence to MAX_BIP125_RBF_SEQUENCE - self.ctr.
+ self.ctr = 0
+
+ self.log.info("Generate blocks to create UTXOs")
+ self.wallet = MiniWallet(self.nodes[0])
+
+ # Make more than enough coins for the sum of all tests,
+ # otherwise a wallet rescan is needed later
+ self.generate(self.wallet, 300)
+ self.coins = self.wallet.get_utxos(mark_as_spent=False)
+
+ self.test_package_rbf_basic()
+ self.test_package_rbf_singleton()
+ self.test_package_rbf_additional_fees()
+ self.test_package_rbf_max_conflicts()
+ self.test_too_numerous_ancestors()
+ self.test_package_rbf_with_wrong_pkg_size()
+ self.test_insufficient_feerate()
+ self.test_wrong_conflict_cluster_size_linear()
+ self.test_wrong_conflict_cluster_size_parents_child()
+ self.test_wrong_conflict_cluster_size_parent_children()
+ self.test_0fee_package_rbf()
+ self.test_child_conflicts_parent_mempool_ancestor()
+
+ def test_package_rbf_basic(self):
+ self.log.info("Test that a child can pay to replace its parents' conflicts of cluster size 2")
+ node = self.nodes[0]
+ # Reuse the same coins so that the transactions conflict with one another.
+ parent_coin = self.coins.pop()
+ package_hex1, package_txns1 = self.create_simple_package(parent_coin, DEFAULT_FEE, DEFAULT_FEE)
+ package_hex2, package_txns2 = self.create_simple_package(parent_coin, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ node.submitpackage(package_hex1)
+ self.assert_mempool_contents(expected=package_txns1)
+
+ # Make sure 2nd node gets set up for basic package RBF
+ self.sync_all()
+
+ # Test run rejected because conflicts are not allowed in subpackage evaluation
+ testres = node.testmempoolaccept(package_hex2)
+ assert_equal(testres[0]["reject-reason"], "bip125-replacement-disallowed")
+
+ # But accepted during normal submission
+ submitres = node.submitpackage(package_hex2)
+ assert_equal(set(submitres["replaced-transactions"]), set([tx.rehash() for tx in package_txns1]))
+ self.assert_mempool_contents(expected=package_txns2)
+
+ # Make sure 2nd node gets a basic package RBF over p2p
+ self.sync_all()
+
+ self.generate(node, 1)
+
+ def test_package_rbf_singleton(self):
+ self.log.info("Test child can pay to replace a parent's single conflicted tx")
+ node = self.nodes[0]
+
+ # Make singleton tx to conflict with in next batch
+ singleton_coin = self.coins.pop()
+ singleton_tx = self.wallet.create_self_transfer(utxo_to_spend=singleton_coin)
+ node.sendrawtransaction(singleton_tx["hex"])
+ self.assert_mempool_contents(expected=[singleton_tx["tx"]])
+
+ package_hex, package_txns = self.create_simple_package(singleton_coin, DEFAULT_FEE, singleton_tx["fee"] * 2)
+
+ submitres = node.submitpackage(package_hex)
+ assert_equal(submitres["replaced-transactions"], [singleton_tx["tx"].rehash()])
+ self.assert_mempool_contents(expected=package_txns)
+
+ self.generate(node, 1)
+
+ def test_package_rbf_additional_fees(self):
+ self.log.info("Check Package RBF must increase the absolute fee")
+ node = self.nodes[0]
+ coin = self.coins.pop()
+
+ package_hex1, package_txns1 = self.create_simple_package(coin, parent_fee=DEFAULT_FEE, child_fee=DEFAULT_CHILD_FEE, heavy_child=True)
+ assert_greater_than_or_equal(1000, package_txns1[-1].get_vsize())
+ node.submitpackage(package_hex1)
+ self.assert_mempool_contents(expected=package_txns1)
+
+ PACKAGE_FEE = DEFAULT_FEE + DEFAULT_CHILD_FEE
+ PACKAGE_FEE_MINUS_ONE = PACKAGE_FEE - Decimal("0.00000001")
+
+ # Package 2 has a higher feerate but lower absolute fee
+ package_hex2, package_txns2 = self.create_simple_package(coin, parent_fee=DEFAULT_FEE, child_fee=DEFAULT_CHILD_FEE - Decimal("0.00000001"))
+ pkg_results2 = node.submitpackage(package_hex2)
+ assert_equal(f"package RBF failed: insufficient anti-DoS fees, rejecting replacement {package_txns2[1].rehash()}, less fees than conflicting txs; {PACKAGE_FEE_MINUS_ONE} < {PACKAGE_FEE}", pkg_results2["package_msg"])
+ self.assert_mempool_contents(expected=package_txns1)
+
+ self.log.info("Check replacement pays for incremental bandwidth")
+ package_hex3, package_txns3 = self.create_simple_package(coin, parent_fee=DEFAULT_FEE, child_fee=DEFAULT_CHILD_FEE)
+ pkg_results3 = node.submitpackage(package_hex3)
+ assert_equal(f"package RBF failed: insufficient anti-DoS fees, rejecting replacement {package_txns3[1].rehash()}, not enough additional fees to relay; 0.00 < 0.00000{sum([tx.get_vsize() for tx in package_txns3])}", pkg_results3["package_msg"])
+
+ self.assert_mempool_contents(expected=package_txns1)
+ self.generate(node, 1)
+
+ self.log.info("Check Package RBF must have strict cpfp structure")
+ coin = self.coins.pop()
+ package_hex4, package_txns4 = self.create_simple_package(coin, parent_fee=DEFAULT_FEE, child_fee=DEFAULT_CHILD_FEE)
+ node.submitpackage(package_hex4)
+ self.assert_mempool_contents(expected=package_txns4)
+ package_hex5, package_txns5 = self.create_simple_package(coin, parent_fee=DEFAULT_CHILD_FEE, child_fee=DEFAULT_CHILD_FEE - Decimal("0.00000001"))
+ pkg_results5 = node.submitpackage(package_hex5)
+ assert 'package RBF failed: package feerate is less than parent feerate' in pkg_results5["package_msg"]
+
+ self.assert_mempool_contents(expected=package_txns4)
+ self.generate(node, 1)
+
+ def test_package_rbf_max_conflicts(self):
+ node = self.nodes[0]
+ self.log.info("Check Package RBF cannot replace more than MAX_REPLACEMENT_CANDIDATES transactions")
+ num_coins = 51
+ parent_coins = self.coins[:num_coins]
+ del self.coins[:num_coins]
+
+ # Original transactions: 51 transactions with 1 descendant each -> 102 total transactions
+ size_two_clusters = []
+ for coin in parent_coins:
+ size_two_clusters.append(self.wallet.send_self_transfer_chain(from_node=node, chain_length=2, utxo_to_spend=coin))
+ expected_txns = [txn["tx"] for parent_child_txns in size_two_clusters for txn in parent_child_txns]
+ assert_equal(len(expected_txns), num_coins * 2)
+ self.assert_mempool_contents(expected=expected_txns)
+
+ # parent feerate needs to be high enough to meet the minimum relay feerate
+ # child feerate needs to be large enough to trigger package RBF with a very large parent and
+ # pay for all evicted fees. maxfeerate is turned off for all submissions since the child feerate
+ # is extremely high
+ parent_fee_per_conflict = 10000
+ child_feerate = 10000 * DEFAULT_FEE
+
+ # Conflict against all transactions by double-spending each parent, causing 102 evictions
+ package_parent = self.wallet.create_self_transfer_multi(utxos_to_spend=parent_coins, fee_per_output=parent_fee_per_conflict)
+ package_child = self.wallet.create_self_transfer(fee_rate=child_feerate, utxo_to_spend=package_parent["new_utxos"][0])
+
+ pkg_results = node.submitpackage([package_parent["hex"], package_child["hex"]], maxfeerate=0)
+ assert_equal(f"package RBF failed: too many potential replacements, rejecting replacement {package_child['tx'].rehash()}; too many potential replacements (102 > 100)\n", pkg_results["package_msg"])
+ self.assert_mempool_contents(expected=expected_txns)
+
+ # Make singleton tx to conflict with in next batch
+ singleton_coin = self.coins.pop()
+ singleton_tx = self.wallet.create_self_transfer(utxo_to_spend=singleton_coin)
+ node.sendrawtransaction(singleton_tx["hex"])
+ expected_txns.append(singleton_tx["tx"])
+
+ # Double-spend same set minus last, and double-spend singleton. This hits 101 evictions; should still fail.
+ # N.B. we can't RBF just a child tx in the clusters, as that would make the resulting cluster size 3.
+ double_spending_coins = parent_coins[:-1] + [singleton_coin]
+ package_parent = self.wallet.create_self_transfer_multi(utxos_to_spend=double_spending_coins, fee_per_output=parent_fee_per_conflict)
+ package_child = self.wallet.create_self_transfer(fee_rate=child_feerate, utxo_to_spend=package_parent["new_utxos"][0])
+ pkg_results = node.submitpackage([package_parent["hex"], package_child["hex"]], maxfeerate=0)
+ assert_equal(f"package RBF failed: too many potential replacements, rejecting replacement {package_child['tx'].rehash()}; too many potential replacements (101 > 100)\n", pkg_results["package_msg"])
+ self.assert_mempool_contents(expected=expected_txns)
+
+ # Finally, evict MAX_REPLACEMENT_CANDIDATES
+ package_parent = self.wallet.create_self_transfer_multi(utxos_to_spend=parent_coins[:-1], fee_per_output=parent_fee_per_conflict)
+ package_child = self.wallet.create_self_transfer(fee_rate=child_feerate, utxo_to_spend=package_parent["new_utxos"][0])
+ pkg_results = node.submitpackage([package_parent["hex"], package_child["hex"]], maxfeerate=0)
+ assert_equal(pkg_results["package_msg"], "success")
+ self.assert_mempool_contents(expected=[singleton_tx["tx"], size_two_clusters[-1][0]["tx"], size_two_clusters[-1][1]["tx"], package_parent["tx"], package_child["tx"]] )
+
+ self.generate(node, 1)
+
+ def test_too_numerous_ancestors(self):
+ self.log.info("Test that package RBF doesn't work with packages larger than 2 due to ancestors")
+ node = self.nodes[0]
+ coin = self.coins.pop()
+
+ package_hex1, package_txns1 = self.create_simple_package(coin, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ node.submitpackage(package_hex1)
+ self.assert_mempool_contents(expected=package_txns1)
+
+ # Double-spends the original package
+ self.ctr += 1
+ parent_result1 = self.wallet.create_self_transfer(
+ fee=DEFAULT_FEE,
+ utxo_to_spend=coin,
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ coin2 = self.coins.pop()
+
+ # Added to make package too large for package RBF;
+ # it will enter mempool individually
+ self.ctr += 1
+ parent_result2 = self.wallet.create_self_transfer(
+ fee=DEFAULT_FEE,
+ utxo_to_spend=coin2,
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ # Child that spends both, violating cluster size rule due
+ # to in-mempool ancestry
+ self.ctr += 1
+ child_result = self.wallet.create_self_transfer_multi(
+ fee_per_output=int(DEFAULT_CHILD_FEE * COIN),
+ utxos_to_spend=[parent_result1["new_utxo"], parent_result2["new_utxo"]],
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ package_hex2 = [parent_result1["hex"], parent_result2["hex"], child_result["hex"]]
+ package_txns2_succeed = [parent_result2["tx"]]
+
+ pkg_result = node.submitpackage(package_hex2)
+ assert_equal(pkg_result["package_msg"], 'package RBF failed: new transaction cannot have mempool ancestors')
+ self.assert_mempool_contents(expected=package_txns1 + package_txns2_succeed)
+ self.generate(node, 1)
+
+ def test_wrong_conflict_cluster_size_linear(self):
+ self.log.info("Test that conflicting with a cluster not sized two is rejected: linear chain")
+ node = self.nodes[0]
+
+ # Coins we will conflict with
+ coin1 = self.coins.pop()
+ coin2 = self.coins.pop()
+ coin3 = self.coins.pop()
+
+ # Three transactions chained; package RBF against any of these
+ # should be rejected
+ self.ctr += 1
+ parent_result = self.wallet.create_self_transfer(
+ fee=DEFAULT_FEE,
+ utxo_to_spend=coin1,
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ self.ctr += 1
+ child_result = self.wallet.create_self_transfer_multi(
+ fee_per_output=int(DEFAULT_FEE * COIN),
+ utxos_to_spend=[parent_result["new_utxo"], coin2],
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ self.ctr += 1
+ grandchild_result = self.wallet.create_self_transfer_multi(
+ fee_per_output=int(DEFAULT_FEE * COIN),
+ utxos_to_spend=[child_result["new_utxos"][0], coin3],
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ expected_txns = [parent_result["tx"], child_result["tx"], grandchild_result["tx"]]
+ for tx in expected_txns:
+ node.sendrawtransaction(tx.serialize().hex())
+ self.assert_mempool_contents(expected=expected_txns)
+
+ # Now make conflicting packages for each coin
+ package_hex1, package_txns1 = self.create_simple_package(coin1, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+
+ package_result = node.submitpackage(package_hex1)
+ assert_equal(f"package RBF failed: {parent_result['tx'].rehash()} has 2 descendants, max 1 allowed", package_result["package_msg"])
+
+ package_hex2, package_txns2 = self.create_simple_package(coin2, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_result = node.submitpackage(package_hex2)
+ assert_equal(f"package RBF failed: {child_result['tx'].rehash()} has both ancestor and descendant, exceeding cluster limit of 2", package_result["package_msg"])
+
+ package_hex3, package_txns3 = self.create_simple_package(coin3, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_result = node.submitpackage(package_hex3)
+ assert_equal(f"package RBF failed: {grandchild_result['tx'].rehash()} has 2 ancestors, max 1 allowed", package_result["package_msg"])
+
+ # Check that replacements were actually rejected
+ self.assert_mempool_contents(expected=expected_txns)
+ self.generate(node, 1)
+
+ def test_wrong_conflict_cluster_size_parents_child(self):
+ self.log.info("Test that conflicting with a cluster not sized two is rejected: two parents one child")
+ node = self.nodes[0]
+
+ # Coins we will conflict with
+ coin1 = self.coins.pop()
+ coin2 = self.coins.pop()
+ coin3 = self.coins.pop()
+
+ self.ctr += 1
+ parent1_result = self.wallet.create_self_transfer(
+ fee=DEFAULT_FEE,
+ utxo_to_spend=coin1,
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ self.ctr += 1
+ parent2_result = self.wallet.create_self_transfer_multi(
+ fee_per_output=int(DEFAULT_FEE * COIN),
+ utxos_to_spend=[coin2],
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ self.ctr += 1
+ child_result = self.wallet.create_self_transfer_multi(
+ fee_per_output=int(DEFAULT_FEE * COIN),
+ utxos_to_spend=[parent1_result["new_utxo"], parent2_result["new_utxos"][0], coin3],
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ expected_txns = [parent1_result["tx"], parent2_result["tx"], child_result["tx"]]
+ for tx in expected_txns:
+ node.sendrawtransaction(tx.serialize().hex())
+ self.assert_mempool_contents(expected=expected_txns)
+
+ # Now make conflicting packages for each coin
+ package_hex1, package_txns1 = self.create_simple_package(coin1, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_result = node.submitpackage(package_hex1)
+ assert_equal(f"package RBF failed: {parent1_result['tx'].rehash()} is not the only parent of child {child_result['tx'].rehash()}", package_result["package_msg"])
+
+ package_hex2, package_txns2 = self.create_simple_package(coin2, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_result = node.submitpackage(package_hex2)
+ assert_equal(f"package RBF failed: {parent2_result['tx'].rehash()} is not the only parent of child {child_result['tx'].rehash()}", package_result["package_msg"])
+
+ package_hex3, package_txns3 = self.create_simple_package(coin3, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_result = node.submitpackage(package_hex3)
+ assert_equal(f"package RBF failed: {child_result['tx'].rehash()} has 2 ancestors, max 1 allowed", package_result["package_msg"])
+
+ # Check that replacements were actually rejected
+ self.assert_mempool_contents(expected=expected_txns)
+ self.generate(node, 1)
+
+ def test_wrong_conflict_cluster_size_parent_children(self):
+ self.log.info("Test that conflicting with a cluster not sized two is rejected: one parent two children")
+ node = self.nodes[0]
+
+ # Coins we will conflict with
+ coin1 = self.coins.pop()
+ coin2 = self.coins.pop()
+ coin3 = self.coins.pop()
+
+ self.ctr += 1
+ parent_result = self.wallet.create_self_transfer_multi(
+ fee_per_output=int(DEFAULT_FEE * COIN),
+ num_outputs=2,
+ utxos_to_spend=[coin1],
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ self.ctr += 1
+ child1_result = self.wallet.create_self_transfer_multi(
+ fee_per_output=int(DEFAULT_FEE * COIN),
+ utxos_to_spend=[parent_result["new_utxos"][0], coin2],
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ self.ctr += 1
+ child2_result = self.wallet.create_self_transfer_multi(
+ fee_per_output=int(DEFAULT_FEE * COIN),
+ utxos_to_spend=[parent_result["new_utxos"][1], coin3],
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ # Submit them to mempool
+ expected_txns = [parent_result["tx"], child1_result["tx"], child2_result["tx"]]
+ for tx in expected_txns:
+ node.sendrawtransaction(tx.serialize().hex())
+ self.assert_mempool_contents(expected=expected_txns)
+
+ # Now make conflicting packages for each coin
+ package_hex1, package_txns1 = self.create_simple_package(coin1, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_result = node.submitpackage(package_hex1)
+ assert_equal(f"package RBF failed: {parent_result['tx'].rehash()} has 2 descendants, max 1 allowed", package_result["package_msg"])
+
+ package_hex2, package_txns2 = self.create_simple_package(coin2, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_result = node.submitpackage(package_hex2)
+ assert_equal(f"package RBF failed: {child1_result['tx'].rehash()} is not the only child of parent {parent_result['tx'].rehash()}", package_result["package_msg"])
+
+ package_hex3, package_txns3 = self.create_simple_package(coin3, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_result = node.submitpackage(package_hex3)
+ assert_equal(f"package RBF failed: {child2_result['tx'].rehash()} is not the only child of parent {parent_result['tx'].rehash()}", package_result["package_msg"])
+
+ # Check that replacements were actually rejected
+ self.assert_mempool_contents(expected=expected_txns)
+ self.generate(node, 1)
+
+ def test_package_rbf_with_wrong_pkg_size(self):
+ self.log.info("Test that package RBF doesn't work with packages larger than 2 due to pkg size")
+ node = self.nodes[0]
+ coin1 = self.coins.pop()
+ coin2 = self.coins.pop()
+
+ # Two packages are used to create multiple direct conflicts, which makes it easier to set up an invalid package size
+ package_hex1, package_txns1 = self.create_simple_package(coin1, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+ package_hex2, package_txns2 = self.create_simple_package(coin2, DEFAULT_FEE, DEFAULT_CHILD_FEE)
+
+ node.submitpackage(package_hex1)
+ node.submitpackage(package_hex2)
+
+ self.assert_mempool_contents(expected=package_txns1 + package_txns2)
+ assert_equal(len(node.getrawmempool()), 4)
+
+ # Double-spends the first package
+ self.ctr += 1
+ parent_result1 = self.wallet.create_self_transfer(
+ fee=DEFAULT_FEE,
+ utxo_to_spend=coin1,
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ # Double-spends the second package
+ self.ctr += 1
+ parent_result2 = self.wallet.create_self_transfer(
+ fee=DEFAULT_FEE,
+ utxo_to_spend=coin2,
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ # Child that spends both, violating cluster size rule due
+ # to pkg size
+ self.ctr += 1
+ child_result = self.wallet.create_self_transfer_multi(
+ fee_per_output=int(DEFAULT_CHILD_FEE * COIN),
+ utxos_to_spend=[parent_result1["new_utxo"], parent_result2["new_utxo"]],
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ package_hex3 = [parent_result1["hex"], parent_result2["hex"], child_result["hex"]]
+
+ pkg_result = node.submitpackage(package_hex3)
+ assert_equal(pkg_result["package_msg"], 'package RBF failed: package must be 1-parent-1-child')
+ self.assert_mempool_contents(expected=package_txns1 + package_txns2)
+ self.generate(node, 1)
+
+ def test_insufficient_feerate(self):
+ self.log.info("Check Package RBF must beat feerate of direct conflict")
+ node = self.nodes[0]
+ coin = self.coins.pop()
+
+ # Non-cpfp structure
+ package_hex1, package_txns1 = self.create_simple_package(coin, parent_fee=DEFAULT_CHILD_FEE, child_fee=DEFAULT_FEE)
+ node.submitpackage(package_hex1)
+ self.assert_mempool_contents(expected=package_txns1)
+
+ # Package 2 feerate is below the feerate of the directly conflicted parent, so it fails even though
+ # its total fees are higher than those of the original package
+ package_hex2, package_txns2 = self.create_simple_package(coin, parent_fee=DEFAULT_CHILD_FEE - Decimal("0.00000001"), child_fee=DEFAULT_CHILD_FEE)
+ pkg_results2 = node.submitpackage(package_hex2)
+ assert_equal(pkg_results2["package_msg"], 'package RBF failed: insufficient feerate: does not improve feerate diagram')
+ self.assert_mempool_contents(expected=package_txns1)
+ self.generate(node, 1)
+
+ def test_0fee_package_rbf(self):
+ self.log.info("Test package RBF: TRUC 0-fee parent + high-fee child replaces parent's conflicts")
+ node = self.nodes[0]
+ # Reuse the same coins so that the transactions conflict with one another.
+ self.wallet.rescan_utxos()
+ parent_coin = self.wallet.get_utxo(confirmed_only=True)
+
+ # package1 pays default fee on both transactions
+ parent1 = self.wallet.create_self_transfer(utxo_to_spend=parent_coin, version=3)
+ child1 = self.wallet.create_self_transfer(utxo_to_spend=parent1["new_utxo"], version=3)
+ package_hex1 = [parent1["hex"], child1["hex"]]
+ fees_package1 = parent1["fee"] + child1["fee"]
+ submitres1 = node.submitpackage(package_hex1)
+ assert_equal(submitres1["package_msg"], "success")
+ self.assert_mempool_contents([parent1["tx"], child1["tx"]])
+
+ # package2 has a 0-fee parent (conflicting with package1) and very high fee child
+ parent2 = self.wallet.create_self_transfer(utxo_to_spend=parent_coin, fee=0, fee_rate=0, version=3)
+ child2 = self.wallet.create_self_transfer(utxo_to_spend=parent2["new_utxo"], fee=fees_package1*10, version=3)
+ package_hex2 = [parent2["hex"], child2["hex"]]
+
+ submitres2 = node.submitpackage(package_hex2)
+ assert_equal(submitres2["package_msg"], "success")
+ assert_equal(set(submitres2["replaced-transactions"]), set([parent1["txid"], child1["txid"]]))
+ self.assert_mempool_contents([parent2["tx"], child2["tx"]])
+
+ self.generate(node, 1)
+
+ def test_child_conflicts_parent_mempool_ancestor(self):
+ fill_mempool(self, self.nodes[0])
+ # Reset coins since we filled the mempool with current coins
+ self.coins = self.wallet.get_utxos(mark_as_spent=False, confirmed_only=True)
+
+ self.log.info("Test that package RBF doesn't have issues with mempool<->package conflicts via inconsistency")
+ node = self.nodes[0]
+ coin = self.coins.pop()
+
+ self.ctr += 1
+ grandparent_result = self.wallet.create_self_transfer(
+ fee=DEFAULT_FEE,
+ utxo_to_spend=coin,
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ node.sendrawtransaction(grandparent_result["hex"])
+
+ # Now make a package of two descendants that looks
+ # like a CPFP where the parent can't get in on its own
+ self.ctr += 1
+ parent_result = self.wallet.create_self_transfer(
+ fee_rate=Decimal('0.00001000'),
+ utxo_to_spend=grandparent_result["new_utxo"],
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+ # Last tx double-spends grandparent's coin,
+ # which is not inside the current package
+ self.ctr += 1
+ child_result = self.wallet.create_self_transfer_multi(
+ fee_per_output=int(DEFAULT_CHILD_FEE * COIN),
+ utxos_to_spend=[parent_result["new_utxo"], coin],
+ sequence=MAX_BIP125_RBF_SEQUENCE - self.ctr,
+ )
+
+ pkg_result = node.submitpackage([parent_result["hex"], child_result["hex"]])
+ assert_equal(pkg_result["package_msg"], 'package RBF failed: new transaction cannot have mempool ancestors')
+ mempool_info = node.getrawmempool()
+ assert grandparent_result["txid"] in mempool_info
+ assert parent_result["txid"] not in mempool_info
+ assert child_result["txid"] not in mempool_info
+
+if __name__ == "__main__":
+ PackageRBFTest().main()
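The fee assertions in the new test above boil down to two anti-DoS checks: a replacement package must pay strictly more absolute fee than everything it conflicts with, and the difference must cover relaying the package itself at the incremental relay feerate. A condensed sketch of those two checks (illustrative only, not the mempool implementation):

    def package_rbf_fee_ok(package_fee_sat, package_vsize, conflicting_fee_sat,
                           incremental_relay_sat_per_vb=1):
        # Must pay strictly more absolute fee than all directly conflicting transactions
        if package_fee_sat <= conflicting_fee_sat:
            return False, "less fees than conflicting txs"
        # The extra fee must also pay for relaying the replacement package itself
        if package_fee_sat - conflicting_fee_sat < incremental_relay_sat_per_vb * package_vsize:
            return False, "not enough additional fees to relay"
        return True, "success"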
diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py
index b23ec1028b..d10e47e036 100755
--- a/test/functional/p2p_addr_relay.py
+++ b/test/functional/p2p_addr_relay.py
@@ -142,7 +142,8 @@ class AddrTest(BitcoinTestFramework):
msg = self.setup_addr_msg(1010)
with self.nodes[0].assert_debug_log(['addr message size = 1010']):
- addr_source.send_and_ping(msg)
+ addr_source.send_message(msg)
+ addr_source.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
diff --git a/test/functional/p2p_addrv2_relay.py b/test/functional/p2p_addrv2_relay.py
index ea114e7d70..4ec8e0bc04 100755
--- a/test/functional/p2p_addrv2_relay.py
+++ b/test/functional/p2p_addrv2_relay.py
@@ -86,11 +86,6 @@ class AddrTest(BitcoinTestFramework):
addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
msg = msg_addrv2()
- self.log.info('Send too-large addrv2 message')
- msg.addrs = ADDRS * 101
- with self.nodes[0].assert_debug_log(['addrv2 message size = 1010']):
- addr_source.send_and_ping(msg)
-
self.log.info('Check that addrv2 message content is relayed and added to addrman')
addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver())
msg.addrs = ADDRS
@@ -106,6 +101,13 @@ class AddrTest(BitcoinTestFramework):
assert addr_receiver.addrv2_received_and_checked
assert_equal(len(self.nodes[0].getnodeaddresses(count=0, network="i2p")), 0)
+ self.log.info('Send too-large addrv2 message')
+ msg.addrs = ADDRS * 101
+ with self.nodes[0].assert_debug_log(['addrv2 message size = 1010']):
+ addr_source.send_message(msg)
+ addr_source.wait_for_disconnect()
+
+
if __name__ == '__main__':
AddrTest().main()
diff --git a/test/functional/p2p_handshake.py b/test/functional/p2p_handshake.py
index dd19fe9333..673322ab10 100755
--- a/test/functional/p2p_handshake.py
+++ b/test/functional/p2p_handshake.py
@@ -17,6 +17,7 @@ from test_framework.messages import (
NODE_WITNESS,
)
from test_framework.p2p import P2PInterface
+from test_framework.util import p2p_port
# Desirable service flags for outbound non-pruned and pruned peers. Note that
@@ -88,6 +89,12 @@ class P2PHandshakeTest(BitcoinTestFramework):
with node.assert_debug_log([f"feeler connection completed"]):
self.add_outbound_connection(node, "feeler", NODE_NONE, wait_for_disconnect=True)
+ self.log.info("Check that connecting to ourself leads to immediate disconnect")
+ with node.assert_debug_log(["connected to self", "disconnecting"]):
+ node_listen_addr = f"127.0.0.1:{p2p_port(0)}"
+ node.addconnection(node_listen_addr, "outbound-full-relay", self.options.v2transport)
+ self.wait_until(lambda: len(node.getpeerinfo()) == 0)
+
if __name__ == '__main__':
P2PHandshakeTest().main()
diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py
index adcbb4fd05..8e459ba676 100755
--- a/test/functional/p2p_invalid_messages.py
+++ b/test/functional/p2p_invalid_messages.py
@@ -260,7 +260,9 @@ class InvalidMessagesTest(BitcoinTestFramework):
msg_type = msg.msgtype.decode('ascii')
self.log.info("Test {} message of size {} is logged as misbehaving".format(msg_type, size))
with self.nodes[0].assert_debug_log(['Misbehaving', '{} message size = {}'.format(msg_type, size)]):
- self.nodes[0].add_p2p_connection(P2PInterface()).send_and_ping(msg)
+ conn = self.nodes[0].add_p2p_connection(P2PInterface())
+ conn.send_message(msg)
+ conn.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
def test_oversized_inv_msg(self):
@@ -321,7 +323,8 @@ class InvalidMessagesTest(BitcoinTestFramework):
# delete arbitrary block header somewhere in the middle to break link
del block_headers[random.randrange(1, len(block_headers)-1)]
with self.nodes[0].assert_debug_log(expected_msgs=MISBEHAVING_NONCONTINUOUS_HEADERS_MSGS):
- peer.send_and_ping(msg_headers(block_headers))
+ peer.send_message(msg_headers(block_headers))
+ peer.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
def test_resource_exhaustion(self):
diff --git a/test/functional/p2p_mutated_blocks.py b/test/functional/p2p_mutated_blocks.py
index 64d2fc96a8..708b19b1e5 100755
--- a/test/functional/p2p_mutated_blocks.py
+++ b/test/functional/p2p_mutated_blocks.py
@@ -104,11 +104,10 @@ class MutatedBlocksTest(BitcoinTestFramework):
block_missing_prev.hashPrevBlock = 123
block_missing_prev.solve()
- # Attacker gets a DoS score of 10, not immediately disconnected, so we do it 10 times to get to 100
- for _ in range(10):
- assert_equal(len(self.nodes[0].getpeerinfo()), 2)
- with self.nodes[0].assert_debug_log(expected_msgs=["AcceptBlock FAILED (prev-blk-not-found)"]):
- attacker.send_message(msg_block(block_missing_prev))
+ # Check that non-connecting block causes disconnect
+ assert_equal(len(self.nodes[0].getpeerinfo()), 2)
+ with self.nodes[0].assert_debug_log(expected_msgs=["AcceptBlock FAILED (prev-blk-not-found)"]):
+ attacker.send_message(msg_block(block_missing_prev))
attacker.wait_for_disconnect(timeout=5)
diff --git a/test/functional/p2p_sendheaders.py b/test/functional/p2p_sendheaders.py
index 27a3aa8fb9..5c463267a1 100755
--- a/test/functional/p2p_sendheaders.py
+++ b/test/functional/p2p_sendheaders.py
@@ -71,19 +71,13 @@ f. Announce 1 more header that builds on that fork.
Expect: no response.
Part 5: Test handling of headers that don't connect.
-a. Repeat 10 times:
+a. Repeat 100 times:
1. Announce a header that doesn't connect.
Expect: getheaders message
2. Send headers chain.
Expect: getdata for the missing blocks, tip update.
-b. Then send 9 more headers that don't connect.
+b. Then send 99 more headers that don't connect.
Expect: getheaders message each time.
-c. Announce a header that does connect.
- Expect: no response.
-d. Announce 49 headers that don't connect.
- Expect: getheaders message each time.
-e. Announce one more that doesn't connect.
- Expect: disconnect.
"""
from test_framework.blocktools import create_block, create_coinbase
from test_framework.messages import CInv
@@ -526,7 +520,8 @@ class SendHeadersTest(BitcoinTestFramework):
# First we test that receipt of an unconnecting header doesn't prevent
# chain sync.
expected_hash = tip
- for i in range(10):
+ NUM_HEADERS = 100
+ for i in range(NUM_HEADERS):
self.log.debug("Part 5.{}: starting...".format(i))
test_node.last_message.pop("getdata", None)
blocks = []
@@ -550,41 +545,24 @@ class SendHeadersTest(BitcoinTestFramework):
blocks = []
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
- MAX_NUM_UNCONNECTING_HEADERS_MSGS = 10
- for _ in range(MAX_NUM_UNCONNECTING_HEADERS_MSGS + 1):
+ for _ in range(NUM_HEADERS + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
block_time += 1
height += 1
- for i in range(1, MAX_NUM_UNCONNECTING_HEADERS_MSGS):
- # Send a header that doesn't connect, check that we get a getheaders.
+ for i in range(1, NUM_HEADERS):
+ with p2p_lock:
+ test_node.last_message.pop("getheaders", None)
+ # Send an empty header as a failed response to the received getheaders
+ # (from the previous iteration). Otherwise, the new headers will be
+ # treated as a response instead of as an announcement.
+ test_node.send_header_for_blocks([])
+ # Send the actual unconnecting header, which should trigger a new getheaders.
test_node.send_header_for_blocks([blocks[i]])
test_node.wait_for_getheaders(block_hash=expected_hash)
- # Next header will connect, should re-set our count:
- test_node.send_header_for_blocks([blocks[0]])
- expected_hash = blocks[0].sha256
-
- # Remove the first two entries (blocks[1] would connect):
- blocks = blocks[2:]
-
- # Now try to see how many unconnecting headers we can send
- # before we get disconnected. Should be 5*MAX_NUM_UNCONNECTING_HEADERS_MSGS
- for i in range(5 * MAX_NUM_UNCONNECTING_HEADERS_MSGS - 1):
- # Send a header that doesn't connect, check that we get a getheaders.
- test_node.send_header_for_blocks([blocks[i % len(blocks)]])
- test_node.wait_for_getheaders(block_hash=expected_hash)
-
- # Eventually this stops working.
- test_node.send_header_for_blocks([blocks[-1]])
-
- # Should get disconnected
- test_node.wait_for_disconnect()
-
- self.log.info("Part 5: success!")
-
# Finally, check that the inv node never received a getdata request,
# throughout the test
assert "getdata" not in inv_node.last_message
diff --git a/test/functional/p2p_unrequested_blocks.py b/test/functional/p2p_unrequested_blocks.py
index f368434895..776eaf5255 100755
--- a/test/functional/p2p_unrequested_blocks.py
+++ b/test/functional/p2p_unrequested_blocks.py
@@ -170,9 +170,11 @@ class AcceptBlockTest(BitcoinTestFramework):
tip = next_block
# Now send the block at height 5 and check that it wasn't accepted (missing header)
- test_node.send_and_ping(msg_block(all_blocks[1]))
+ test_node.send_message(msg_block(all_blocks[1]))
+ test_node.wait_for_disconnect()
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblock, all_blocks[1].hash)
assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getblockheader, all_blocks[1].hash)
+ test_node = self.nodes[0].add_p2p_connection(P2PInterface())
# The block at height 5 should be accepted if we provide the missing header, though
headers_message = msg_headers()
diff --git a/test/functional/rpc_createmultisig.py b/test/functional/rpc_createmultisig.py
index fdac3623d3..37656341d2 100755
--- a/test/functional/rpc_createmultisig.py
+++ b/test/functional/rpc_createmultisig.py
@@ -12,6 +12,7 @@ from test_framework.address import address_to_scriptpubkey
from test_framework.descriptors import descsum_create, drop_origins
from test_framework.key import ECPubKey
from test_framework.messages import COIN
+from test_framework.script_util import keys_to_multisig_script
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
assert_raises_rpc_error,
@@ -69,6 +70,16 @@ class RpcCreateMultiSigTest(BitcoinTestFramework):
# Check that bech32m is currently not allowed
assert_raises_rpc_error(-5, "createmultisig cannot create bech32m multisig addresses", self.nodes[0].createmultisig, 2, self.pub, "bech32m")
+ self.log.info('Check correct encoding of multisig script for all n (1..20)')
+ for nkeys in range(1, 20+1):
+ keys = [self.pub[0]]*nkeys
+ expected_ms_script = keys_to_multisig_script(keys, k=nkeys) # simply use n-of-n
+ # note that the 'legacy' address type fails for n values larger than 15
+ # due to exceeding the P2SH size limit (520 bytes), so we use 'bech32' instead
+ # (for the purpose of this encoding test, we don't care about the resulting address)
+ res = self.nodes[0].createmultisig(nrequired=nkeys, keys=keys, address_type='bech32')
+ assert_equal(res['redeemScript'], expected_ms_script.hex())
+
def check_addmultisigaddress_errors(self):
if self.options.descriptors:
return
diff --git a/test/functional/rpc_generate.py b/test/functional/rpc_generate.py
index 20f62079fd..3e250925e7 100755
--- a/test/functional/rpc_generate.py
+++ b/test/functional/rpc_generate.py
@@ -87,7 +87,7 @@ class RPCGenerateTest(BitcoinTestFramework):
txid1 = miniwallet.send_self_transfer(from_node=node)['txid']
utxo1 = miniwallet.get_utxo(txid=txid1)
rawtx2 = miniwallet.create_self_transfer(utxo_to_spend=utxo1)['hex']
- assert_raises_rpc_error(-25, 'TestBlockValidity failed: bad-txns-inputs-missingorspent', self.generateblock, node, address, [rawtx2, txid1])
+ assert_raises_rpc_error(-25, 'testBlockValidity failed: bad-txns-inputs-missingorspent', self.generateblock, node, address, [rawtx2, txid1])
self.log.info('Fail to generate block with txid not in mempool')
missing_txid = '0000000000000000000000000000000000000000000000000000000000000000'
diff --git a/test/functional/rpc_users.py b/test/functional/rpc_users.py
index 66cdd7cf9a..153493fbab 100755
--- a/test/functional/rpc_users.py
+++ b/test/functional/rpc_users.py
@@ -11,12 +11,15 @@ from test_framework.util import (
)
import http.client
+import os
+import platform
import urllib.parse
import subprocess
from random import SystemRandom
import string
import configparser
import sys
+from typing import Optional
def call_with_auth(node, user, password):
@@ -84,6 +87,40 @@ class HTTPBasicsTest(BitcoinTestFramework):
self.log.info('Wrong...')
assert_equal(401, call_with_auth(node, user + 'wrong', password + 'wrong').status)
+ def test_rpccookieperms(self):
+ p = {"owner": 0o600, "group": 0o640, "all": 0o644}
+
+ if platform.system() == 'Windows':
+ self.log.info(f"Skip cookie file permissions checks as OS detected as: {platform.system()=}")
+ return
+
+ self.log.info('Check cookie file permissions can be set using -rpccookieperms')
+
+ cookie_file_path = self.nodes[1].chain_path / '.cookie'
+ PERM_BITS_UMASK = 0o777
+
+ def test_perm(perm: Optional[str]):
+ if not perm:
+ perm = 'owner'
+ self.restart_node(1)
+ else:
+ self.restart_node(1, extra_args=[f"-rpccookieperms={perm}"])
+
+ file_stat = os.stat(cookie_file_path)
+ actual_perms = file_stat.st_mode & PERM_BITS_UMASK
+ expected_perms = p[perm]
+ assert_equal(expected_perms, actual_perms)
+
+ # Remove any leftover rpc{user|password} config options from previous tests
+ self.nodes[1].replace_in_config([("rpcuser", "#rpcuser"), ("rpcpassword", "#rpcpassword")])
+
+ self.log.info('Check default cookie permission')
+ test_perm(None)
+
+ self.log.info('Check custom cookie permissions')
+ for perm in ["owner", "group", "all"]:
+ test_perm(perm)
+
def run_test(self):
self.conf_setup()
self.log.info('Check correctness of the rpcauth config option')
@@ -115,6 +152,8 @@ class HTTPBasicsTest(BitcoinTestFramework):
(self.nodes[0].chain_path / ".cookie.tmp").mkdir()
self.nodes[0].assert_start_raises_init_error(expected_msg=init_error)
+ self.test_rpccookieperms()
+
if __name__ == '__main__':
HTTPBasicsTest().main()
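The new test_rpccookieperms checks map the -rpccookieperms values to file modes (owner -> 0600, group -> 0640, all -> 0644). A quick illustrative check outside the framework, using a hypothetical datadir path:

    import os
    import stat

    cookie = "/path/to/datadir/regtest/.cookie"       # hypothetical path
    mode = stat.S_IMODE(os.stat(cookie).st_mode)
    assert mode == 0o640                              # expected after -rpccookieperms=group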
diff --git a/test/functional/test_framework/script_util.py b/test/functional/test_framework/script_util.py
index 62894cc0f4..855f3b8cf5 100755
--- a/test/functional/test_framework/script_util.py
+++ b/test/functional/test_framework/script_util.py
@@ -3,10 +3,13 @@
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Useful Script constants and utils."""
+import unittest
+
from test_framework.script import (
CScript,
- CScriptOp,
OP_0,
+ OP_15,
+ OP_16,
OP_CHECKMULTISIG,
OP_CHECKSIG,
OP_DUP,
@@ -49,10 +52,8 @@ def keys_to_multisig_script(keys, *, k=None):
if k is None: # n-of-n multisig by default
k = n
assert k <= n
- op_k = CScriptOp.encode_op_n(k)
- op_n = CScriptOp.encode_op_n(n)
checked_keys = [check_key(key) for key in keys]
- return CScript([op_k] + checked_keys + [op_n, OP_CHECKMULTISIG])
+ return CScript([k] + checked_keys + [n, OP_CHECKMULTISIG])
def keyhash_to_p2pkh_script(hash):
@@ -125,3 +126,19 @@ def check_script(script):
if isinstance(script, bytes) or isinstance(script, CScript):
return script
assert False
+
+
+class TestFrameworkScriptUtil(unittest.TestCase):
+ def test_multisig(self):
+ fake_pubkey = bytes([0]*33)
+ # check correct encoding of P2MS script with n,k <= 16
+ normal_ms_script = keys_to_multisig_script([fake_pubkey]*16, k=15)
+ self.assertEqual(len(normal_ms_script), 1 + 16*34 + 1 + 1)
+ self.assertTrue(normal_ms_script.startswith(bytes([OP_15])))
+ self.assertTrue(normal_ms_script.endswith(bytes([OP_16, OP_CHECKMULTISIG])))
+
+ # check correct encoding of P2MS script with n,k > 16
+ max_ms_script = keys_to_multisig_script([fake_pubkey]*20, k=19)
+ self.assertEqual(len(max_ms_script), 2 + 20*34 + 2 + 1)
+ self.assertTrue(max_ms_script.startswith(bytes([1, 19]))) # using OP_PUSH1
+ self.assertTrue(max_ms_script.endswith(bytes([1, 20, OP_CHECKMULTISIG])))
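The expected lengths in the unit test follow from simple byte accounting: each 33-byte compressed key is pushed with a one-byte length prefix (34 bytes), k and n up to 16 encode as the single-byte opcodes OP_1..OP_16, larger values need a two-byte direct push, and OP_CHECKMULTISIG adds one byte. A sketch of that accounting:

    def multisig_script_len(n_keys, small_ints):
        int_enc = 1 if small_ints else 2              # OP_1..OP_16 vs. <push length> <value>
        return int_enc + n_keys * (1 + 33) + int_enc + 1   # k, pushed keys, n, OP_CHECKMULTISIG

    assert multisig_script_len(16, small_ints=True) == 1 + 16 * 34 + 1 + 1    # 547 bytes
    assert multisig_script_len(20, small_ints=False) == 2 + 20 * 34 + 2 + 1   # 685 bytes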
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 84e524558f..8475dc5faa 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -15,8 +15,10 @@ For a description of arguments recognized by test scripts, see
import argparse
from collections import deque
import configparser
+import csv
import datetime
import os
+import pathlib
import platform
import time
import shutil
@@ -280,6 +282,7 @@ BASE_SCRIPTS = [
'mempool_packages.py',
'mempool_package_onemore.py',
'mempool_package_limits.py',
+ 'mempool_package_rbf.py',
'feature_versionbits_warning.py',
'rpc_preciousblock.py',
'wallet_importprunedfunds.py --legacy-wallet',
@@ -439,6 +442,7 @@ def main():
parser.add_argument('--filter', help='filter scripts to run by regular expression')
parser.add_argument("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave bitcoinds and test.* datadir on exit or error")
+ parser.add_argument('--resultsfile', '-r', help='store test results (as CSV) to the provided file')
args, unknown_args = parser.parse_known_args()
@@ -471,6 +475,13 @@ def main():
logging.debug("Temporary test directory at %s" % tmpdir)
+ results_filepath = None
+ if args.resultsfile:
+ results_filepath = pathlib.Path(args.resultsfile)
+ # Stop early if the parent directory doesn't exist
+ assert results_filepath.parent.exists(), "Results file parent directory does not exist"
+ logging.debug("Test results will be written to " + str(results_filepath))
+
enable_bitcoind = config["components"].getboolean("ENABLE_BITCOIND")
if not enable_bitcoind:
@@ -557,9 +568,10 @@ def main():
combined_logs_len=args.combinedlogslen,
failfast=args.failfast,
use_term_control=args.ansi,
+ results_filepath=results_filepath,
)
-def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, use_term_control):
+def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=False, args=None, combined_logs_len=0, failfast=False, use_term_control, results_filepath=None):
args = args or []
# Warn if bitcoind is already running
@@ -651,11 +663,14 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=
break
if "[Errno 28] No space left on device" in stdout:
- sys.exit(f"Early exiting after test failure due to insuffient free space in {tmpdir}\n"
+ sys.exit(f"Early exiting after test failure due to insufficient free space in {tmpdir}\n"
f"Test execution data left in {tmpdir}.\n"
f"Additional storage is needed to execute testing.")
- print_results(test_results, max_len_name, (int(time.time() - start_time)))
+ runtime = int(time.time() - start_time)
+ print_results(test_results, max_len_name, runtime)
+ if results_filepath:
+ write_results(test_results, results_filepath, runtime)
if coverage:
coverage_passed = coverage.report_rpc_coverage()
@@ -702,6 +717,17 @@ def print_results(test_results, max_len_name, runtime):
results += "Runtime: %s s\n" % (runtime)
print(results)
+
+def write_results(test_results, filepath, total_runtime):
+ with open(filepath, mode="w", encoding="utf8") as results_file:
+ results_writer = csv.writer(results_file)
+ results_writer.writerow(['test', 'status', 'duration(seconds)'])
+ all_passed = True
+ for test_result in test_results:
+ all_passed = all_passed and test_result.was_successful
+ results_writer.writerow([test_result.name, test_result.status, str(test_result.time)])
+ results_writer.writerow(['ALL', ("Passed" if all_passed else "Failed"), str(total_runtime)])
+
class TestHandler:
"""
Trigger the test scripts passed in via the list.
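The --resultsfile option added above emits one CSV row per test plus a trailing aggregate row named ALL, using the header written by write_results(). A small consumer sketch, assuming a hypothetical results path:

    import csv
    import sys

    # Path is an assumption; use whatever was passed to --resultsfile.
    with open("/tmp/test_results.csv", encoding="utf8") as f:
        header, *rows = list(csv.reader(f))

    assert header == ["test", "status", "duration(seconds)"]
    summary = rows[-1]  # the trailing "ALL" row written by write_results()
    print(f"{len(rows) - 1} tests, overall: {summary[1]} in {summary[2]}s")
    sys.exit(0 if summary[1] == "Passed" else 1)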
diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py
index 5b7db55f45..6d45adc823 100755
--- a/test/functional/wallet_bumpfee.py
+++ b/test/functional/wallet_bumpfee.py
@@ -117,6 +117,7 @@ class BumpFeeTest(BitcoinTestFramework):
# Context independent tests
test_feerate_checks_replaced_outputs(self, rbf_node, peer_node)
+ test_bumpfee_with_feerate_ignores_walletincrementalrelayfee(self, rbf_node, peer_node)
def test_invalid_parameters(self, rbf_node, peer_node, dest_address):
self.log.info('Test invalid parameters')
@@ -816,7 +817,7 @@ def test_feerate_checks_replaced_outputs(self, rbf_node, peer_node):
# Since the bumped tx will replace all of the outputs with a single output, we can estimate that its size will be 31 * (len(outputs) - 1) bytes smaller
tx_size = tx_details["decoded"]["vsize"]
est_bumped_size = tx_size - (len(tx_details["decoded"]["vout"]) - 1) * 31
- inc_fee_rate = max(rbf_node.getmempoolinfo()["incrementalrelayfee"], Decimal(0.00005000)) # Wallet has a fixed incremental relay fee of 5 sat/vb
+ inc_fee_rate = rbf_node.getmempoolinfo()["incrementalrelayfee"]
# RPC gives us fee as negative
min_fee = (-tx_details["fee"] + get_fee(est_bumped_size, inc_fee_rate)) * Decimal(1e8)
min_fee_rate = (min_fee / est_bumped_size).quantize(Decimal("1.000"))
@@ -830,5 +831,27 @@ def test_feerate_checks_replaced_outputs(self, rbf_node, peer_node):
self.clear_mempool()
+def test_bumpfee_with_feerate_ignores_walletincrementalrelayfee(self, rbf_node, peer_node):
+ self.log.info('Test that bumpfee with fee_rate ignores walletincrementalrelayfee')
+ # Make sure there is enough balance
+ peer_node.sendtoaddress(rbf_node.getnewaddress(), 2)
+ self.generate(peer_node, 1)
+
+ dest_address = peer_node.getnewaddress(address_type="bech32")
+ tx = rbf_node.send(outputs=[{dest_address: 1}], fee_rate=2)
+
+ # Ensure you cannot fee bump with a fee_rate below or equal to the original fee_rate
+ assert_raises_rpc_error(-8, "Insufficient total fee", rbf_node.bumpfee, tx["txid"], {"fee_rate": 1})
+ assert_raises_rpc_error(-8, "Insufficient total fee", rbf_node.bumpfee, tx["txid"], {"fee_rate": 2})
+
+ # Ensure you cannot fee bump if the fee_rate is higher than the original fee_rate but the total fee from the new fee_rate is
+ # less than (original fee + incrementalrelayfee)
+ assert_raises_rpc_error(-8, "Insufficient total fee", rbf_node.bumpfee, tx["txid"], {"fee_rate": 2.8})
+
+ # You can fee bump as long as the new fee set from fee_rate is at least (original fee + incrementalrelayfee)
+ rbf_node.bumpfee(tx["txid"], {"fee_rate": 3})
+ self.clear_mempool()
+
+
if __name__ == "__main__":
BumpFeeTest().main()
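The thresholds in test_bumpfee_with_feerate_ignores_walletincrementalrelayfee follow from the default incremental relay fee of 1 sat/vB: a replacement must pay at least the original fee plus the incremental relay fee over its own size, so with the original paying 2 sat/vB, only a bump to 3 sat/vB or more clears the bar while 2.8 sat/vB falls short. A back-of-the-envelope check (the vsize is a hypothetical round number):

    from decimal import Decimal

    vsize = Decimal(150)               # hypothetical replacement vsize in vB
    original_fee = Decimal(2) * vsize  # the original tx paid 2 sat/vB
    incremental = Decimal(1) * vsize   # default incrementalrelayfee of 1 sat/vB over the replacement

    for fee_rate in ("1", "2", "2.8", "3"):
        new_fee = Decimal(fee_rate) * vsize
        verdict = "accepted" if new_fee >= original_fee + incremental else "Insufficient total fee"
        print(f"fee_rate={fee_rate} sat/vB -> {verdict}")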
diff --git a/test/functional/wallet_fundrawtransaction.py b/test/functional/wallet_fundrawtransaction.py
index 71c883f166..3c1b2deb1d 100755
--- a/test/functional/wallet_fundrawtransaction.py
+++ b/test/functional/wallet_fundrawtransaction.py
@@ -114,6 +114,7 @@ class RawTransactionsTest(BitcoinTestFramework):
self.test_add_inputs_default_value()
self.test_preset_inputs_selection()
self.test_weight_calculation()
+ self.test_weight_limits()
self.test_change_position()
self.test_simple()
self.test_simple_two_coins()
@@ -1312,6 +1313,38 @@ class RawTransactionsTest(BitcoinTestFramework):
self.nodes[2].unloadwallet("test_weight_calculation")
+ def test_weight_limits(self):
+ self.log.info("Test weight limits")
+
+ self.nodes[2].createwallet("test_weight_limits")
+ wallet = self.nodes[2].get_wallet_rpc("test_weight_limits")
+
+ outputs = []
+ for _ in range(1472):
+ outputs.append({wallet.getnewaddress(address_type="legacy"): 0.1})
+ txid = self.nodes[0].send(outputs=outputs)["txid"]
+ self.generate(self.nodes[0], 1)
+
+ # 272 WU per input (273 when high-s); picking 1471 inputs will exceed the max standard tx weight.
+ rawtx = wallet.createrawtransaction([], [{wallet.getnewaddress(): 0.1 * 1471}])
+
+ # 1) Try to fund transaction only using the preset inputs
+ input_weights = []
+ for i in range(1471):
+ input_weights.append({"txid": txid, "vout": i, "weight": 273})
+ assert_raises_rpc_error(-4, "Transaction too large", wallet.fundrawtransaction, hexstring=rawtx, input_weights=input_weights)
+
+ # 2) Let the wallet fund the transaction
+ assert_raises_rpc_error(-4, "The inputs size exceeds the maximum weight. Please try sending a smaller amount or manually consolidating your wallet's UTXOs",
+ wallet.fundrawtransaction, hexstring=rawtx)
+
+ # 3) Pre-select some inputs and let the wallet fill up the remaining amount
+ inputs = input_weights[0:1000]
+ assert_raises_rpc_error(-4, "The combination of the pre-selected inputs and the wallet automatic inputs selection exceeds the transaction maximum weight. Please try sending a smaller amount or manually consolidating your wallet's UTXOs",
+ wallet.fundrawtransaction, hexstring=rawtx, input_weights=inputs)
+
+ self.nodes[2].unloadwallet("test_weight_limits")
+
def test_include_unsafe(self):
self.log.info("Test fundrawtxn with unsafe inputs")
diff --git a/test/functional/wallet_listsinceblock.py b/test/functional/wallet_listsinceblock.py
index fd586d546e..15214539a9 100755
--- a/test/functional/wallet_listsinceblock.py
+++ b/test/functional/wallet_listsinceblock.py
@@ -40,6 +40,7 @@ class ListSinceBlockTest(BitcoinTestFramework):
self.test_no_blockhash()
self.test_invalid_blockhash()
self.test_reorg()
+ self.test_cant_read_block()
self.test_double_spend()
self.test_double_send()
self.double_spends_filtered()
@@ -167,6 +168,31 @@ class ListSinceBlockTest(BitcoinTestFramework):
found = next(tx for tx in transactions if tx['txid'] == senttx)
assert_equal(found['blockheight'], self.nodes[0].getblockheader(nodes2_first_blockhash)['height'])
+ def test_cant_read_block(self):
+ self.log.info('Test the RPC error "Can\'t read block from disk"')
+
+ # Split network into two
+ self.split_network()
+
+ # generate on both sides
+ nodes1_last_blockhash = self.generate(self.nodes[1], 6, sync_fun=lambda: self.sync_all(self.nodes[:2]))[-1]
+ self.generate(self.nodes[2], 7, sync_fun=lambda: self.sync_all(self.nodes[2:]))[0]
+
+ self.join_network()
+
+ # Rename the block file to induce an unsuccessful block read
+ blk_dat = (self.nodes[0].blocks_path / "blk00000.dat")
+ blk_dat_moved = blk_dat.rename(self.nodes[0].blocks_path / "blk00000.dat.moved")
+ assert not blk_dat.exists()
+
+ # listsinceblock(nodes1_last_blockhash) should now fail as blocks are not accessible
+ assert_raises_rpc_error(-32603, "Can't read block from disk",
+ self.nodes[0].listsinceblock, nodes1_last_blockhash)
+
+ # Restoring block file
+ blk_dat_moved.rename(self.nodes[0].blocks_path / "blk00000.dat")
+ assert blk_dat.exists()
+
def test_double_spend(self):
'''
This tests the case where the same UTXO is spent twice on two separate
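The failure injection in test_cant_read_block works because pathlib.Path.rename returns the new path, which is then renamed back once the assertion has run. If the pattern is reused elsewhere, a try/finally wrapper keeps the block file from staying hidden after an unexpected failure; a minimal sketch (the helper name is hypothetical):

    from pathlib import Path

    def with_missing_block_file(blocks_path: Path, run_check):
        """Temporarily hide blk00000.dat, run the given check, then restore the file."""
        blk_dat = blocks_path / "blk00000.dat"
        moved = blk_dat.rename(blocks_path / "blk00000.dat.moved")
        try:
            run_check()
        finally:
            moved.rename(blk_dat)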
diff --git a/test/functional/wallet_send.py b/test/functional/wallet_send.py
index 0a0a8dba0d..bbb0d658d9 100755
--- a/test/functional/wallet_send.py
+++ b/test/functional/wallet_send.py
@@ -577,5 +577,39 @@ class WalletSendTest(BitcoinTestFramework):
# but rounded to nearest integer, it should be the same as the target fee rate
assert_equal(round(actual_fee_rate_sat_vb), target_fee_rate_sat_vb)
+ # Check tx creation size limits
+ self.test_weight_limits()
+
+ def test_weight_limits(self):
+ self.log.info("Test weight limits")
+
+ self.nodes[1].createwallet("test_weight_limits")
+ wallet = self.nodes[1].get_wallet_rpc("test_weight_limits")
+
+ # Generate future inputs; 272 WU per input (273 when high-s).
+ # Picking 1471 inputs will exceed the max standard tx weight.
+ outputs = []
+ for _ in range(1472):
+ outputs.append({wallet.getnewaddress(address_type="legacy"): 0.1})
+ self.nodes[0].send(outputs=outputs)
+ self.generate(self.nodes[0], 1)
+
+ # 1) Try to fund transaction only using the preset inputs
+ inputs = wallet.listunspent()
+ assert_raises_rpc_error(-4, "Transaction too large",
+ wallet.send, outputs=[{wallet.getnewaddress(): 0.1 * 1471}], options={"inputs": inputs, "add_inputs": False})
+
+ # 2) Let the wallet fund the transaction
+ assert_raises_rpc_error(-4, "The inputs size exceeds the maximum weight. Please try sending a smaller amount or manually consolidating your wallet's UTXOs",
+ wallet.send, outputs=[{wallet.getnewaddress(): 0.1 * 1471}])
+
+ # 3) Pre-select some inputs and let the wallet fill up the remaining amount
+ inputs = inputs[0:1000]
+ assert_raises_rpc_error(-4, "The combination of the pre-selected inputs and the wallet automatic inputs selection exceeds the transaction maximum weight. Please try sending a smaller amount or manually consolidating your wallet's UTXOs",
+ wallet.send, outputs=[{wallet.getnewaddress(): 0.1 * 1471}], options={"inputs": inputs, "add_inputs": True})
+
+ self.nodes[1].unloadwallet("test_weight_limits")
+
+
if __name__ == '__main__':
WalletSendTest().main()
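Note that the two new weight-limit tests pass preset inputs in different shapes: wallet_fundrawtransaction.py supplies explicit {"txid", "vout", "weight"} dictionaries, while wallet_send.py reuses wallet.listunspent() entries as-is; both forms are accepted, and the explicit form additionally lets the caller pin the assumed weight per input. A sketch of the explicit form (the txid is a placeholder):

    # Explicit preset inputs with pinned weights, as used by the fundrawtransaction variant above.
    placeholder_txid = "00" * 32
    input_weights = [{"txid": placeholder_txid, "vout": i, "weight": 273} for i in range(1471)]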