-rw-r--r--  .cirrus.yml  4
-rw-r--r--  .fuzzbuzz.yml  16
-rw-r--r--  REVIEWERS  18
-rw-r--r--  build_msvc/README.md  12
-rwxr-xr-x [-rw-r--r--]  build_msvc/msvc-autogen.py  0
-rw-r--r--  build_msvc/testconsensus/testconsensus.cpp  2
-rwxr-xr-x  ci/test/00_setup_env.sh  6
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_android.sh  0
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_arm.sh  0
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_i686_centos.sh  0
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_mac.sh  4
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_mac_host.sh  3
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_native_asan.sh  0
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_native_fuzz.sh  0
-rwxr-xr-x  ci/test/00_setup_env_native_fuzz_with_msan.sh  24
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_native_fuzz_with_valgrind.sh  0
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_native_msan.sh  6
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_native_multiprocess.sh  7
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_native_nowallet.sh  0
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_native_qt5.sh  0
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_native_tsan.sh  0
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_native_valgrind.sh  0
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_s390x.sh  0
-rwxr-xr-x [-rw-r--r--]  ci/test/00_setup_env_win64.sh  0
-rwxr-xr-x  ci/test/04_install.sh  5
-rw-r--r--  configure.ac  13
-rwxr-xr-x  contrib/devtools/security-check.py  133
-rwxr-xr-x  contrib/devtools/symbol-check.py  47
-rwxr-xr-x  contrib/gitian-build.py  2
-rw-r--r-- [-rwxr-xr-x]  contrib/gitian-descriptors/assign_DISTNAME  0
-rw-r--r--  contrib/gitian-descriptors/gitian-linux.yml  3
-rw-r--r--  contrib/gitian-descriptors/gitian-osx.yml  7
-rw-r--r--  contrib/gitian-descriptors/gitian-win.yml  3
-rw-r--r--  contrib/guix/README.md  2
-rwxr-xr-x [-rw-r--r--]  contrib/guix/libexec/build.sh  2
-rw-r--r--  contrib/guix/manifest.scm  33
-rw-r--r--  contrib/macdeploy/README.md  18
-rwxr-xr-x [-rw-r--r--]  contrib/qos/tc.sh  0
-rwxr-xr-x  contrib/seeds/generate-seeds.py  7
-rw-r--r--  contrib/seeds/nodes_main.txt  10
-rw-r--r--  depends/Makefile  58
-rw-r--r--  depends/builders/darwin.mk  8
-rw-r--r--  depends/config.site.in  8
-rw-r--r--  depends/funcs.mk  2
-rwxr-xr-x  depends/gen_id  74
-rw-r--r--  depends/hosts/darwin.mk  23
-rw-r--r--  depends/hosts/linux.mk  2
-rw-r--r--  depends/packages/boost.mk  2
-rw-r--r--  depends/packages/native_cctools.mk  9
-rw-r--r--  depends/packages/native_clang.mk  10
-rw-r--r--  depends/packages/native_libtapi.mk  4
-rw-r--r--  depends/packages/qt.mk  3
-rw-r--r--  depends/patches/native_cctools/ld64_disable_threading.patch  26
-rw-r--r--  depends/patches/qt/fix_qpainter_non_determinism.patch  63
-rw-r--r--  doc/files.md  1
-rw-r--r--  doc/fuzzing.md  12
-rw-r--r--  doc/reduce-memory.md  10
-rw-r--r--  doc/release-notes-20867.md  11
-rw-r--r--  doc/release-notes/release-notes-0.21.1.md  203
-rw-r--r--  doc/tor.md  4
-rw-r--r--  share/examples/bitcoin.conf  12
-rw-r--r--  src/Makefile.test.include  5
-rw-r--r--  src/bench/block_assemble.cpp  15
-rw-r--r--  src/chainparamsseeds.h  8
-rw-r--r--  src/compressor.cpp  2
-rw-r--r--  src/dbwrapper.cpp  7
-rw-r--r--  src/index/base.cpp  2
-rw-r--r--  src/index/blockfilterindex.cpp  12
-rw-r--r--  src/index/coinstatsindex.cpp  10
-rw-r--r--  src/index/txindex.cpp  18
-rw-r--r--  src/init.cpp  47
-rw-r--r--  src/key.cpp  4
-rw-r--r--  src/key.h  2
-rw-r--r--  src/net.cpp  27
-rw-r--r--  src/net_processing.cpp  21
-rw-r--r--  src/net_processing.h  6
-rw-r--r--  src/node/blockstorage.cpp  326
-rw-r--r--  src/node/blockstorage.h  41
-rw-r--r--  src/node/transaction.cpp  2
-rw-r--r--  src/node/utxo_snapshot.h  9
-rw-r--r--  src/pubkey.cpp  2
-rw-r--r--  src/pubkey.h  4
-rw-r--r--  src/qt/forms/debugwindow.ui  6
-rw-r--r--  src/qt/test/rpcnestedtests.cpp  6
-rw-r--r--  src/qt/walletmodel.cpp  2
-rw-r--r--  src/random.cpp  2
-rw-r--r--  src/rpc/blockchain.cpp  2
-rw-r--r--  src/rpc/net.cpp  4
-rw-r--r--  src/rpc/util.cpp  12
-rw-r--r--  src/script/descriptor.cpp  9
-rw-r--r--  src/script/interpreter.cpp  4
-rw-r--r--  src/script/interpreter.h  2
-rw-r--r--  src/script/sigcache.cpp  4
-rw-r--r--  src/script/sign.cpp  2
-rw-r--r--  src/script/standard.cpp  55
-rw-r--r--  src/streams.h  2
-rw-r--r--  src/test/compress_tests.cpp  10
-rw-r--r--  src/test/crypto_tests.cpp  8
-rw-r--r--  src/test/data/tx_invalid.json  4
-rw-r--r--  src/test/descriptor_tests.cpp  35
-rwxr-xr-x  src/test/fuzz/danger_link_all.sh  28
-rw-r--r--  src/test/fuzz/fuzz.cpp  13
-rw-r--r--  src/test/fuzz/parse_iso8601.cpp  2
-rw-r--r--  src/test/fuzz/rpc.cpp  32
-rw-r--r--  src/test/fuzz/script_assets_test_minimizer.cpp  17
-rw-r--r--  src/test/fuzz/tx_pool.cpp  75
-rw-r--r--  src/test/fuzz/util.cpp  13
-rw-r--r--  src/test/fuzz/util.h  23
-rw-r--r--  src/test/script_standard_tests.cpp  34
-rw-r--r--  src/test/script_tests.cpp  26
-rw-r--r--  src/test/streams_tests.cpp  11
-rw-r--r--  src/test/validation_chainstatemanager_tests.cpp  6
-rw-r--r--  src/util/asmap.cpp  6
-rw-r--r--  src/validation.cpp  287
-rw-r--r--  src/validation.h  35
-rw-r--r--  src/wallet/crypter.cpp  6
-rw-r--r--  src/wallet/rpcdump.cpp  8
-rw-r--r--  src/wallet/salvage.cpp  4
-rw-r--r--  src/wallet/scriptpubkeyman.cpp  2
-rw-r--r--  src/wallet/test/wallet_tests.cpp  1
-rw-r--r--  src/wallet/wallet.cpp  4
-rw-r--r--  src/zmq/zmqpublishnotifier.cpp  2
-rw-r--r--  test/README.md  2
-rwxr-xr-x  test/functional/feature_cltv.py  40
-rwxr-xr-x  test/functional/feature_config_args.py  2
-rwxr-xr-x  test/functional/interface_bitcoin_cli.py  2
-rwxr-xr-x  test/functional/p2p_blocksonly.py  26
-rwxr-xr-x  test/functional/p2p_invalid_messages.py  10
-rwxr-xr-x [-rw-r--r--]  test/functional/rpc_addresses_deprecation.py  0
-rwxr-xr-x  test/functional/rpc_blockchain.py  2
-rwxr-xr-x  test/functional/rpc_dumptxoutset.py  2
-rwxr-xr-x  test/functional/rpc_fundrawtransaction.py  4
-rw-r--r--  test/functional/test_framework/wallet.py  23
-rwxr-xr-x  test/functional/wallet_importdescriptors.py  71
-rwxr-xr-x  test/lint/lint-filenames.sh  24
-rwxr-xr-x  test/lint/lint-files.py  203
-rwxr-xr-x  test/lint/lint-files.sh  7
-rwxr-xr-x  test/lint/lint-locale-dependence.sh  2
-rwxr-xr-x  test/lint/lint-shebang.sh  24
-rw-r--r--  test/sanitizer_suppressions/ubsan  3
140 files changed, 1653 insertions, 1100 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index e353101ecc..01e0cebc92 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -112,7 +112,7 @@ task:
FILE_ENV: "./ci/test/00_setup_env_i686_centos.sh"
task:
- name: '[previous releases, uses qt5 dev package and some depends packages] [unsigned char] [bionic]'
+ name: '[previous releases, uses qt5 dev package and some depends packages, DEBUG] [unsigned char] [bionic]'
previous_releases_cache:
folder: "releases"
<< : *GLOBAL_TASK_TEMPLATE
@@ -161,7 +161,7 @@ task:
FILE_ENV: "./ci/test/00_setup_env_native_fuzz.sh"
task:
- name: '[multiprocess] [focal]'
+ name: '[multiprocess, DEBUG] [focal]'
<< : *GLOBAL_TASK_TEMPLATE
container:
image: ubuntu:focal
diff --git a/.fuzzbuzz.yml b/.fuzzbuzz.yml
deleted file mode 100644
index e40b4df165..0000000000
--- a/.fuzzbuzz.yml
+++ /dev/null
@@ -1,16 +0,0 @@
-base: ubuntu:16.04
-language: c++
-engine: libFuzzer
-environment:
- - CXXFLAGS=-fcoverage-mapping -fno-omit-frame-pointer -fprofile-instr-generate -gline-tables-only -O1
-setup:
- - sudo apt-get update
- - sudo apt-get install -y autoconf bsdmainutils clang git libboost-system-dev libboost-filesystem-dev libboost-test-dev libc++1 libc++abi1 libc++abi-dev libc++-dev libclang1 libclang-dev libdb5.3++ libevent-dev libllvm-ocaml-dev libomp5 libomp-dev libqt5core5a libqt5dbus5 libqt5gui5 libtool llvm llvm-dev llvm-runtime pkg-config qttools5-dev qttools5-dev-tools software-properties-common
- - ./autogen.sh
- - CC=clang CXX=clang++ ./configure --enable-fuzz --with-sanitizers=address,fuzzer,undefined --enable-danger-fuzz-link-all
- - make
- - git clone https://github.com/bitcoin-core/qa-assets
-auto_targets:
- find_targets_command: find src/test/fuzz/ -executable -type f ! -name "*.cpp" ! -name "*.h"
- base_corpus_dir: qa-assets/fuzz_seed_corpus/
- memory_limit: none
diff --git a/REVIEWERS b/REVIEWERS
index fa9a8f525f..d4d8939e1c 100644
--- a/REVIEWERS
+++ b/REVIEWERS
@@ -19,6 +19,7 @@
# Maintainers
# @fanquake
+# @hebasto
# @jonasschnelli
# @laanwj
# @marcofalke
@@ -59,11 +60,16 @@
/src/test/fuzz/ @practicalswift
/doc/fuzzing.md @practicalswift
-# Test framework
+# Tests
+/src/test/net_peer_eviction_tests.cpp @jonatack
/test/functional/mempool_updatefromblock.py @hebasto
/test/functional/feature_asmap.py @jonatack
/test/functional/interface_bitcoin_cli.py @jonatack
-/test/functional/tool_wallet.py @jonatack
+
+# Backwards compatibility tests
+*_compatibility.py @sjors
+/test/functional/wallet_upgradewallet.py @sjors @achow101
+/test/get_previous_releases.py @sjors
# Translations
/src/util/translation.h @hebasto
@@ -98,6 +104,11 @@
# Descriptors
*descriptor* @achow101 @sipa
+# External signer
+*external_signer* @sjors
+/doc/external-signer.md @sjors
+*signer.py @sjors
+
# Interfaces
/src/interfaces/ @ryanofsky
@@ -105,8 +116,7 @@
/src/txdb.* @jamesob
/src/dbwrapper.* @jamesob
-# Scripts/Linter
-*.sh @practicalswift
+# Linter
/test/lint/ @practicalswift
/test/lint/lint-shell.sh @hebasto
diff --git a/build_msvc/README.md b/build_msvc/README.md
index 7c1704d30f..88a05644a7 100644
--- a/build_msvc/README.md
+++ b/build_msvc/README.md
@@ -3,7 +3,7 @@ Building Bitcoin Core with Visual Studio
Introduction
---------------------
-Solution and project files to build the Bitcoin Core applications `msbuild` or Visual Studio can be found in the `build_msvc` directory. The build has been tested with Visual Studio 2017 and 2019.
+Solution and project files to build the Bitcoin Core applications with `msbuild` or Visual Studio can be found in the `build_msvc` directory. The build has been tested with Visual Studio 2019 (building with earlier versions of Visual Studio should not be expected to work).
Building with Visual Studio is an alternative to the Linux based [cross-compiler build](https://github.com/bitcoin/bitcoin/blob/master/doc/build-windows.md).
@@ -35,7 +35,7 @@ vcpkg integrate install
Qt
---------------------
-In order to build the Bitcoin Core a static build of Qt is required. The runtime library version (e.g. v141, v142) and platform type (x86 or x64) must also match.
+In order to build Bitcoin Core a static build of Qt is required. The runtime library version (e.g. v142) and platform type (x86 or x64) must also match.
Some prebuilt x64 versions of Qt can be downloaded from [here](https://github.com/sipsorcery/qt_win_binary/releases). Please be aware these downloads are NOT officially sanctioned by Bitcoin Core and are provided for developer convenience only. They should NOT be used for builds that will be used in a production environment or with real funds.
@@ -57,19 +57,13 @@ PS >py -3 msvc-autogen.py
- An optional step is to adjust the settings in the `build_msvc` directory and the `common.init.vcxproj` file. This project file contains settings that are common to all projects such as the runtime library version and target Windows SDK version. The Qt directories can also be set.
-- To build from the command line with the Visual Studio 2017 toolchain use:
-
-```
-msbuild /m bitcoin.sln /p:Platform=x64 /p:Configuration=Release /p:PlatformToolset=v141 /t:build
-```
-
- To build from the command line with the Visual Studio 2019 toolchain use:
```
msbuild /m bitcoin.sln /p:Platform=x64 /p:Configuration=Release /t:build
```
-- Alternatively open the `build_msvc/bitcoin.sln` file in Visual Studio.
+- Alternatively, open the `build_msvc/bitcoin.sln` file in Visual Studio 2019.
AppVeyor
---------------------
diff --git a/build_msvc/msvc-autogen.py b/build_msvc/msvc-autogen.py
index d99b17d381..d99b17d381 100644..100755
--- a/build_msvc/msvc-autogen.py
+++ b/build_msvc/msvc-autogen.py
diff --git a/build_msvc/testconsensus/testconsensus.cpp b/build_msvc/testconsensus/testconsensus.cpp
index 115c92792d..f3c8517130 100644
--- a/build_msvc/testconsensus/testconsensus.cpp
+++ b/build_msvc/testconsensus/testconsensus.cpp
@@ -45,7 +45,7 @@ int main()
stream << vanillaSpendTx;
bitcoinconsensus_error err;
- auto op0Result = bitcoinconsensus_verify_script_with_amount(pubKeyScript.data(), pubKeyScript.size(), amount, (const unsigned char*)&stream[0], stream.size(), 0, bitcoinconsensus_SCRIPT_FLAGS_VERIFY_ALL, &err);
+ auto op0Result = bitcoinconsensus_verify_script_with_amount(pubKeyScript.data(), pubKeyScript.size(), amount, stream.data(), stream.size(), 0, bitcoinconsensus_SCRIPT_FLAGS_VERIFY_ALL, &err);
std::cout << "Op0 result: " << op0Result << ", error code " << err << std::endl;
getchar();
diff --git a/ci/test/00_setup_env.sh b/ci/test/00_setup_env.sh
index e6aec723bc..fa4d0410fa 100755
--- a/ci/test/00_setup_env.sh
+++ b/ci/test/00_setup_env.sh
@@ -14,6 +14,9 @@ export BASE_ROOT_DIR
# The depends dir.
# This folder exists on the ci host and ci guest. Changes are propagated back and forth.
export DEPENDS_DIR=${DEPENDS_DIR:-$BASE_ROOT_DIR/depends}
+# A folder for the ci system to put temporary files (ccache, datadirs for tests, ...)
+# This folder only exists on the ci host.
+export BASE_SCRATCH_DIR=${BASE_SCRATCH_DIR:-$BASE_ROOT_DIR/ci/scratch}
echo "Setting specific values in env"
if [ -n "${FILE_ENV}" ]; then
@@ -25,9 +28,6 @@ fi
echo "Fallback to default values in env (if not yet set)"
# The number of parallel jobs to pass down to make and test_runner.py
export MAKEJOBS=${MAKEJOBS:--j4}
-# A folder for the ci system to put temporary files (ccache, datadirs for tests, ...)
-# This folder only exists on the ci host.
-export BASE_SCRATCH_DIR=${BASE_SCRATCH_DIR:-$BASE_ROOT_DIR/ci/scratch}
# What host to compile for. See also ./depends/README.md
# Tests that need cross-compilation export the appropriate HOST.
# Tests that run natively guess the host
diff --git a/ci/test/00_setup_env_android.sh b/ci/test/00_setup_env_android.sh
index f78a84eeac..f78a84eeac 100644..100755
--- a/ci/test/00_setup_env_android.sh
+++ b/ci/test/00_setup_env_android.sh
diff --git a/ci/test/00_setup_env_arm.sh b/ci/test/00_setup_env_arm.sh
index 07f099b85c..07f099b85c 100644..100755
--- a/ci/test/00_setup_env_arm.sh
+++ b/ci/test/00_setup_env_arm.sh
diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh
index 05c724fc0b..05c724fc0b 100644..100755
--- a/ci/test/00_setup_env_i686_centos.sh
+++ b/ci/test/00_setup_env_i686_centos.sh
diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh
index f051318a58..196394e908 100644..100755
--- a/ci/test/00_setup_env_mac.sh
+++ b/ci/test/00_setup_env_mac.sh
@@ -10,8 +10,8 @@ export CONTAINER_NAME=ci_macos_cross
export DOCKER_NAME_TAG=ubuntu:20.04 # Check that Focal can cross-compile to macos (Focal is used in the gitian build as well)
export HOST=x86_64-apple-darwin18
export PACKAGES="cmake imagemagick librsvg2-bin libz-dev libtiff-tools libtinfo5 python3-setuptools xorriso"
-export XCODE_VERSION=11.3.1
-export XCODE_BUILD_ID=11C505
+export XCODE_VERSION=12.1
+export XCODE_BUILD_ID=12A7403
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
export GOAL="deploy"
diff --git a/ci/test/00_setup_env_mac_host.sh b/ci/test/00_setup_env_mac_host.sh
index e54e78add4..898c1530a1 100644..100755
--- a/ci/test/00_setup_env_mac_host.sh
+++ b/ci/test/00_setup_env_mac_host.sh
@@ -7,12 +7,11 @@
export LC_ALL=C.UTF-8
export HOST=x86_64-apple-darwin18
-export PIP_PACKAGES="zmq"
+export PIP_PACKAGES="zmq lief"
export GOAL="install"
export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-external-signer"
export CI_OS_NAME="macos"
export NO_DEPENDS=1
export OSX_SDK=""
export CCACHE_SIZE=300M
-
export RUN_SECURITY_TESTS="true"
diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh
index 92af98aa9b..92af98aa9b 100644..100755
--- a/ci/test/00_setup_env_native_asan.sh
+++ b/ci/test/00_setup_env_native_asan.sh
diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh
index bedd0cf9aa..bedd0cf9aa 100644..100755
--- a/ci/test/00_setup_env_native_fuzz.sh
+++ b/ci/test/00_setup_env_native_fuzz.sh
diff --git a/ci/test/00_setup_env_native_fuzz_with_msan.sh b/ci/test/00_setup_env_native_fuzz_with_msan.sh
new file mode 100755
index 0000000000..d5b28ca5cf
--- /dev/null
+++ b/ci/test/00_setup_env_native_fuzz_with_msan.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+export LC_ALL=C.UTF-8
+
+export DOCKER_NAME_TAG="ubuntu:20.04"
+LIBCXX_DIR="${BASE_SCRATCH_DIR}/msan/build/"
+export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls"
+LIBCXX_FLAGS="-nostdinc++ -stdlib=libc++ -L${LIBCXX_DIR}lib -lc++abi -I${LIBCXX_DIR}include -I${LIBCXX_DIR}include/c++/v1 -lpthread -Wl,-rpath,${LIBCXX_DIR}lib -Wno-unused-command-line-argument"
+export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}"
+
+export CONTAINER_NAME="ci_native_msan"
+export PACKAGES="clang-9 llvm-9 cmake"
+export DEP_OPTS="NO_BDB=1 NO_QT=1 CC='clang' CXX='clang++' CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}' boost_cxxflags='-std=c++17 -fvisibility=hidden -fPIC ${MSAN_AND_LIBCXX_FLAGS}' libevent_cflags='${MSAN_FLAGS}' zeromq_cxxflags='-std=c++17 ${MSAN_AND_LIBCXX_FLAGS}'"
+export GOAL="install"
+export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,memory --with-asm=no --prefix=${DEPENDS_DIR}/x86_64-pc-linux-gnu/ CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}'"
+export USE_MEMORY_SANITIZER="true"
+export RUN_UNIT_TESTS="false"
+export RUN_FUNCTIONAL_TESTS="false"
+export RUN_FUZZ_TESTS=true
+export CCACHE_SIZE=250M
diff --git a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh
index 2cf672b91e..2cf672b91e 100644..100755
--- a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh
+++ b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh
diff --git a/ci/test/00_setup_env_native_msan.sh b/ci/test/00_setup_env_native_msan.sh
index 3ce50f816f..7bcf9f23a2 100644..100755
--- a/ci/test/00_setup_env_native_msan.sh
+++ b/ci/test/00_setup_env_native_msan.sh
@@ -7,7 +7,7 @@
export LC_ALL=C.UTF-8
export DOCKER_NAME_TAG="ubuntu:20.04"
-LIBCXX_DIR="${BASE_ROOT_DIR}/ci/scratch/msan/build/"
+LIBCXX_DIR="${BASE_SCRATCH_DIR}/msan/build/"
export MSAN_FLAGS="-fsanitize=memory -fsanitize-memory-track-origins=2 -fno-omit-frame-pointer -g -O1 -fno-optimize-sibling-calls"
LIBCXX_FLAGS="-nostdinc++ -stdlib=libc++ -L${LIBCXX_DIR}lib -lc++abi -I${LIBCXX_DIR}include -I${LIBCXX_DIR}include/c++/v1 -lpthread -Wl,-rpath,${LIBCXX_DIR}lib -Wno-unused-command-line-argument"
export MSAN_AND_LIBCXX_FLAGS="${MSAN_FLAGS} ${LIBCXX_FLAGS}"
@@ -15,9 +15,9 @@ export BDB_PREFIX="${BASE_ROOT_DIR}/db4"
export CONTAINER_NAME="ci_native_msan"
export PACKAGES="clang-9 llvm-9 cmake"
-export DEP_OPTS="NO_BDB=1 NO_QT=1 CC='clang' CXX='clang++' CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}' boost_cxxflags='-std=c++17 -fvisibility=hidden -fPIC ${MSAN_AND_LIBCXX_FLAGS}' zeromq_cxxflags='-std=c++17 ${MSAN_AND_LIBCXX_FLAGS}'"
+export DEP_OPTS="NO_BDB=1 NO_QT=1 CC='clang' CXX='clang++' CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}' boost_cxxflags='-std=c++17 -fvisibility=hidden -fPIC ${MSAN_AND_LIBCXX_FLAGS}' libevent_cflags='${MSAN_FLAGS}' sqlite_cflags='${MSAN_FLAGS}' zeromq_cxxflags='-std=c++17 ${MSAN_AND_LIBCXX_FLAGS}'"
export GOAL="install"
-export BITCOIN_CONFIG="--enable-wallet --with-sanitizers=memory --with-asm=no --prefix=${BASE_ROOT_DIR}/depends/x86_64-pc-linux-gnu/ CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}' BDB_LIBS='-L${BDB_PREFIX}/lib -ldb_cxx-4.8' BDB_CFLAGS='-I${BDB_PREFIX}/include'"
+export BITCOIN_CONFIG="--enable-wallet --with-sanitizers=memory --with-asm=no --prefix=${DEPENDS_DIR}/x86_64-pc-linux-gnu/ CC=clang CXX=clang++ CFLAGS='${MSAN_FLAGS}' CXXFLAGS='${MSAN_AND_LIBCXX_FLAGS}' BDB_LIBS='-L${BDB_PREFIX}/lib -ldb_cxx-4.8' BDB_CFLAGS='-I${BDB_PREFIX}/include'"
export USE_MEMORY_SANITIZER="true"
export RUN_FUNCTIONAL_TESTS="false"
export CCACHE_SIZE=250M
diff --git a/ci/test/00_setup_env_native_multiprocess.sh b/ci/test/00_setup_env_native_multiprocess.sh
index 26a3996ce2..37d714400b 100644..100755
--- a/ci/test/00_setup_env_native_multiprocess.sh
+++ b/ci/test/00_setup_env_native_multiprocess.sh
@@ -8,9 +8,10 @@ export LC_ALL=C.UTF-8
export CONTAINER_NAME=ci_native_multiprocess
export DOCKER_NAME_TAG=ubuntu:20.04
-export PACKAGES="cmake python3"
-export DEP_OPTS="MULTIPROCESS=1"
+export PACKAGES="cmake python3 python3-pip llvm clang"
+export DEP_OPTS="DEBUG=1 MULTIPROCESS=1"
export GOAL="install"
-export BITCOIN_CONFIG="--enable-external-signer"
+export BITCOIN_CONFIG="--enable-external-signer --enable-debug CC=clang CXX=clang++" # Use clang to avoid OOM
export TEST_RUNNER_ENV="BITCOIND=bitcoin-node"
export RUN_SECURITY_TESTS="true"
+export PIP_PACKAGES="lief"
diff --git a/ci/test/00_setup_env_native_nowallet.sh b/ci/test/00_setup_env_native_nowallet.sh
index a496b5af6e..a496b5af6e 100644..100755
--- a/ci/test/00_setup_env_native_nowallet.sh
+++ b/ci/test/00_setup_env_native_nowallet.sh
diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh
index 61948ab221..61948ab221 100644..100755
--- a/ci/test/00_setup_env_native_qt5.sh
+++ b/ci/test/00_setup_env_native_qt5.sh
diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh
index 33f63fa9ba..33f63fa9ba 100644..100755
--- a/ci/test/00_setup_env_native_tsan.sh
+++ b/ci/test/00_setup_env_native_tsan.sh
diff --git a/ci/test/00_setup_env_native_valgrind.sh b/ci/test/00_setup_env_native_valgrind.sh
index e079a7057c..e079a7057c 100644..100755
--- a/ci/test/00_setup_env_native_valgrind.sh
+++ b/ci/test/00_setup_env_native_valgrind.sh
diff --git a/ci/test/00_setup_env_s390x.sh b/ci/test/00_setup_env_s390x.sh
index 88b431f3c7..88b431f3c7 100644..100755
--- a/ci/test/00_setup_env_s390x.sh
+++ b/ci/test/00_setup_env_s390x.sh
diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh
index 4d5bde13fd..4d5bde13fd 100644..100755
--- a/ci/test/00_setup_env_win64.sh
+++ b/ci/test/00_setup_env_win64.sh
diff --git a/ci/test/04_install.sh b/ci/test/04_install.sh
index 608acfc2cf..01dbfe221b 100755
--- a/ci/test/04_install.sh
+++ b/ci/test/04_install.sh
@@ -67,6 +67,9 @@ if [[ $DOCKER_NAME_TAG == centos* ]]; then
elif [ "$CI_USE_APT_INSTALL" != "no" ]; then
${CI_RETRY_EXE} DOCKER_EXEC apt-get update
${CI_RETRY_EXE} DOCKER_EXEC apt-get install --no-install-recommends --no-upgrade -y $PACKAGES $DOCKER_PACKAGES
+ if [ -n "$PIP_PACKAGES" ]; then
+ ${CI_RETRY_EXE} pip3 install --user $PIP_PACKAGES
+ fi
fi
if [ "$CI_OS_NAME" == "macos" ]; then
@@ -92,7 +95,7 @@ if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then
DOCKER_EXEC "update-alternatives --install /usr/bin/clang++ clang++ \$(which clang++-9) 100"
DOCKER_EXEC "update-alternatives --install /usr/bin/clang clang \$(which clang-9) 100"
DOCKER_EXEC "mkdir -p ${BASE_SCRATCH_DIR}/msan/build/"
- DOCKER_EXEC "git clone --depth=1 https://github.com/llvm/llvm-project -b llvmorg-10.0.0 ${BASE_SCRATCH_DIR}/msan/llvm-project"
+ DOCKER_EXEC "git clone --depth=1 https://github.com/llvm/llvm-project -b llvmorg-12.0.0 ${BASE_SCRATCH_DIR}/msan/llvm-project"
DOCKER_EXEC "cd ${BASE_SCRATCH_DIR}/msan/build/ && cmake -DLLVM_ENABLE_PROJECTS='libcxx;libcxxabi' -DCMAKE_BUILD_TYPE=Release -DLLVM_USE_SANITIZER=Memory -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -DLLVM_TARGETS_TO_BUILD=X86 ../llvm-project/llvm/"
DOCKER_EXEC "cd ${BASE_SCRATCH_DIR}/msan/build/ && make $MAKEJOBS cxx"
fi
diff --git a/configure.ac b/configure.ac
index d1707dfd64..2bc404250d 100644
--- a/configure.ac
+++ b/configure.ac
@@ -194,12 +194,6 @@ AC_ARG_ENABLE([fuzz-binary],
[enable_fuzz_binary=$enableval],
[enable_fuzz_binary=yes])
-AC_ARG_ENABLE([danger_fuzz_link_all],
- AS_HELP_STRING([--enable-danger-fuzz-link-all],
- [Danger! Modifies source code. Needs git and gnu sed installed. Link each fuzz target (default no).]),
- [enable_danger_fuzz_link_all=$enableval],
- [enable_danger_fuzz_link_all=no])
-
AC_ARG_WITH([qrencode],
[AS_HELP_STRING([--with-qrencode],
[enable QR code support (default is yes if qt is enabled and libqrencode is found)])],
@@ -1326,9 +1320,11 @@ fi
if test x$enable_wallet != xno; then
dnl Check for libdb_cxx only if wallet enabled
- BITCOIN_FIND_BDB48
- if test x$suppress_external_warnings != xno ; then
+ if test "x$use_bdb" != "xno"; then
+ BITCOIN_FIND_BDB48
+ if test x$suppress_external_warnings != xno ; then
BDB_CPPFLAGS=SUPPRESS_WARNINGS($BDB_CPPFLAGS)
+ fi
fi
dnl Check for sqlite3
@@ -1788,7 +1784,6 @@ AM_CONDITIONAL([ENABLE_TRACING],[test x$have_sdt = xyes])
AM_CONDITIONAL([ENABLE_TESTS],[test x$BUILD_TEST = xyes])
AM_CONDITIONAL([ENABLE_FUZZ],[test x$enable_fuzz = xyes])
AM_CONDITIONAL([ENABLE_FUZZ_BINARY],[test x$enable_fuzz_binary = xyes])
-AM_CONDITIONAL([ENABLE_FUZZ_LINK_ALL],[test x$enable_danger_fuzz_link_all = xyes])
AM_CONDITIONAL([ENABLE_QT],[test x$bitcoin_enable_qt = xyes])
AM_CONDITIONAL([ENABLE_QT_TESTS],[test x$BUILD_TEST_QT = xyes])
AM_CONDITIONAL([ENABLE_BENCH],[test x$use_bench = xyes])
diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py
index 7b09c42fde..b6628c2ad5 100755
--- a/contrib/devtools/security-check.py
+++ b/contrib/devtools/security-check.py
@@ -6,22 +6,13 @@
Perform basic security checks on a series of executables.
Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
-Needs `objdump` (for PE) and `otool` (for MACHO).
'''
-import subprocess
import sys
-import os
from typing import List, Optional
+import lief
import pixie
-OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
-OTOOL_CMD = os.getenv('OTOOL', '/usr/bin/otool')
-
-def run_command(command) -> str:
- p = subprocess.run(command, stdout=subprocess.PIPE, check=True, universal_newlines=True)
- return p.stdout
-
def check_ELF_PIE(executable) -> bool:
'''
Check for position independent executable (PIE), allowing for address space randomization.
@@ -143,112 +134,59 @@ def check_ELF_separate_code(executable):
return False
return True
-def get_PE_dll_characteristics(executable) -> int:
- '''Get PE DllCharacteristics bits'''
- stdout = run_command([OBJDUMP_CMD, '-x', executable])
-
- bits = 0
- for line in stdout.splitlines():
- tokens = line.split()
- if len(tokens)>=2 and tokens[0] == 'DllCharacteristics':
- bits = int(tokens[1],16)
- return bits
-
-IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA = 0x0020
-IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE = 0x0040
-IMAGE_DLL_CHARACTERISTICS_NX_COMPAT = 0x0100
-
def check_PE_DYNAMIC_BASE(executable) -> bool:
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
- bits = get_PE_dll_characteristics(executable)
- return (bits & IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE) == IMAGE_DLL_CHARACTERISTICS_DYNAMIC_BASE
+ binary = lief.parse(executable)
+ return lief.PE.DLL_CHARACTERISTICS.DYNAMIC_BASE in binary.optional_header.dll_characteristics_lists
# Must support high-entropy 64-bit address space layout randomization
# in addition to DYNAMIC_BASE to have secure ASLR.
def check_PE_HIGH_ENTROPY_VA(executable) -> bool:
'''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
- bits = get_PE_dll_characteristics(executable)
- return (bits & IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA) == IMAGE_DLL_CHARACTERISTICS_HIGH_ENTROPY_VA
+ binary = lief.parse(executable)
+ return lief.PE.DLL_CHARACTERISTICS.HIGH_ENTROPY_VA in binary.optional_header.dll_characteristics_lists
def check_PE_RELOC_SECTION(executable) -> bool:
'''Check for a reloc section. This is required for functional ASLR.'''
- stdout = run_command([OBJDUMP_CMD, '-h', executable])
-
- for line in stdout.splitlines():
- if '.reloc' in line:
- return True
- return False
-
-def check_PE_NX(executable) -> bool:
- '''NX: DllCharacteristics bit 0x100 signifies nxcompat (DEP)'''
- bits = get_PE_dll_characteristics(executable)
- return (bits & IMAGE_DLL_CHARACTERISTICS_NX_COMPAT) == IMAGE_DLL_CHARACTERISTICS_NX_COMPAT
-
-def get_MACHO_executable_flags(executable) -> List[str]:
- stdout = run_command([OTOOL_CMD, '-vh', executable])
-
- flags: List[str] = []
- for line in stdout.splitlines():
- tokens = line.split()
- # filter first two header lines
- if 'magic' in tokens or 'Mach' in tokens:
- continue
- # filter ncmds and sizeofcmds values
- flags += [t for t in tokens if not t.isdigit()]
- return flags
-
-def check_MACHO_PIE(executable) -> bool:
- '''
- Check for position independent executable (PIE), allowing for address space randomization.
- '''
- flags = get_MACHO_executable_flags(executable)
- if 'PIE' in flags:
- return True
- return False
+ binary = lief.parse(executable)
+ return binary.has_relocations
def check_MACHO_NOUNDEFS(executable) -> bool:
'''
Check for no undefined references.
'''
- flags = get_MACHO_executable_flags(executable)
- if 'NOUNDEFS' in flags:
- return True
- return False
-
-def check_MACHO_NX(executable) -> bool:
- '''
- Check for no stack execution
- '''
- flags = get_MACHO_executable_flags(executable)
- if 'ALLOW_STACK_EXECUTION' in flags:
- return False
- return True
+ binary = lief.parse(executable)
+ return binary.header.has(lief.MachO.HEADER_FLAGS.NOUNDEFS)
def check_MACHO_LAZY_BINDINGS(executable) -> bool:
'''
Check for no lazy bindings.
We don't use or check for MH_BINDATLOAD. See #18295.
'''
- stdout = run_command([OTOOL_CMD, '-l', executable])
-
- for line in stdout.splitlines():
- tokens = line.split()
- if 'lazy_bind_off' in tokens or 'lazy_bind_size' in tokens:
- if tokens[1] != '0':
- return False
- return True
+ binary = lief.parse(executable)
+ return binary.dyld_info.lazy_bind == (0,0)
def check_MACHO_Canary(executable) -> bool:
'''
Check for use of stack canary
'''
- stdout = run_command([OTOOL_CMD, '-Iv', executable])
+ binary = lief.parse(executable)
+ return binary.has_symbol('___stack_chk_fail')
- ok = False
- for line in stdout.splitlines():
- if '___stack_chk_fail' in line:
- ok = True
- return ok
+def check_PIE(executable) -> bool:
+ '''
+ Check for position independent executable (PIE),
+ allowing for address space randomization.
+ '''
+ binary = lief.parse(executable)
+ return binary.is_pie
+
+def check_NX(executable) -> bool:
+ '''
+ Check for no stack execution
+ '''
+ binary = lief.parse(executable)
+ return binary.has_nx
CHECKS = {
'ELF': [
@@ -259,15 +197,16 @@ CHECKS = {
('separate_code', check_ELF_separate_code),
],
'PE': [
+ ('PIE', check_PIE),
('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA),
- ('NX', check_PE_NX),
+ ('NX', check_NX),
('RELOC_SECTION', check_PE_RELOC_SECTION)
],
'MACHO': [
- ('PIE', check_MACHO_PIE),
+ ('PIE', check_PIE),
('NOUNDEFS', check_MACHO_NOUNDEFS),
- ('NX', check_MACHO_NX),
+ ('NX', check_NX),
('LAZY_BINDINGS', check_MACHO_LAZY_BINDINGS),
('Canary', check_MACHO_Canary)
]
@@ -285,24 +224,24 @@ def identify_executable(executable) -> Optional[str]:
return None
if __name__ == '__main__':
- retval = 0
+ retval: int = 0
for filename in sys.argv[1:]:
try:
etype = identify_executable(filename)
if etype is None:
- print('%s: unknown format' % filename)
+ print(f'{filename}: unknown format')
retval = 1
continue
- failed = []
+ failed: List[str] = []
for (name, func) in CHECKS[etype]:
if not func(filename):
failed.append(name)
if failed:
- print('%s: failed %s' % (filename, ' '.join(failed)))
+ print(f'{filename}: failed {" ".join(failed)}')
retval = 1
except IOError:
- print('%s: cannot open' % filename)
+ print(f'{filename}: cannot open')
retval = 1
sys.exit(retval)
diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py
index 436f179d61..d740a94560 100755
--- a/contrib/devtools/symbol-check.py
+++ b/contrib/devtools/symbol-check.py
@@ -15,6 +15,7 @@ import sys
import os
from typing import List, Optional
+import lief
import pixie
# Debian 8 (Jessie) EOL: 2020. https://wiki.debian.org/DebianReleases#Production_Releases
@@ -52,8 +53,6 @@ IGNORE_EXPORTS = {
'environ', '_environ', '__environ',
}
CPPFILT_CMD = os.getenv('CPPFILT', '/usr/bin/c++filt')
-OBJDUMP_CMD = os.getenv('OBJDUMP', '/usr/bin/objdump')
-OTOOL_CMD = os.getenv('OTOOL', '/usr/bin/otool')
# Allowed NEEDED libraries
ELF_ALLOWED_LIBRARIES = {
@@ -203,44 +202,22 @@ def check_ELF_libraries(filename) -> bool:
ok = False
return ok
-def macho_read_libraries(filename) -> List[str]:
- p = subprocess.Popen([OTOOL_CMD, '-L', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
- (stdout, stderr) = p.communicate()
- if p.returncode:
- raise IOError('Error opening file')
- libraries = []
- for line in stdout.splitlines():
- tokens = line.split()
- if len(tokens) == 1: # skip executable name
- continue
- libraries.append(tokens[0].split('/')[-1])
- return libraries
-
def check_MACHO_libraries(filename) -> bool:
ok: bool = True
- for dylib in macho_read_libraries(filename):
- if dylib not in MACHO_ALLOWED_LIBRARIES:
- print('{} is not in ALLOWED_LIBRARIES!'.format(dylib))
+ binary = lief.parse(filename)
+ for dylib in binary.libraries:
+ split = dylib.name.split('/')
+ if split[-1] not in MACHO_ALLOWED_LIBRARIES:
+ print(f'{split[-1]} is not in ALLOWED_LIBRARIES!')
ok = False
return ok
-def pe_read_libraries(filename) -> List[str]:
- p = subprocess.Popen([OBJDUMP_CMD, '-x', filename], stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE, universal_newlines=True)
- (stdout, stderr) = p.communicate()
- if p.returncode:
- raise IOError('Error opening file')
- libraries = []
- for line in stdout.splitlines():
- if 'DLL Name:' in line:
- tokens = line.split(': ')
- libraries.append(tokens[1])
- return libraries
-
def check_PE_libraries(filename) -> bool:
ok: bool = True
- for dylib in pe_read_libraries(filename):
+ binary = lief.parse(filename)
+ for dylib in binary.libraries:
if dylib not in PE_ALLOWED_LIBRARIES:
- print('{} is not in ALLOWED_LIBRARIES!'.format(dylib))
+ print(f'{dylib} is not in ALLOWED_LIBRARIES!')
ok = False
return ok
@@ -275,7 +252,7 @@ if __name__ == '__main__':
try:
etype = identify_executable(filename)
if etype is None:
- print('{}: unknown format'.format(filename))
+ print(f'{filename}: unknown format')
retval = 1
continue
@@ -284,9 +261,9 @@ if __name__ == '__main__':
if not func(filename):
failed.append(name)
if failed:
- print('{}: failed {}'.format(filename, ' '.join(failed)))
+ print(f'{filename}: failed {" ".join(failed)}')
retval = 1
except IOError:
- print('{}: cannot open'.format(filename))
+ print(f'{filename}: cannot open')
retval = 1
sys.exit(retval)
diff --git a/contrib/gitian-build.py b/contrib/gitian-build.py
index 60acb0d593..5df87d9e70 100755
--- a/contrib/gitian-build.py
+++ b/contrib/gitian-build.py
@@ -210,7 +210,7 @@ def main():
args.macos = 'm' in args.os
# Disable for MacOS if no SDK found
- if args.macos and not os.path.isfile('gitian-builder/inputs/Xcode-11.3.1-11C505-extracted-SDK-with-libcxx-headers.tar.gz'):
+ if args.macos and not os.path.isfile('gitian-builder/inputs/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz'):
print('Cannot build for MacOS, SDK does not exist. Will build for other OSes')
args.macos = False
diff --git a/contrib/gitian-descriptors/assign_DISTNAME b/contrib/gitian-descriptors/assign_DISTNAME
index 330fbc041b..330fbc041b 100755..100644
--- a/contrib/gitian-descriptors/assign_DISTNAME
+++ b/contrib/gitian-descriptors/assign_DISTNAME
diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml
index 9b5f5d7e07..103e249e33 100644
--- a/contrib/gitian-descriptors/gitian-linux.yml
+++ b/contrib/gitian-descriptors/gitian-linux.yml
@@ -23,6 +23,7 @@ packages:
- "patch"
- "pkg-config"
- "python3"
+- "python3-pip"
# Cross compilation HOSTS:
# - arm-linux-gnueabihf
- "binutils-arm-linux-gnueabihf"
@@ -99,6 +100,8 @@ script: |
done
}
+ pip3 install lief==0.11.4
+
# Faketime for depends so intermediate results are comparable
export PATH_orig=${PATH}
create_global_faketime_wrappers "2000-01-01 12:00:00"
diff --git a/contrib/gitian-descriptors/gitian-osx.yml b/contrib/gitian-descriptors/gitian-osx.yml
index c1bd545501..d6c41b2c43 100644
--- a/contrib/gitian-descriptors/gitian-osx.yml
+++ b/contrib/gitian-descriptors/gitian-osx.yml
@@ -23,6 +23,7 @@ packages:
- "imagemagick"
- "libz-dev"
- "python3"
+- "python3-pip"
- "python3-setuptools"
- "fonts-tuffy"
- "xorriso"
@@ -31,7 +32,7 @@ remotes:
- "url": "https://github.com/bitcoin/bitcoin.git"
"dir": "bitcoin"
files:
-- "Xcode-11.3.1-11C505-extracted-SDK-with-libcxx-headers.tar.gz"
+- "Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz"
script: |
set -e -o pipefail
@@ -78,6 +79,8 @@ script: |
done
}
+ pip3 install lief==0.11.4
+
# Faketime for depends so intermediate results are comparable
export PATH_orig=${PATH}
create_global_faketime_wrappers "2000-01-01 12:00:00"
@@ -88,7 +91,7 @@ script: |
BASEPREFIX="${PWD}/depends"
mkdir -p ${BASEPREFIX}/SDKs
- tar -C ${BASEPREFIX}/SDKs -xf ${BUILD_DIR}/Xcode-11.3.1-11C505-extracted-SDK-with-libcxx-headers.tar.gz
+ tar -C ${BASEPREFIX}/SDKs -xf ${BUILD_DIR}/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz
# Build dependencies for each host
for i in $HOSTS; do
diff --git a/contrib/gitian-descriptors/gitian-win.yml b/contrib/gitian-descriptors/gitian-win.yml
index dbf1e8cc67..eabcdaa79d 100644
--- a/contrib/gitian-descriptors/gitian-win.yml
+++ b/contrib/gitian-descriptors/gitian-win.yml
@@ -22,6 +22,7 @@ packages:
- "zip"
- "ca-certificates"
- "python3"
+- "python3-pip"
remotes:
- "url": "https://github.com/bitcoin/bitcoin.git"
"dir": "bitcoin"
@@ -86,6 +87,8 @@ script: |
done
}
+ pip3 install lief==0.11.4
+
# Faketime for depends so intermediate results are comparable
export PATH_orig=${PATH}
create_global_faketime_wrappers "2000-01-01 12:00:00"
diff --git a/contrib/guix/README.md b/contrib/guix/README.md
index dad7de32c4..e604b370e3 100644
--- a/contrib/guix/README.md
+++ b/contrib/guix/README.md
@@ -167,7 +167,7 @@ find output/ -type f -print0 | sort -z | xargs -r0 sha256sum
Set the path where _extracted_ SDKs can be found. This is passed through to
the depends tree. Note that this is should be set to the _parent_ directory of
the actual SDK (e.g. SDK_PATH=$HOME/Downloads/macOS-SDKs instead of
- $HOME/Downloads/macOS-SDKs/Xcode-11.3.1-11C505-extracted-SDK-with-libcxx-headers).
+ $HOME/Downloads/macOS-SDKs/Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers).
* _**JOBS**_
diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh
index 6ea32329ba..e95617e2e2 100644..100755
--- a/contrib/guix/libexec/build.sh
+++ b/contrib/guix/libexec/build.sh
@@ -255,7 +255,7 @@ case "$HOST" in
esac
case "$HOST" in
- powerpc64-linux-*) HOST_LDFLAGS="${HOST_LDFLAGS} -Wl,-z,noexecstack" ;;
+ powerpc64-linux-*|riscv64-linux-*) HOST_LDFLAGS="${HOST_LDFLAGS} -Wl,-z,noexecstack" ;;
esac
# Make $HOST-specific native binaries from depends available in $PATH
diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm
index 910a9dd6f6..f98f2b9422 100644
--- a/contrib/guix/manifest.scm
+++ b/contrib/guix/manifest.scm
@@ -27,9 +27,11 @@
(gnu packages version-control)
(guix build-system font)
(guix build-system gnu)
+ (guix build-system python)
(guix build-system trivial)
(guix download)
(guix gexp)
+ (guix git-download)
((guix licenses) #:prefix license:)
(guix packages)
(guix profiles)
@@ -129,7 +131,7 @@ chain for " target " development."))
(base-gcc-for-libc gcc-7)
(base-kernel-headers linux-libre-headers-5.4)
(base-libc glibc) ; glibc 2.31
- (base-gcc (make-gcc-rpath-link gcc-9)))
+ (base-gcc (make-gcc-rpath-link gcc-8)))
"Convenience wrapper around MAKE-CROSS-TOOLCHAIN with default values
desirable for building Bitcoin Core release binaries."
(make-cross-toolchain target
@@ -147,7 +149,7 @@ desirable for building Bitcoin Core release binaries."
(pthreads-xlibc mingw-w64-x86_64-winpthreads)
(pthreads-xgcc (make-gcc-with-pthreads
(cross-gcc target
- #:xgcc (make-ssp-fixed-gcc gcc-9)
+ #:xgcc (make-ssp-fixed-gcc gcc-8)
#:xbinutils xbinutils
#:libc pthreads-xlibc))))
;; Define a meta-package that propagates the resulting XBINUTILS, XLIBC, and
@@ -192,6 +194,29 @@ chain for " target " development."))
"Thatcher Ulrich's first outline font design. He started with the goal of producing a neutral, readable sans-serif text font. There are lots of \"expressive\" fonts out there, but he wanted to start with something very plain and clean, something he might want to actually use. ")
(license license:public-domain)))
+(define-public lief
+ (package
+ (name "python-lief")
+ (version "0.11.4")
+ (source
+ (origin
+ (method git-fetch)
+ (uri (git-reference
+ (url "https://github.com/lief-project/LIEF.git")
+ (commit version)))
+ (file-name (git-file-name name version))
+ (sha256
+ (base32
+ "0h4kcwr9z478almjqhmils8imfpflzk0r7d05g4xbkdyknn162qf"))))
+ (build-system python-build-system)
+ (native-inputs
+ `(("cmake" ,cmake)))
+ (home-page "https://github.com/lief-project/LIEF")
+ (synopsis "Library to Instrument Executable Formats")
+ (description "Python library to provide a cross-platform library which can
+parse, modify and abstract ELF, PE and MachO formats.")
+ (license license:asl2.0)))
+
(packages->manifest
(append
(list ;; The Basics
@@ -227,6 +252,8 @@ chain for " target " development."))
python-3
;; Git
git
+ ;; Tests
+ lief
;; Native gcc 7 toolchain
gcc-toolchain-7
(list gcc-toolchain-7 "static"))
@@ -239,5 +266,5 @@ chain for " target " development."))
((string-contains target "-linux-")
(list (make-bitcoin-cross-toolchain target)))
((string-contains target "darwin")
- (list clang-toolchain-8 binutils imagemagick libtiff librsvg font-tuffy cmake xorriso))
+ (list clang-toolchain-10 binutils imagemagick libtiff librsvg font-tuffy cmake xorriso))
(else '())))))
diff --git a/contrib/macdeploy/README.md b/contrib/macdeploy/README.md
index 2d9a4a2153..21f6ba2eb3 100644
--- a/contrib/macdeploy/README.md
+++ b/contrib/macdeploy/README.md
@@ -13,9 +13,9 @@ When complete, it will have produced `Bitcoin-Core.dmg`.
### Step 1: Obtaining `Xcode.app`
Our current macOS SDK
-(`Xcode-11.3.1-11C505-extracted-SDK-with-libcxx-headers.tar.gz`) can be
+(`Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz`) can be
extracted from
-[Xcode_11.3.1.xip](https://download.developer.apple.com/Developer_Tools/Xcode_11.3.1/Xcode_11.3.1.xip).
+[Xcode_12.1.xip](https://download.developer.apple.com/Developer_Tools/Xcode_12.1/Xcode_12.1.xip).
An Apple ID is needed to download this.
After Xcode version 7.x, Apple started shipping the `Xcode.app` in a `.xip`
@@ -27,25 +27,25 @@ approach (tested on Debian Buster) is outlined below:
apt install cpio
git clone https://github.com/bitcoin-core/apple-sdk-tools.git
-# Unpack Xcode_11.3.1.xip and place the resulting Xcode.app in your current
+# Unpack Xcode_12.1.xip and place the resulting Xcode.app in your current
# working directory
-python3 apple-sdk-tools/extract_xcode.py -f Xcode_11.3.1.xip | cpio -d -i
+python3 apple-sdk-tools/extract_xcode.py -f Xcode_12.1.xip | cpio -d -i
```
On macOS the process is more straightforward:
```bash
-xip -x Xcode_11.3.1.xip
+xip -x Xcode_12.1.xip
```
-### Step 2: Generating `Xcode-11.3.1-11C505-extracted-SDK-with-libcxx-headers.tar.gz` from `Xcode.app`
+### Step 2: Generating `Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz` from `Xcode.app`
-To generate `Xcode-11.3.1-11C505-extracted-SDK-with-libcxx-headers.tar.gz`, run
+To generate `Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz`, run
the script [`gen-sdk`](./gen-sdk) with the path to `Xcode.app` (extracted in the
previous stage) as the first argument.
```bash
-# Generate a Xcode-11.3.1-11C505-extracted-SDK-with-libcxx-headers.tar.gz from
+# Generate a Xcode-12.1-12A7403-extracted-SDK-with-libcxx-headers.tar.gz from
# the supplied Xcode.app
./contrib/macdeploy/gen-sdk '/path/to/Xcode.app'
```
@@ -56,7 +56,7 @@ Working macOS DMGs are created in Linux by combining a recent `clang`, the Apple
Apple uses `clang` extensively for development and has upstreamed the necessary
functionality so that a vanilla clang can take advantage. It supports the use of `-F`,
-`-target`, `-mmacosx-version-min`, and `--sysroot`, which are all necessary when
+`-target`, `-mmacosx-version-min`, and `-isysroot`, which are all necessary when
building for macOS.
Apple's version of `binutils` (called `cctools`) contains lots of functionality missing in the
diff --git a/contrib/qos/tc.sh b/contrib/qos/tc.sh
index 1cde19efd1..1cde19efd1 100644..100755
--- a/contrib/qos/tc.sh
+++ b/contrib/qos/tc.sh
diff --git a/contrib/seeds/generate-seeds.py b/contrib/seeds/generate-seeds.py
index d95069277d..9560b586ec 100755
--- a/contrib/seeds/generate-seeds.py
+++ b/contrib/seeds/generate-seeds.py
@@ -16,6 +16,7 @@ These files must consist of lines in the format
<ip>:<port>
[<ipv6>]:<port>
<onion>.onion:<port>
+ <i2p>.b32.i2p:<port>
The output will be two data structures with the peers in binary format:
@@ -52,6 +53,12 @@ def name_to_bip155(addr):
return (BIP155Network.TORV3, vchAddr[:32])
else:
raise ValueError('Invalid onion %s' % vchAddr)
+ elif addr.endswith('.b32.i2p'):
+ vchAddr = b32decode(addr[0:-8] + '====', True)
+ if len(vchAddr) == 32:
+ return (BIP155Network.I2P, vchAddr)
+ else:
+ raise ValueError(f'Invalid I2P {vchAddr}')
elif '.' in addr: # IPv4
return (BIP155Network.IPV4, bytes((int(x) for x in addr.split('.'))))
elif ':' in addr: # IPv6
diff --git a/contrib/seeds/nodes_main.txt b/contrib/seeds/nodes_main.txt
index c43caa73eb..7300e3043d 100644
--- a/contrib/seeds/nodes_main.txt
+++ b/contrib/seeds/nodes_main.txt
@@ -1188,3 +1188,13 @@ sys54sv4xv3hn3sdiv3oadmzqpgyhd4u4xphv4xqk64ckvaxzm57a7yd.onion:8333
tddeij4qigtjr6jfnrmq6btnirmq5msgwcsdpcdjr7atftm7cxlqztid.onion:8333
vi5bnbxkleeqi6hfccjochnn65lcxlfqs4uwgmhudph554zibiusqnad.onion:8333
xqt25cobm5zqucac3634zfght72he6u3eagfyej5ellbhcdgos7t2had.onion:8333
+
+# manually added 2021-05 for minimal i2p bootstrap support
+72l3ucjkuscrbiiepoehuwqgknyzgo7zuix5ty4puwrkyhtmnsga.b32.i2p:8333
+c4gfnttsuwqomiygupdqqqyy5y5emnk5c73hrfvatri67prd7vyq.b32.i2p:8333
+gehtac45oaghz54ypyopim64mql7oad2bqclla74l6tfeolzmodq.b32.i2p:8333
+h3r6bkn46qxftwja53pxiykntegfyfjqtnzbm6iv6r5mungmqgmq.b32.i2p:8333
+hnbbyjpxx54623l555sta7pocy3se4sdgmuebi5k6reesz5rjp6q.b32.i2p:8333
+pjs7or2ctvteeo5tu4bwyrtydeuhqhvdprtujn4daxr75jpebjxa.b32.i2p:8333
+wwbw7nqr3ahkqv62cuqfwgtneekvvpnuc4i4f6yo7tpoqjswvcwa.b32.i2p:8333
+zsxwyo6qcn3chqzwxnseusqgsnuw3maqnztkiypyfxtya4snkoka.b32.i2p:8333
diff --git a/depends/Makefile b/depends/Makefile
index c5dfd1e8a7..ac12e91e49 100644
--- a/depends/Makefile
+++ b/depends/Makefile
@@ -34,6 +34,8 @@ BASE_CACHE ?= $(BASEDIR)/built
SDK_PATH ?= $(BASEDIR)/SDKs
NO_QT ?=
NO_QR ?=
+NO_BDB ?=
+NO_SQLITE ?=
NO_WALLET ?=
NO_ZMQ ?=
NO_UPNP ?=
@@ -112,37 +114,31 @@ include builders/$(build_os).mk
include builders/default.mk
include packages/packages.mk
-full_env=$(shell printenv)
-
-build_id_string:=$(BUILD_ID_SALT)
-
-# GCC only prints COLLECT_LTO_WRAPPER when invoked with just "-v", but we want
-# the information from "-v -E -" as well, so just include both.
+# Previously, we directly invoked the well-known programs using $(shell ...)
+# to construct build_id_string. However, that was problematic because:
#
-# '3>&1 1>&2 2>&3 > /dev/null' is supposed to swap stdin and stdout and silence
-# stdin, since we only want the stderr output
-build_id_string+=$(shell $(build_CC) -v < /dev/null 3>&1 1>&2 2>&3 > /dev/null) $(shell $(build_CC) -v -E - < /dev/null 3>&1 1>&2 2>&3 > /dev/null)
-build_id_string+=$(shell $(build_AR) --version 2>/dev/null) $(filter AR_%,$(full_env)) ZERO_AR_DATE=$(ZERO_AR_DATE)
-build_id_string+=$(shell $(build_CXX) -v < /dev/null 3>&1 1>&2 2>&3 > /dev/null) $(shell $(build_CXX) -v -E - < /dev/null 3>&1 1>&2 2>&3 > /dev/null)
-build_id_string+=$(shell $(build_RANLIB) --version 2>/dev/null) $(filter RANLIB_%,$(full_env))
-build_id_string+=$(shell $(build_STRIP) --version 2>/dev/null) $(filter STRIP_%,$(full_env))
-
-$(host_arch)_$(host_os)_id_string:=$(HOST_ID_SALT)
-$(host_arch)_$(host_os)_id_string+=$(shell $(host_CC) -v < /dev/null 3>&1 1>&2 2>&3 > /dev/null) $(shell $(host_CC) -v -E - < /dev/null 3>&1 1>&2 2>&3 > /dev/null)
-$(host_arch)_$(host_os)_id_string+=$(shell $(host_AR) --version 2>/dev/null) $(filter AR_%,$(full_env)) ZERO_AR_DATE=$(ZERO_AR_DATE)
-$(host_arch)_$(host_os)_id_string+=$(shell $(host_CXX) -v < /dev/null 3>&1 1>&2 2>&3 > /dev/null) $(shell $(host_CXX) -v -E - < /dev/null 3>&1 1>&2 2>&3 > /dev/null)
-$(host_arch)_$(host_os)_id_string+=$(shell $(host_RANLIB) --version 2>/dev/null) $(filter RANLIB_%,$(full_env))
-$(host_arch)_$(host_os)_id_string+=$(shell $(host_STRIP) --version 2>/dev/null) $(filter STRIP_%,$(full_env))
-
-ifneq ($(strip $(FORCE_USE_SYSTEM_CLANG)),)
-# Make sure that cache is invalidated when switching between system and
-# depends-managed, pinned clang
-build_id_string+=system_clang
-$(host_arch)_$(host_os)_id_string+=system_clang
-endif
-
-build_id_string+=GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))
-$(host_arch)_$(host_os)_id_string+=GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))
+# 1. When invoking a shell, GNU Make special-cases exit code 127 (command not
+# found) by not capturing the output but instead passing it through. This is
+# not done for any other exit code.
+#
+# 2. Characters like '#' (from these programs' output) would end up in make
+# variables like build_id_string, which would be wrongly interpreted by make
+# when these variables were used.
+#
+# Therefore, we should avoid having arbitrary strings in make variables where
+# possible. The gen_id script used here hashes the output to construct a
+# "make-safe" id.
+#
+# Also note that these lines need to be:
+#
+# 1. After including {hosts,builders}/*.mk, since they rely on the tool
+# variables (e.g. build_CC, host_STRIP, etc.) to be set.
+#
+# 2. Before including packages/*.mk (excluding packages/packages.mk), since
+# they rely on the build_id variables
+#
+build_id:=$(shell env CC='$(build_CC)' CXX='$(build_CXX)' AR='$(build_AR)' RANLIB='$(build_RANLIB)' STRIP='$(build_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' ./gen_id '$(BUILD_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))')
+$(host_arch)_$(host_os)_id:=$(shell env CC='$(host_CC)' CXX='$(host_CXX)' AR='$(host_AR)' RANLIB='$(host_RANLIB)' STRIP='$(host_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' ./gen_id '$(HOST_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))')
qrencode_packages_$(NO_QR) = $(qrencode_packages)
@@ -233,6 +229,8 @@ $(host_prefix)/share/config.site : config.site.in $(host_prefix)/.stamp_$(final_
-e 's|@no_qr@|$(NO_QR)|' \
-e 's|@no_zmq@|$(NO_ZMQ)|' \
-e 's|@no_wallet@|$(NO_WALLET)|' \
+ -e 's|@no_bdb@|$(NO_BDB)|' \
+ -e 's|@no_sqlite@|$(NO_SQLITE)|' \
-e 's|@no_upnp@|$(NO_UPNP)|' \
-e 's|@no_natpmp@|$(NO_NATPMP)|' \
-e 's|@multiprocess@|$(MULTIPROCESS)|' \
diff --git a/depends/builders/darwin.mk b/depends/builders/darwin.mk
index f4103fc1f2..001c928424 100644
--- a/depends/builders/darwin.mk
+++ b/depends/builders/darwin.mk
@@ -1,5 +1,5 @@
-build_darwin_CC:=$(shell xcrun -f clang) --sysroot $(shell xcrun --show-sdk-path)
-build_darwin_CXX:=$(shell xcrun -f clang++) --sysroot $(shell xcrun --show-sdk-path)
+build_darwin_CC:=$(shell xcrun -f clang) -isysroot$(shell xcrun --show-sdk-path)
+build_darwin_CXX:=$(shell xcrun -f clang++) -isysroot$(shell xcrun --show-sdk-path)
build_darwin_AR:=$(shell xcrun -f ar)
build_darwin_RANLIB:=$(shell xcrun -f ranlib)
build_darwin_STRIP:=$(shell xcrun -f strip)
@@ -10,8 +10,8 @@ build_darwin_SHA256SUM=shasum -a 256
build_darwin_DOWNLOAD=curl --location --fail --connect-timeout $(DOWNLOAD_CONNECT_TIMEOUT) --retry $(DOWNLOAD_RETRIES) -o
#darwin host on darwin builder. overrides darwin host preferences.
-darwin_CC=$(shell xcrun -f clang) -mmacosx-version-min=$(OSX_MIN_VERSION) --sysroot $(shell xcrun --show-sdk-path)
-darwin_CXX:=$(shell xcrun -f clang++) -mmacosx-version-min=$(OSX_MIN_VERSION) -stdlib=libc++ --sysroot $(shell xcrun --show-sdk-path)
+darwin_CC=$(shell xcrun -f clang) -mmacosx-version-min=$(OSX_MIN_VERSION) -isysroot$(shell xcrun --show-sdk-path)
+darwin_CXX:=$(shell xcrun -f clang++) -mmacosx-version-min=$(OSX_MIN_VERSION) -stdlib=libc++ -isysroot$(shell xcrun --show-sdk-path)
darwin_AR:=$(shell xcrun -f ar)
darwin_RANLIB:=$(shell xcrun -f ranlib)
darwin_STRIP:=$(shell xcrun -f strip)
diff --git a/depends/config.site.in b/depends/config.site.in
index f1a59a5861..bcc2850a53 100644
--- a/depends/config.site.in
+++ b/depends/config.site.in
@@ -38,6 +38,14 @@ if test -z "$enable_wallet" && test -n "@no_wallet@"; then
enable_wallet=no
fi
+if test -z "$with_bdb" && test -n "@no_bdb@"; then
+ with_bdb=no
+fi
+
+if test -z "$with_sqlite" && test -n "@no_sqlite@"; then
+ with_sqlite=no
+fi
+
if test -z "$enable_multiprocess" && test -n "@multiprocess@"; then
enable_multiprocess=yes
fi
diff --git a/depends/funcs.mk b/depends/funcs.mk
index 6e1a922f89..34a030fab7 100644
--- a/depends/funcs.mk
+++ b/depends/funcs.mk
@@ -49,7 +49,7 @@ define int_get_build_id
$(eval $(1)_dependencies += $($(1)_$(host_arch)_$(host_os)_dependencies) $($(1)_$(host_os)_dependencies))
$(eval $(1)_all_dependencies:=$(call int_get_all_dependencies,$(1),$($($(1)_type)_native_toolchain) $($($(1)_type)_native_binutils) $($(1)_dependencies)))
$(foreach dep,$($(1)_all_dependencies),$(eval $(1)_build_id_deps+=$(dep)-$($(dep)_version)-$($(dep)_recipe_hash)))
-$(eval $(1)_build_id_long:=$(1)-$($(1)_version)-$($(1)_recipe_hash)-$(release_type) $($(1)_build_id_deps) $($($(1)_type)_id_string))
+$(eval $(1)_build_id_long:=$(1)-$($(1)_version)-$($(1)_recipe_hash)-$(release_type) $($(1)_build_id_deps) $($($(1)_type)_id))
$(eval $(1)_build_id:=$(shell echo -n "$($(1)_build_id_long)" | $(build_SHA256SUM) | cut -c-$(HASH_LENGTH)))
final_build_id_long+=$($(package)_build_id_long)
diff --git a/depends/gen_id b/depends/gen_id
new file mode 100755
index 0000000000..ac69ca7ee1
--- /dev/null
+++ b/depends/gen_id
@@ -0,0 +1,74 @@
+#!/usr/bin/env bash
+
+# Usage: env [ CC=... ] [ CXX=... ] [ AR=... ] [ RANLIB=... ] [ STRIP=... ] \
+# [ DEBUG=... ] ./gen_id [ID_SALT]...
+#
+# Prints to stdout a SHA256 hash representing the current toolset, used by
+# depends/Makefile as a build id for caching purposes (detecting when the
+# toolset has changed and the cache needs to be invalidated).
+#
+# If the DEBUG environment variable is non-empty and the system has `tee`
+# available in its $PATH, the pre-image to the SHA256 hash will be printed to
+# stderr. This is to help developers debug caching issues in depends.
+
+# This script explicitly does not `set -e` because id determination is mostly
+# opportunistic: it is fine that things fail, as long as they fail consistently.
+
+# Command variables (CC/CXX/AR) which can be blank are invoked with `bash -c`,
+# because the "command not found" error message printed by shells often includes
+# the line number, like so:
+#
+# ./depends/gen_id: line 43: --version: command not found
+#
+# By invoking with `bash -c`, we ensure that the line number is always 1
+
+(
+ # Redirect stderr to stdout
+ exec 2>&1
+
+ echo "BEGIN ALL"
+
+ # Include any ID salts supplied via command line
+ echo "BEGIN ID SALT"
+ echo "$@"
+ echo "END ID SALT"
+
+ # GCC only prints COLLECT_LTO_WRAPPER when invoked with just "-v", but we want
+ # the information from "-v -E -" as well, so just include both.
+ echo "BEGIN CC"
+ bash -c "${CC} -v"
+ bash -c "${CC} -v -E -xc -o /dev/null - < /dev/null"
+ bash -c "${CC} -v -E -xobjective-c -o /dev/null - < /dev/null"
+ echo "END CC"
+
+ echo "BEGIN CXX"
+ bash -c "${CXX} -v"
+ bash -c "${CXX} -v -E -xc++ -o /dev/null - < /dev/null"
+ bash -c "${CXX} -v -E -xobjective-c++ -o /dev/null - < /dev/null"
+ echo "END CXX"
+
+ echo "BEGIN AR"
+ bash -c "${AR} --version"
+ env | grep '^AR_'
+ echo "ZERO_AR_DATE=${ZERO_AR_DATE}"
+ echo "END AR"
+
+ echo "BEGIN RANLIB"
+ bash -c "${RANLIB} --version"
+ env | grep '^RANLIB_'
+ echo "END RANLIB"
+
+ echo "BEGIN STRIP"
+ bash -c "${STRIP} --version"
+ env | grep '^STRIP_'
+ echo "END STRIP"
+
+ echo "END ALL"
+) | if [ -n "$DEBUG" ] && command -v tee > /dev/null 2>&1; then
+ # When debugging and `tee` is available, output the preimage to stderr
+ # in addition to passing through stdin to stdout
+ tee >(cat 1>&2)
+ else
+ # Otherwise, passthrough stdin to stdout
+ cat
+ fi | ${SHA256SUM} - | cut -d' ' -f1
diff --git a/depends/hosts/darwin.mk b/depends/hosts/darwin.mk
index dd71697f0f..5a7ae2df9a 100644
--- a/depends/hosts/darwin.mk
+++ b/depends/hosts/darwin.mk
@@ -1,8 +1,8 @@
OSX_MIN_VERSION=10.14
-OSX_SDK_VERSION=10.15.1
-XCODE_VERSION=11.3.1
-XCODE_BUILD_ID=11C505
-LD64_VERSION=530
+OSX_SDK_VERSION=10.15.6
+XCODE_VERSION=12.1
+XCODE_BUILD_ID=12A7403
+LD64_VERSION=609
OSX_SDK=$(SDK_PATH)/Xcode-$(XCODE_VERSION)-$(XCODE_BUILD_ID)-extracted-SDK-with-libcxx-headers
@@ -60,16 +60,11 @@ $(foreach TOOL,$(cctools_TOOLS),$(eval darwin_$(TOOL) = $$(build_prefix)/bin/$$(
# Explicitly point to our binaries (e.g. cctools) so that they are
# ensured to be found and preferred over other possibilities.
#
-# -stdlib=libc++ -nostdinc++ -Xclang -cxx-isystem$(OSX_SDK)/usr/include/c++/v1
+# -stdlib=libc++ -stdlib++-isystem$(OSX_SDK)/usr/include/c++/v1
#
# Forces clang to use the libc++ headers from our SDK and completely
# forget about the libc++ headers from the standard directories
#
-# TODO: Once we start requiring a clang version that has the
-# -stdlib++-isystem<directory> flag first introduced here:
-# https://reviews.llvm.org/D64089, we should use that instead. Read the
-# differential summary there for more details.
-#
# -Xclang -*system<path_a> \
# -Xclang -*system<path_b> \
# -Xclang -*system<path_c> ...
@@ -100,7 +95,7 @@ darwin_CC=env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH \
-u LIBRARY_PATH \
$(clang_prog) --target=$(host) -mmacosx-version-min=$(OSX_MIN_VERSION) \
-B$(build_prefix)/bin -mlinker-version=$(LD64_VERSION) \
- --sysroot=$(OSX_SDK) \
+ -isysroot$(OSX_SDK) \
-Xclang -internal-externc-isystem$(clang_resource_dir)/include \
-Xclang -internal-externc-isystem$(OSX_SDK)/usr/include
darwin_CXX=env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH \
@@ -108,9 +103,9 @@ darwin_CXX=env -u C_INCLUDE_PATH -u CPLUS_INCLUDE_PATH \
-u LIBRARY_PATH \
$(clangxx_prog) --target=$(host) -mmacosx-version-min=$(OSX_MIN_VERSION) \
-B$(build_prefix)/bin -mlinker-version=$(LD64_VERSION) \
- --sysroot=$(OSX_SDK) \
- -stdlib=libc++ -nostdinc++ \
- -Xclang -cxx-isystem$(OSX_SDK)/usr/include/c++/v1 \
+ -isysroot$(OSX_SDK) \
+ -stdlib=libc++ \
+ -stdlib++-isystem$(OSX_SDK)/usr/include/c++/v1 \
-Xclang -internal-externc-isystem$(clang_resource_dir)/include \
-Xclang -internal-externc-isystem$(OSX_SDK)/usr/include
diff --git a/depends/hosts/linux.mk b/depends/hosts/linux.mk
index 8ab448ce5f..07da752492 100644
--- a/depends/hosts/linux.mk
+++ b/depends/hosts/linux.mk
@@ -7,7 +7,7 @@ linux_release_CXXFLAGS=$(linux_release_CFLAGS)
linux_debug_CFLAGS=-O1
linux_debug_CXXFLAGS=$(linux_debug_CFLAGS)
-linux_debug_CPPFLAGS=-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC
+linux_debug_CPPFLAGS=-D_GLIBCXX_DEBUG -D_GLIBCXX_DEBUG_PEDANTIC -D_LIBCPP_DEBUG=1
ifeq (86,$(findstring 86,$(build_arch)))
i686_linux_CC=gcc -m32
diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk
index 29a3efdfe6..0800c63dfc 100644
--- a/depends/packages/boost.mk
+++ b/depends/packages/boost.mk
@@ -1,6 +1,6 @@
package=boost
$(package)_version=1_71_0
-$(package)_download_path=https://dl.bintray.com/boostorg/release/$(subst _,.,$($(package)_version))/source/
+$(package)_download_path=https://boostorg.jfrog.io/artifactory/main/release/$(subst _,.,$($(package)_version))/source/
$(package)_file_name=boost_$($(package)_version).tar.bz2
$(package)_sha256_hash=d73a8da01e8bf8c7eda40b4c84915071a8c8a0df4a6734537ddde4a8580524ee
$(package)_dependencies=native_b2
diff --git a/depends/packages/native_cctools.mk b/depends/packages/native_cctools.mk
index c789dab74c..885207fce9 100644
--- a/depends/packages/native_cctools.mk
+++ b/depends/packages/native_cctools.mk
@@ -1,10 +1,9 @@
package=native_cctools
-$(package)_version=55562e4073dea0fbfd0b20e0bf69ffe6390c7f97
+$(package)_version=2ef2e931cf641547eb8a68cfebde61003587c9fd
$(package)_download_path=https://github.com/tpoechtrager/cctools-port/archive
$(package)_file_name=$($(package)_version).tar.gz
-$(package)_sha256_hash=e51995a843533a3dac155dd0c71362dd471597a2d23f13dff194c6285362f875
+$(package)_sha256_hash=6b73269efdf5c58a070e7357b66ee760501388549d6a12b423723f45888b074b
$(package)_build_subdir=cctools
-$(package)_patches=ld64_disable_threading.patch
$(package)_dependencies=native_libtapi
define $(package)_set_vars
@@ -17,10 +16,6 @@ define $(package)_set_vars
$(package)_cxx=$(clangxx_prog)
endef
-define $(package)_preprocess_cmds
- patch -p1 < $($(package)_patch_dir)/ld64_disable_threading.patch
-endef
-
define $(package)_config_cmds
$($(package)_autoconf)
endef
diff --git a/depends/packages/native_clang.mk b/depends/packages/native_clang.mk
index 741ddacf09..36adeb196d 100644
--- a/depends/packages/native_clang.mk
+++ b/depends/packages/native_clang.mk
@@ -1,9 +1,9 @@
package=native_clang
-$(package)_version=8.0.0
-$(package)_download_path=https://releases.llvm.org/$($(package)_version)
-$(package)_download_file=clang+llvm-$($(package)_version)-x86_64-linux-gnu-ubuntu-14.04.tar.xz
-$(package)_file_name=clang-llvm-$($(package)_version)-x86_64-linux-gnu-ubuntu-14.04.tar.xz
-$(package)_sha256_hash=9ef854b71949f825362a119bf2597f744836cb571131ae6b721cd102ffea8cd0
+$(package)_version=10.0.1
+$(package)_download_path=https://github.com/llvm/llvm-project/releases/download/llvmorg-$($(package)_version)
+$(package)_download_file=clang+llvm-$($(package)_version)-x86_64-linux-gnu-ubuntu-16.04.tar.xz
+$(package)_file_name=clang+llvm-$($(package)_version)-x86_64-linux-gnu-ubuntu-16.04.tar.xz
+$(package)_sha256_hash=48b83ef827ac2c213d5b64f5ad7ed082c8bcb712b46644e0dc5045c6f462c231
define $(package)_preprocess_cmds
rm -f $($(package)_extract_dir)/lib/libc++abi.so*
diff --git a/depends/packages/native_libtapi.mk b/depends/packages/native_libtapi.mk
index d7ac4156a7..60b898da5f 100644
--- a/depends/packages/native_libtapi.mk
+++ b/depends/packages/native_libtapi.mk
@@ -1,9 +1,9 @@
package=native_libtapi
-$(package)_version=3efb201881e7a76a21e0554906cf306432539cef
+$(package)_version=664b8414f89612f2dfd35a9b679c345aa5389026
$(package)_download_path=https://github.com/tpoechtrager/apple-libtapi/archive
$(package)_download_file=$($(package)_version).tar.gz
$(package)_file_name=$($(package)_version).tar.gz
-$(package)_sha256_hash=380c1ca37cfa04a8699d0887a8d3ee1ad27f3d08baba78887c73b09485c0fbd3
+$(package)_sha256_hash=62e419c12d1c9fad67cc1cd523132bc00db050998337c734c15bc8d73cc02b61
ifeq ($(strip $(FORCE_USE_SYSTEM_CLANG)),)
$(package)_dependencies=native_clang
diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk
index 0133eee920..8b5bfa6895 100644
--- a/depends/packages/qt.mk
+++ b/depends/packages/qt.mk
@@ -9,7 +9,7 @@ $(package)_qt_libs=corelib network widgets gui plugins testlib
$(package)_patches=fix_qt_pkgconfig.patch mac-qmake.conf fix_no_printer.patch no-xlib.patch
$(package)_patches+= fix_android_qmake_conf.patch fix_android_jni_static.patch dont_hardcode_pwd.patch
$(package)_patches+= drop_lrelease_dependency.patch no_sdk_version_check.patch
-$(package)_patches+= fix_qpainter_non_determinism.patch fix_lib_paths.patch fix_android_pch.patch
+$(package)_patches+= fix_lib_paths.patch fix_android_pch.patch
$(package)_patches+= fix_bigsur_drawing.patch qtbase-moc-ignore-gcc-macro.patch
$(package)_qttranslations_file_name=qttranslations-$($(package)_suffix)
@@ -227,7 +227,6 @@ define $(package)_preprocess_cmds
patch -p1 -i $($(package)_patch_dir)/fix_android_jni_static.patch && \
patch -p1 -i $($(package)_patch_dir)/fix_android_pch.patch && \
patch -p1 -i $($(package)_patch_dir)/no-xlib.patch && \
- patch -p1 -i $($(package)_patch_dir)/fix_qpainter_non_determinism.patch &&\
patch -p1 -i $($(package)_patch_dir)/no_sdk_version_check.patch && \
patch -p1 -i $($(package)_patch_dir)/fix_lib_paths.patch && \
patch -p1 -i $($(package)_patch_dir)/fix_bigsur_drawing.patch && \
diff --git a/depends/patches/native_cctools/ld64_disable_threading.patch b/depends/patches/native_cctools/ld64_disable_threading.patch
deleted file mode 100644
index 2de6874cd4..0000000000
--- a/depends/patches/native_cctools/ld64_disable_threading.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-commit 584668415039adeed073decee7e04de28248afd3
-Author: fanquake <fanquake@gmail.com>
-Date: Tue Aug 18 01:20:24 2020 +0000
-
- Disable threading to fix non-determinism
-
- A bug in the file parser can cause dependencies to be calculated
- differently based on which files have already been parsed. This is more
- likely to occur on systems with more CPUs.
-
- Just disable threading for now. There is no noticeable slowdown.
-
- See #9891.
-
-diff --git a/cctools/ld64/src/ld/InputFiles.h b/cctools/ld64/src/ld/InputFiles.h
-index ef9c756..90a70b6 100644
---- a/cctools/ld64/src/ld/InputFiles.h
-+++ b/cctools/ld64/src/ld/InputFiles.h
-@@ -25,7 +25,6 @@
- #ifndef __INPUT_FILES_H__
- #define __INPUT_FILES_H__
-
--#define HAVE_PTHREADS 1
-
- #include <stdlib.h>
- #include <sys/types.h>
diff --git a/depends/patches/qt/fix_qpainter_non_determinism.patch b/depends/patches/qt/fix_qpainter_non_determinism.patch
deleted file mode 100644
index 44c45187c5..0000000000
--- a/depends/patches/qt/fix_qpainter_non_determinism.patch
+++ /dev/null
@@ -1,63 +0,0 @@
-commit 2a8f7dc6ddfc414a66491522501c1574a1343ee1
-Author: Andrew Chow <achow101-github@achow101.com>
-Date: Sat Nov 21 01:11:04 2020 -0500
-
- build: Fix determinism issue when building with Clang 8
-
- When building Qt with LLVM/Clang 8 under -O3 (the default), we run into
- a determinism issue in `qt_interset_spans`. The issue has been fixed for
- LLVM/Clang 9, see
- https://github.com/llvm/llvm-project/commit/db101864bdc938deb1d63fe4f7da761bd38e5cae
- and https://reviews.llvm.org/D64601, however this fix was not backported
- to 8.x. Once LLVM/Clang 9 is used, this patch can be dropped.
-
- The particular issue appears to be an optimization done by -O3 which
- adds a temporary variable for `spans->y` in `qt_intersect_spans`. When
- it does this, sometimes it chooses to use a 32-bit movs instruction
- (movswl), and other times it chooses a 64-bit movs instruction (movswq).
- By patching `qt_intersect_spans` to always make a temporary variable for
- `spans->y`, we are able to sidestep this problem.
-
-diff --git a/qtbase/src/gui/painting/qpaintengine_raster.cpp b/qtbase/src/gui/painting/qpaintengine_raster.cpp
-index 92ab6e8375..f018009e0b 100644
---- a/qtbase/src/gui/painting/qpaintengine_raster.cpp
-+++ b/qtbase/src/gui/painting/qpaintengine_raster.cpp
-@@ -4128,22 +4128,23 @@ static const QSpan *qt_intersect_spans(const QClipData *clip, int *currentClip,
- const QSpan *clipEnd = clip->m_spans + clip->count;
-
- while (available && spans < end ) {
-+ const short spans_y = spans->y;
- if (clipSpans >= clipEnd) {
- spans = end;
- break;
- }
-- if (clipSpans->y > spans->y) {
-+ if (clipSpans->y > spans_y) {
- ++spans;
- continue;
- }
-- if (spans->y != clipSpans->y) {
-- if (spans->y < clip->count && clip->m_clipLines[spans->y].spans)
-- clipSpans = clip->m_clipLines[spans->y].spans;
-+ if (spans_y != clipSpans->y) {
-+ if (spans_y < clip->count && clip->m_clipLines[spans_y].spans)
-+ clipSpans = clip->m_clipLines[spans_y].spans;
- else
- ++clipSpans;
- continue;
- }
-- Q_ASSERT(spans->y == clipSpans->y);
-+ Q_ASSERT(spans_y == clipSpans->y);
-
- int sx1 = spans->x;
- int sx2 = sx1 + spans->len;
-@@ -4162,7 +4163,7 @@ static const QSpan *qt_intersect_spans(const QClipData *clip, int *currentClip,
- if (len) {
- out->x = qMax(sx1, cx1);
- out->len = qMin(sx2, cx2) - out->x;
-- out->y = spans->y;
-+ out->y = spans_y;
- out->coverage = qt_div_255(spans->coverage * clipSpans->coverage);
- ++out;
- --available;
-
diff --git a/doc/files.md b/doc/files.md
index a80adf30ea..695ea6f448 100644
--- a/doc/files.md
+++ b/doc/files.md
@@ -53,6 +53,7 @@ Subdirectory | File(s) | Description
`indexes/txindex/` | LevelDB database | Transaction index; *optional*, used if `-txindex=1`
`indexes/blockfilter/basic/db/` | LevelDB database | Blockfilter index LevelDB database for the basic filtertype; *optional*, used if `-blockfilterindex=basic`
`indexes/blockfilter/basic/` | `fltrNNNNN.dat`<sup>[\[2\]](#note2)</sup> | Blockfilter index filters for the basic filtertype; *optional*, used if `-blockfilterindex=basic`
+`indexes/coinstats/db/` | LevelDB database | Coinstats index; *optional*, used if `-coinstatsindex=1`
`wallets/` | | [Contains wallets](#multi-wallet-environment); can be specified by `-walletdir` option; if `wallets/` subdirectory does not exist, wallets reside in the [data directory](#data-directory-location)
`./` | `anchors.dat` | Anchor IP address database, created on shutdown and deleted at startup. Anchors are last known outgoing block-relay-only peers that are tried to re-connect to on startup
`./` | `banlist.dat` | Stores the IPs/subnets of banned nodes
diff --git a/doc/fuzzing.md b/doc/fuzzing.md
index 4d8825f4c2..5ffa3a9416 100644
--- a/doc/fuzzing.md
+++ b/doc/fuzzing.md
@@ -230,3 +230,15 @@ $ honggfuzz/honggfuzz --exit_upon_crash --quiet --timeout 4 -n 1 -Q \
-nodebuglogfile -bind=127.0.0.1:18444 -logthreadnames \
-debug
```
+
+# OSS-Fuzz
+
+Bitcoin Core participates in Google's [OSS-Fuzz](https://github.com/google/oss-fuzz/tree/master/projects/bitcoin-core)
+program, which includes a dashboard of [publicly disclosed vulnerabilities](https://bugs.chromium.org/p/oss-fuzz/issues/list).
+Generally, we try to disclose vulnerabilities as soon as possible after they
+are fixed to give users the knowledge they need to be protected. However,
+because Bitcoin is a live P2P network, and not just standalone local software,
+we might not fully disclose every issue within Google's standard
+[90-day disclosure window](https://google.github.io/oss-fuzz/getting-started/bug-disclosure-guidelines/)
+if a partial or delayed disclosure is important to protect users or the
+function of the network.
diff --git a/doc/reduce-memory.md b/doc/reduce-memory.md
index 6e7a578ecc..296b172bde 100644
--- a/doc/reduce-memory.md
+++ b/doc/reduce-memory.md
@@ -24,9 +24,13 @@ The size of some in-memory caches can be reduced. As caches trade off memory usa
## Number of peers
-- `-maxconnections=<n>` - the maximum number of connections, this defaults to 125. Each active connection takes up some
- memory. This option applies only if incoming connections are enabled, otherwise the number of connections will never
- be more than 10. Of the 10 outbound peers, there can be 8 full-relay connections and 2 block-relay-only ones.
+- `-maxconnections=<n>` - the maximum number of connections, which defaults to 125. Each active connection takes up some
+ memory. This option applies only if inbound connections are enabled; otherwise, the number of connections will not
+ be more than 11. Of the 11 outbound peers, there can be 8 full-relay connections, 2 block-relay-only ones,
+ and occasionally 1 short-lived feeler or extra outbound block-relay-only connection.
+
+- These limits do not apply to connections added manually with the `-addnode` configuration option or
+ the `addnode` RPC, which have a separate limit of 8 connections.
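
For illustration, either of the following startup lines keeps peer-related memory low; the numbers are example values only, not recommendations:

```bash
# Keep accepting inbound connections but cap the total peer count.
bitcoind -maxconnections=16

# Or disable inbound connections entirely, so at most ~11 outbound peers are used.
bitcoind -listen=0
```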
## Thread configuration
diff --git a/doc/release-notes-20867.md b/doc/release-notes-20867.md
new file mode 100644
index 0000000000..60eed6838f
--- /dev/null
+++ b/doc/release-notes-20867.md
@@ -0,0 +1,11 @@
+Wallet
+------
+
+- We now support up to 20 keys in `multi()` and `sortedmulti()` descriptors
+ under `wsh()`. (#20867)
+
+Updated RPCs
+------------
+
+- `addmultisigaddress` and `createmultisig` now support up to 20 keys for
+ Segwit addresses.
diff --git a/doc/release-notes/release-notes-0.21.1.md b/doc/release-notes/release-notes-0.21.1.md
new file mode 100644
index 0000000000..d032fa8429
--- /dev/null
+++ b/doc/release-notes/release-notes-0.21.1.md
@@ -0,0 +1,203 @@
+0.21.1 Release Notes
+====================
+
+Bitcoin Core version 0.21.1 is now available from:
+
+ <https://bitcoincore.org/bin/bitcoin-core-0.21.1/>
+
+This minor release includes various bug fixes and performance
+improvements, as well as updated translations.
+
+Please report bugs using the issue tracker at GitHub:
+
+ <https://github.com/bitcoin/bitcoin/issues>
+
+To receive security and update notifications, please subscribe to:
+
+ <https://bitcoincore.org/en/list/announcements/join/>
+
+How to Upgrade
+==============
+
+If you are running an older version, shut it down. Wait until it has completely
+shut down (which might take a few minutes in some cases), then run the
+installer (on Windows) or just copy over `/Applications/Bitcoin-Qt` (on Mac)
+or `bitcoind`/`bitcoin-qt` (on Linux).
+
+Upgrading directly from a version of Bitcoin Core that has reached its EOL is
+possible, but it might take some time if the data directory needs to be migrated. Old
+wallet versions of Bitcoin Core are generally supported.
+
+Compatibility
+==============
+
+Bitcoin Core is supported and extensively tested on operating systems
+using the Linux kernel, macOS 10.12+, and Windows 7 and newer. Bitcoin
+Core should also work on most other Unix-like systems but is not as
+frequently tested on them. It is not recommended to use Bitcoin Core on
+unsupported systems.
+
+From Bitcoin Core 0.20.0 onwards, macOS versions earlier than 10.12 are no
+longer supported. Additionally, Bitcoin Core does not yet change appearance
+when macOS "dark mode" is activated.
+
+Notable changes
+===============
+
+## Taproot Soft Fork
+
+Included in this release are the mainnet and testnet activation
+parameters for the taproot soft fork (BIP341) which also adds support
+for schnorr signatures (BIP340) and tapscript (BIP342).
+
+If activated, these improvements will allow users of single-signature
+scripts, multisignature scripts, and complex contracts to all use
+identical-appearing commitments that enhance their privacy and the
+fungibility of all bitcoins. Spenders will enjoy lower fees and the
+ability to resolve many multisig scripts and complex contracts with the
+same efficiency, low fees, and large anonymity set as single-sig users.
+Taproot and schnorr also include efficiency improvements for full nodes
+such as the ability to batch signature verification. Together, the
+improvements lay the groundwork for future potential
+upgrades that may improve efficiency, privacy, and fungibility further.
+
+Activation for taproot is being managed using a variation of BIP9
+versionbits called Speedy Trial (described in BIP341). Taproot's
+versionbit is bit 2, and nodes will begin tracking which blocks signal
+support for taproot at the beginning of the first retarget period after
+taproot’s start date of 24 April 2021. If 90% of blocks within a
+2,016-block retarget period (about two weeks) signal support for taproot
+prior to the first retarget period beginning after the time of 11 August
+2021, the soft fork will be locked in, and taproot will then be active
+as of block 709632 (expected in early or mid November).
+
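For context, a quick worked figure from the numbers above: 90% of a 2,016-block
retarget period is 0.90 × 2,016 = 1,814.4, so at least 1,815 blocks must signal
for lock-in; equivalently, at most 201 blocks in the period may omit the signal.
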
+Should taproot not be locked in via Speedy Trial activation, it is
+expected that a follow-up activation mechanism will be deployed, with
+changes to address the reasons the Speedy Trial method failed.
+
+This release includes the ability to pay taproot addresses, although
+payments to such addresses are not secure until taproot activates.
+It also includes the ability to relay and mine taproot transactions
+after activation. Beyond those two basic capabilities, this release
+does not include any code that allows anyone to directly use taproot.
+The addition of taproot-related features to Bitcoin Core's wallet is
+expected in later releases once taproot activation is assured.
+
+All users, businesses, and miners are encouraged to upgrade to this
+release (or a subsequent compatible release) unless they object to
+activation of taproot. If taproot is locked in, then upgrading before
+block 709632 is highly recommended to help enforce taproot's new rules
+and to avoid the unlikely case of seeing falsely confirmed transactions.
+
+Miners who want to activate Taproot should preferably use this release
+to control their signaling. The `getblocktemplate` RPC results will
+automatically be updated to signal once the appropriate start time has been
+reached and continue signaling until the timeout occurs or taproot
+activates. Alternatively, miners may manually start signaling on bit 2
+at any time; if taproot activates, they will need to ensure they update
+their nodes before block 709632 or non-upgraded nodes could cause them to mine on
+an invalid chain. See the [versionbits
+FAQ](https://bitcoincore.org/en/2016/06/08/version-bits-miners-faq/) for
+details.
+
+
+For more information about taproot, please see the following resources:
+
+- Technical specifications
+ - [BIP340 Schnorr signatures for secp256k1](https://github.com/bitcoin/bips/blob/master/bip-0340.mediawiki)
+ - [BIP341 Taproot: SegWit version 1 spending rules](https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki)
+ - [BIP342 Validation of Taproot scripts](https://github.com/bitcoin/bips/blob/master/bip-0342.mediawiki)
+
+- Popular articles:
+ - [Taproot Is Coming: What It Is, and How It Will Benefit Bitcoin](https://bitcoinmagazine.com/technical/taproot-coming-what-it-and-how-it-will-benefit-bitcoin)
+ - [What do Schnorr Signatures Mean for Bitcoin?](https://academy.binance.com/en/articles/what-do-schnorr-signatures-mean-for-bitcoin)
+ - [The Schnorr Signature & Taproot Softfork Proposal](https://blog.bitmex.com/the-schnorr-signature-taproot-softfork-proposal/)
+
+- Development history overview
+ - [Taproot](https://bitcoinops.org/en/topics/taproot/)
+ - [Schnorr signatures](https://bitcoinops.org/en/topics/schnorr-signatures/)
+ - [Tapscript](https://bitcoinops.org/en/topics/tapscript/)
+ - [Soft fork activation](https://bitcoinops.org/en/topics/soft-fork-activation/)
+
+- Other
+ - [Questions and answers related to taproot](https://bitcoin.stackexchange.com/questions/tagged/taproot)
+ - [Taproot review](https://github.com/ajtowns/taproot-review)
+
+Updated RPCs
+------------
+
+- Due to [BIP 350](https://github.com/bitcoin/bips/blob/master/bip-0350.mediawiki)
+ being implemented, behavior for all RPCs that accept addresses is changed when
+ a native witness version 1 (or higher) is passed. These now require a Bech32m
+ encoding instead of a Bech32 one, and Bech32m encoding will be used for such
+ addresses in RPC output as well. No version 1 addresses should be created
+ for mainnet until consensus rules are adopted that give them meaning
+ (e.g. through [BIP 341](https://github.com/bitcoin/bips/blob/master/bip-0341.mediawiki)).
+ Once that happens, Bech32m is expected to be used for them, so this shouldn't
+ affect any production systems, but may be observed on other networks where such
+ addresses already have meaning (like signet).
+
+0.21.1 change log
+=================
+
+### Consensus
+- #21377 Speedy trial support for versionbits (ajtowns)
+- #21686 Speedy trial activation parameters for Taproot (achow101)
+
+### P2P protocol and network code
+- #20852 allow CSubNet of non-IP networks (vasild)
+- #21043 Avoid UBSan warning in ProcessMessage(…) (practicalswift)
+
+### Wallet
+- #21166 Introduce DeferredSignatureChecker and have SignatureExtractorClass subclass it (achow101)
+- #21083 Avoid requesting fee rates multiple times during coin selection (achow101)
+
+### RPC and other APIs
+- #21201 Disallow sendtoaddress and sendmany when private keys disabled (achow101)
+
+### Build system
+- #21486 link against -lsocket if required for `*ifaddrs` (fanquake)
+- #20983 Fix MSVC build after gui#176 (hebasto)
+
+### Tests and QA
+- #21380 Add fuzzing harness for versionbits (ajtowns)
+- #20812 fuzz: Bump FuzzedDataProvider.h (MarcoFalke)
+- #20740 fuzz: Update FuzzedDataProvider.h from upstream (LLVM) (practicalswift)
+- #21446 Update vcpkg checkout commit (sipsorcery)
+- #21397 fuzz: Bump FuzzedDataProvider.h (MarcoFalke)
+- #21081 Fix the unreachable code at `feature_taproot` (brunoerg)
+- #20562 Test that a fully signed tx given to signrawtx is unchanged (achow101)
+- #21571 Make sure non-IP peers get discouraged and disconnected (vasild, MarcoFalke)
+- #21489 fuzz: cleanups for versionbits fuzzer (ajtowns)
+
+### Miscellaneous
+- #20861 BIP 350: Implement Bech32m and use it for v1+ segwit addresses (sipa)
+
+### Documentation
+- #21384 add signet to bitcoin.conf documentation (jonatack)
+- #21342 Remove outdated comment (hebasto)
+
+Credits
+=======
+
+Thanks to everyone who directly contributed to this release:
+
+- Aaron Clauson
+- Andrew Chow
+- Anthony Towns
+- Bruno Garcia
+- Fabian Jahr
+- fanquake
+- Hennadii Stepanov
+- Jon Atack
+- Luke Dashjr
+- MarcoFalke
+- Pieter Wuille
+- practicalswift
+- randymcmillan
+- Sjors Provoost
+- Vasil Dimov
+- W. J. van der Laan
+
+As well as to everyone that helped with translations on
+[Transifex](https://www.transifex.com/bitcoin/bitcoin/).
diff --git a/doc/tor.md b/doc/tor.md
index e38ada5bd6..2640a6109b 100644
--- a/doc/tor.md
+++ b/doc/tor.md
@@ -15,6 +15,10 @@ There are several ways to see your local onion address in Bitcoin Core:
You may set the `-debug=tor` config logging option to have additional
information in the debug log about your Tor configuration.
+CLI `-addrinfo` returns the number of addresses known to your node per network
+type, including Tor v2 and v3. This is useful to see how many onion addresses
+are known to your node for `-onlynet=onion` and how many Tor v3 addresses it
+knows when upgrading to current and future Tor releases that support Tor v3 only.
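
For example, a minimal check against a running node (assuming `bitcoin-cli` can reach it with your usual connection options):

```bash
# Show how many addresses the node currently knows for each network type.
bitcoin-cli -addrinfo
```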
## 1. Run Bitcoin Core behind a Tor proxy
diff --git a/share/examples/bitcoin.conf b/share/examples/bitcoin.conf
index 5b7fc776a4..318b48bf2e 100644
--- a/share/examples/bitcoin.conf
+++ b/share/examples/bitcoin.conf
@@ -63,7 +63,12 @@
# Port on which to listen for connections (default: 8333, testnet: 18333, signet: 38333, regtest: 18444)
#port=
-# Maximum number of inbound+outbound connections.
+# Maximum number of inbound + outbound connections (default: 125). This option
+# applies only if inbound connections are enabled; otherwise, the number of connections
+# will not be more than 11: 8 full-relay connections, 2 block-relay-only ones, and
+# occasionally 1 short-lived feeler or extra outbound block-relay-only connection.
+# These limits do not apply to connections added manually with the -addnode
+# configuration option or the addnode RPC, which have a separate limit of 8 connections.
#maxconnections=
#
@@ -142,8 +147,11 @@
# both prior transactions and several dozen future transactions.
#keypool=100
+# Maintain coinstats index used by the gettxoutsetinfo RPC (default: 0).
+#coinstatsindex=1
+
# Enable pruning to reduce storage requirements by deleting old blocks.
-# This mode is incompatible with -txindex and -rescan.
+# This mode is incompatible with -txindex, -coinstatsindex and -rescan.
# 0 = default (no pruning).
# 1 = allows manual pruning via RPC.
# >=550 = target to stay under in MiB.
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 9360aa05f1..d55f5e1850 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -350,11 +350,6 @@ if EMBEDDED_UNIVALUE
$(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C univalue check
endif
-if ENABLE_FUZZ_LINK_ALL
-all-local: $(FUZZ_BINARY)
- bash ./test/fuzz/danger_link_all.sh
-endif
-
%.cpp.test: %.cpp
@echo Running tests: `cat $< | grep -E "(BOOST_FIXTURE_TEST_SUITE\\(|BOOST_AUTO_TEST_SUITE\\()" | cut -d '(' -f 2 | cut -d ',' -f 1 | cut -d ')' -f 1` from $<
$(AM_V_at)$(TEST_BINARY) --catch_system_errors=no -l test_suite -t "`cat $< | grep -E "(BOOST_FIXTURE_TEST_SUITE\\(|BOOST_AUTO_TEST_SUITE\\()" | cut -d '(' -f 2 | cut -d ',' -f 1 | cut -d ')' -f 1`" -- DEBUG_LOG_OUT > $<.log 2>&1 || (cat $<.log && false)
diff --git a/src/bench/block_assemble.cpp b/src/bench/block_assemble.cpp
index 67ab02a5b3..aa72981cb4 100644
--- a/src/bench/block_assemble.cpp
+++ b/src/bench/block_assemble.cpp
@@ -6,6 +6,7 @@
#include <consensus/validation.h>
#include <crypto/sha256.h>
#include <test/util/mining.h>
+#include <test/util/script.h>
#include <test/util/setup_common.h>
#include <test/util/wallet.h>
#include <txmempool.h>
@@ -18,23 +19,17 @@ static void AssembleBlock(benchmark::Bench& bench)
{
const auto test_setup = MakeNoLogFileContext<const TestingSetup>();
- const std::vector<unsigned char> op_true{OP_TRUE};
CScriptWitness witness;
- witness.stack.push_back(op_true);
-
- uint256 witness_program;
- CSHA256().Write(&op_true[0], op_true.size()).Finalize(witness_program.begin());
-
- const CScript SCRIPT_PUB{CScript(OP_0) << std::vector<unsigned char>{witness_program.begin(), witness_program.end()}};
+ witness.stack.push_back(WITNESS_STACK_ELEM_OP_TRUE);
// Collect some loose transactions that spend the coinbases of our mined blocks
constexpr size_t NUM_BLOCKS{200};
std::array<CTransactionRef, NUM_BLOCKS - COINBASE_MATURITY + 1> txs;
for (size_t b{0}; b < NUM_BLOCKS; ++b) {
CMutableTransaction tx;
- tx.vin.push_back(MineBlock(test_setup->m_node, SCRIPT_PUB));
+ tx.vin.push_back(MineBlock(test_setup->m_node, P2WSH_OP_TRUE));
tx.vin.back().scriptWitness = witness;
- tx.vout.emplace_back(1337, SCRIPT_PUB);
+ tx.vout.emplace_back(1337, P2WSH_OP_TRUE);
if (NUM_BLOCKS - b >= COINBASE_MATURITY)
txs.at(b) = MakeTransactionRef(tx);
}
@@ -48,7 +43,7 @@ static void AssembleBlock(benchmark::Bench& bench)
}
bench.run([&] {
- PrepareBlock(test_setup->m_node, SCRIPT_PUB);
+ PrepareBlock(test_setup->m_node, P2WSH_OP_TRUE);
});
}
diff --git a/src/chainparamsseeds.h b/src/chainparamsseeds.h
index cd7b806621..2e31daea83 100644
--- a/src/chainparamsseeds.h
+++ b/src/chainparamsseeds.h
@@ -1195,6 +1195,14 @@ static const uint8_t chainparams_seed_main[] = {
0x04,0x20,0x98,0xc6,0x44,0x27,0x90,0x41,0xa6,0x98,0xf9,0x25,0x6c,0x59,0x0f,0x06,0x6d,0x44,0x59,0x0e,0xb2,0x46,0xb0,0xa4,0x37,0x88,0x69,0x8f,0xc1,0x32,0xcd,0x9f,0x15,0xd7,0x20,0x8d,
0x04,0x20,0xaa,0x3a,0x16,0x86,0xea,0x59,0x09,0x04,0x78,0xe5,0x10,0x92,0xe1,0x1d,0xad,0xf7,0x56,0x2b,0xac,0xb0,0x97,0x29,0x63,0x30,0xf4,0x1b,0xcf,0xde,0xf3,0x28,0x0a,0x29,0x20,0x8d,
0x04,0x20,0xbc,0x27,0xae,0x89,0xc1,0x67,0x73,0x0a,0x08,0x02,0xdf,0xb7,0xcc,0x94,0xc7,0x9f,0xf4,0x72,0x7a,0x9b,0x20,0x0c,0x5c,0x11,0x3d,0x22,0xd6,0x13,0x88,0x66,0x74,0xbf,0x20,0x8d,
+ 0x05,0x20,0xfe,0x97,0xba,0x09,0x2a,0xa4,0x85,0x10,0xa1,0x04,0x7b,0x88,0x7a,0x5a,0x06,0x53,0x71,0x93,0x3b,0xf9,0xa2,0x2f,0xd9,0xe3,0x8f,0xa5,0xa2,0xac,0x1e,0x6c,0x6c,0x8c,0x20,0x8d,
+ 0x05,0x20,0x17,0x0c,0x56,0xce,0x72,0xa5,0xa0,0xe6,0x23,0x06,0xa3,0xc7,0x08,0x43,0x18,0xee,0x3a,0x46,0x35,0x5d,0x17,0xf6,0x78,0x96,0xa0,0x9c,0x51,0xef,0xbe,0x23,0xfd,0x71,0x20,0x8d,
+ 0x05,0x20,0x31,0x0f,0x30,0x0b,0x9d,0x70,0x0c,0x7c,0xf7,0x98,0x7e,0x1c,0xf4,0x33,0xdc,0x64,0x17,0xf7,0x00,0x7a,0x0c,0x04,0xb5,0x83,0xfc,0x5f,0xa6,0x52,0x39,0x79,0x63,0x87,0x20,0x8d,
+ 0x05,0x20,0x3e,0xe3,0xe0,0xa9,0xbc,0xf4,0x2e,0x59,0xd9,0x20,0xee,0xdf,0x74,0x61,0x4d,0x99,0x0c,0x5c,0x15,0x30,0x9b,0x72,0x16,0x79,0x15,0xf4,0x7a,0xca,0x34,0xcc,0x81,0x99,0x20,0x8d,
+ 0x05,0x20,0x3b,0x42,0x1c,0x25,0xf7,0xbf,0x79,0xed,0x6d,0x7d,0xef,0x65,0x30,0x7d,0xee,0x16,0x37,0x22,0x72,0x43,0x33,0x28,0x40,0xa3,0xaa,0xf4,0x48,0x49,0x67,0xb1,0x4b,0xfd,0x20,0x8d,
+ 0x05,0x20,0x7a,0x65,0xf7,0x47,0x42,0x9d,0x66,0x42,0x3b,0xb3,0xa7,0x03,0x6c,0x46,0x78,0x19,0x28,0x78,0x1e,0xa3,0x7c,0x67,0x44,0xb7,0x83,0x05,0xe3,0xfe,0xa5,0xe4,0x0a,0x6e,0x20,0x8d,
+ 0x05,0x20,0xb5,0x83,0x6f,0xb6,0x11,0xd8,0x0e,0xa8,0x57,0xda,0x15,0x20,0x5b,0x1a,0x6d,0x21,0x15,0x5a,0xbd,0xb4,0x17,0x11,0xc2,0xfb,0x0e,0xfc,0xde,0xe8,0x26,0x56,0xa8,0xac,0x20,0x8d,
+ 0x05,0x20,0xcc,0xaf,0x6c,0x3b,0xd0,0x13,0x76,0x23,0xc3,0x36,0xbb,0x64,0x4a,0x4a,0x06,0x93,0x69,0x6d,0xb0,0x10,0x6e,0x66,0xa4,0x61,0xf8,0x2d,0xe7,0x80,0x72,0x4d,0x53,0x94,0x20,0x8d,
};
static const uint8_t chainparams_seed_test[] = {
diff --git a/src/compressor.cpp b/src/compressor.cpp
index ef3135e7a5..a161c42866 100644
--- a/src/compressor.cpp
+++ b/src/compressor.cpp
@@ -124,7 +124,7 @@ bool DecompressScript(CScript& script, unsigned int nSize, const CompressedScrip
unsigned char vch[33] = {};
vch[0] = nSize - 2;
memcpy(&vch[1], in.data(), 32);
- CPubKey pubkey(&vch[0], &vch[33]);
+ CPubKey pubkey{vch};
if (!pubkey.Decompress())
return false;
assert(pubkey.size() == 65);
diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp
index d7694108f5..3a1086bf4c 100644
--- a/src/dbwrapper.cpp
+++ b/src/dbwrapper.cpp
@@ -220,10 +220,9 @@ const unsigned int CDBWrapper::OBFUSCATE_KEY_NUM_BYTES = 8;
*/
std::vector<unsigned char> CDBWrapper::CreateObfuscateKey() const
{
- unsigned char buff[OBFUSCATE_KEY_NUM_BYTES];
- GetRandBytes(buff, OBFUSCATE_KEY_NUM_BYTES);
- return std::vector<unsigned char>(&buff[0], &buff[OBFUSCATE_KEY_NUM_BYTES]);
-
+ std::vector<uint8_t> ret(OBFUSCATE_KEY_NUM_BYTES);
+ GetRandBytes(ret.data(), OBFUSCATE_KEY_NUM_BYTES);
+ return ret;
}
bool CDBWrapper::IsEmpty()
diff --git a/src/index/base.cpp b/src/index/base.cpp
index 9e637c9c6f..357c4fbaf9 100644
--- a/src/index/base.cpp
+++ b/src/index/base.cpp
@@ -13,7 +13,7 @@
#include <validation.h> // For g_chainman
#include <warnings.h>
-constexpr char DB_BEST_BLOCK = 'B';
+constexpr uint8_t DB_BEST_BLOCK{'B'};
constexpr int64_t SYNC_LOG_INTERVAL = 30; // seconds
constexpr int64_t SYNC_LOCATOR_WRITE_INTERVAL = 30; // seconds
diff --git a/src/index/blockfilterindex.cpp b/src/index/blockfilterindex.cpp
index 154d7a7027..b82b9915d5 100644
--- a/src/index/blockfilterindex.cpp
+++ b/src/index/blockfilterindex.cpp
@@ -24,9 +24,9 @@
* as big-endian so that sequential reads of filters by height are fast.
* Keys for the hash index have the type [DB_BLOCK_HASH, uint256].
*/
-constexpr char DB_BLOCK_HASH = 's';
-constexpr char DB_BLOCK_HEIGHT = 't';
-constexpr char DB_FILTER_POS = 'P';
+constexpr uint8_t DB_BLOCK_HASH{'s'};
+constexpr uint8_t DB_BLOCK_HEIGHT{'t'};
+constexpr uint8_t DB_FILTER_POS{'P'};
constexpr unsigned int MAX_FLTR_FILE_SIZE = 0x1000000; // 16 MiB
/** The pre-allocation chunk size for fltr?????.dat files */
@@ -63,7 +63,7 @@ struct DBHeightKey {
template<typename Stream>
void Unserialize(Stream& s)
{
- char prefix = ser_readdata8(s);
+ const uint8_t prefix{ser_readdata8(s)};
if (prefix != DB_BLOCK_HEIGHT) {
throw std::ios_base::failure("Invalid format for block filter index DB height key");
}
@@ -77,7 +77,7 @@ struct DBHashKey {
explicit DBHashKey(const uint256& hash_in) : hash(hash_in) {}
SERIALIZE_METHODS(DBHashKey, obj) {
- char prefix = DB_BLOCK_HASH;
+ uint8_t prefix{DB_BLOCK_HASH};
READWRITE(prefix);
if (prefix != DB_BLOCK_HASH) {
throw std::ios_base::failure("Invalid format for block filter index DB hash key");
@@ -149,7 +149,7 @@ bool BlockFilterIndex::ReadFilterFromDisk(const FlatFilePos& pos, BlockFilter& f
}
uint256 block_hash;
- std::vector<unsigned char> encoded_filter;
+ std::vector<uint8_t> encoded_filter;
try {
filein >> block_hash >> encoded_filter;
filter = BlockFilter(GetFilterType(), block_hash, std::move(encoded_filter));
diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp
index c7c1f4b533..7c8b2b186e 100644
--- a/src/index/coinstatsindex.cpp
+++ b/src/index/coinstatsindex.cpp
@@ -12,9 +12,9 @@
#include <undo.h>
#include <validation.h>
-static constexpr char DB_BLOCK_HASH = 's';
-static constexpr char DB_BLOCK_HEIGHT = 't';
-static constexpr char DB_MUHASH = 'M';
+static constexpr uint8_t DB_BLOCK_HASH{'s'};
+static constexpr uint8_t DB_BLOCK_HEIGHT{'t'};
+static constexpr uint8_t DB_MUHASH{'M'};
namespace {
@@ -66,7 +66,7 @@ struct DBHeightKey {
template <typename Stream>
void Unserialize(Stream& s)
{
- char prefix{static_cast<char>(ser_readdata8(s))};
+ const uint8_t prefix{ser_readdata8(s)};
if (prefix != DB_BLOCK_HEIGHT) {
throw std::ios_base::failure("Invalid format for coinstatsindex DB height key");
}
@@ -81,7 +81,7 @@ struct DBHashKey {
SERIALIZE_METHODS(DBHashKey, obj)
{
- char prefix{DB_BLOCK_HASH};
+ uint8_t prefix{DB_BLOCK_HASH};
READWRITE(prefix);
if (prefix != DB_BLOCK_HASH) {
throw std::ios_base::failure("Invalid format for coinstatsindex DB hash key");
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
index f41985c344..3feefe8619 100644
--- a/src/index/txindex.cpp
+++ b/src/index/txindex.cpp
@@ -4,20 +4,20 @@
#include <index/disktxpos.h>
#include <index/txindex.h>
+#include <node/blockstorage.h>
#include <node/ui_interface.h>
#include <shutdown.h>
#include <util/system.h>
#include <util/translation.h>
#include <validation.h>
-constexpr char DB_BEST_BLOCK = 'B';
-constexpr char DB_TXINDEX = 't';
-constexpr char DB_TXINDEX_BLOCK = 'T';
+constexpr uint8_t DB_BEST_BLOCK{'B'};
+constexpr uint8_t DB_TXINDEX{'t'};
+constexpr uint8_t DB_TXINDEX_BLOCK{'T'};
std::unique_ptr<TxIndex> g_txindex;
-
/** Access to the txindex database (indexes/txindex/) */
class TxIndex::DB : public BaseIndex::DB
{
@@ -60,8 +60,8 @@ bool TxIndex::DB::WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_
*/
static void WriteTxIndexMigrationBatches(CDBWrapper& newdb, CDBWrapper& olddb,
CDBBatch& batch_newdb, CDBBatch& batch_olddb,
- const std::pair<unsigned char, uint256>& begin_key,
- const std::pair<unsigned char, uint256>& end_key)
+ const std::pair<uint8_t, uint256>& begin_key,
+ const std::pair<uint8_t, uint256>& end_key)
{
// Sync new DB changes to disk before deleting from old DB.
newdb.WriteBatch(batch_newdb, /*fSync=*/ true);
@@ -113,9 +113,9 @@ bool TxIndex::DB::MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator&
CDBBatch batch_newdb(*this);
CDBBatch batch_olddb(block_tree_db);
- std::pair<unsigned char, uint256> key;
- std::pair<unsigned char, uint256> begin_key{DB_TXINDEX, uint256()};
- std::pair<unsigned char, uint256> prev_key = begin_key;
+ std::pair<uint8_t, uint256> key;
+ std::pair<uint8_t, uint256> begin_key{DB_TXINDEX, uint256()};
+ std::pair<uint8_t, uint256> prev_key = begin_key;
bool interrupted = false;
std::unique_ptr<CDBIterator> cursor(block_tree_db.NewIterator());
diff --git a/src/init.cpp b/src/init.cpp
index b56c25c145..dbc4ea30c5 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -384,7 +384,7 @@ void SetupServerArgs(NodeContext& node)
#endif
argsman.AddArg("-blockreconstructionextratxn=<n>", strprintf("Extra transactions to keep in memory for compact block reconstructions (default: %u)", DEFAULT_BLOCK_RECONSTRUCTION_EXTRA_TXN), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-blocksonly", strprintf("Whether to reject transactions from network peers. Automatic broadcast and rebroadcast of any transactions from inbound peers is disabled, unless the peer has the 'forcerelay' permission. RPC transactions are not affected. (default: %u)", DEFAULT_BLOCKSONLY), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-coinstatsindex", strprintf("Maintain coinstats index used by the gettxoutset RPC (default: %u)", DEFAULT_COINSTATSINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
+ argsman.AddArg("-coinstatsindex", strprintf("Maintain coinstats index used by the gettxoutsetinfo RPC (default: %u)", DEFAULT_COINSTATSINDEX), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-conf=<file>", strprintf("Specify path to read-only configuration file. Relative paths will be prefixed by datadir location. (default: %s)", BITCOIN_CONF_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-dbbatchsize", strprintf("Maximum database write batch size in bytes (default: %u)", nDefaultDbBatchSize), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::OPTIONS);
@@ -420,7 +420,7 @@ void SetupServerArgs(NodeContext& node)
" If <type> is not supplied or if <type> = 1, indexes for all known types are enabled.",
ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
- argsman.AddArg("-addnode=<ip>", "Add a node to connect to and attempt to keep the connection open (see the `addnode` RPC command help for more info). This option can be specified multiple times to add multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-addnode=<ip>", strprintf("Add a node to connect to and attempt to keep the connection open (see the addnode RPC help for more info). This option can be specified multiple times to add multiple nodes; connections are limited to %u at a time and are counted separately from the -maxconnections limit.", MAX_ADDNODE_CONNECTIONS), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
argsman.AddArg("-asmap=<file>", strprintf("Specify asn mapping used for bucketing of the peers (default: %s). Relative paths will be prefixed by the net-specific datadir location.", DEFAULT_ASMAP_FILENAME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-bantime=<n>", strprintf("Default duration (in seconds) of manually configured bans (default: %u)", DEFAULT_MISBEHAVING_BANTIME), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-bind=<addr>[:<port>][=onion]", strprintf("Bind to given address and always listen on it (default: 0.0.0.0). Use [host]:port notation for IPv6. Append =onion to tag any incoming connections to that address and port as incoming Tor connections (default: 127.0.0.1:%u=onion, testnet: 127.0.0.1:%u=onion, signet: 127.0.0.1:%u=onion, regtest: 127.0.0.1:%u=onion)", defaultBaseParams->OnionServiceTargetPort(), testnetBaseParams->OnionServiceTargetPort(), signetBaseParams->OnionServiceTargetPort(), regtestBaseParams->OnionServiceTargetPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION);
@@ -433,7 +433,7 @@ void SetupServerArgs(NodeContext& node)
argsman.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-listen", "Accept connections from outside (default: 1 if no -proxy or -connect)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-listenonion", strprintf("Automatically create Tor onion service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- argsman.AddArg("-maxconnections=<n>", strprintf("Maintain at most <n> connections to peers (default: %u)", DEFAULT_MAX_PEER_CONNECTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-maxconnections=<n>", strprintf("Maintain at most <n> connections to peers (default: %u). This limit does not apply to connections manually added via -addnode or the addnode RPC, which have a separate limit of %u.", DEFAULT_MAX_PEER_CONNECTIONS, MAX_ADDNODE_CONNECTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxreceivebuffer=<n>", strprintf("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXRECEIVEBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxsendbuffer=<n>", strprintf("Maximum per-connection send buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXSENDBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxtimeadjustment", strprintf("Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by peers forward or backward by this amount. (default: %u seconds)", DEFAULT_MAX_TIME_ADJUSTMENT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
@@ -602,47 +602,6 @@ static void BlockNotifyGenesisWait(const CBlockIndex* pBlockIndex)
}
}
-// If we're using -prune with -reindex, then delete block files that will be ignored by the
-// reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
-// is missing, do the same here to delete any later block files after a gap. Also delete all
-// rev files since they'll be rewritten by the reindex anyway. This ensures that vinfoBlockFile
-// is in sync with what's actually on disk by the time we start downloading, so that pruning
-// works correctly.
-static void CleanupBlockRevFiles()
-{
- std::map<std::string, fs::path> mapBlockFiles;
-
- // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
- // Remove the rev files immediately and insert the blk file paths into an
- // ordered map keyed by block file index.
- LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
- fs::path blocksdir = gArgs.GetBlocksDirPath();
- for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) {
- if (fs::is_regular_file(*it) &&
- it->path().filename().string().length() == 12 &&
- it->path().filename().string().substr(8,4) == ".dat")
- {
- if (it->path().filename().string().substr(0,3) == "blk")
- mapBlockFiles[it->path().filename().string().substr(3,5)] = it->path();
- else if (it->path().filename().string().substr(0,3) == "rev")
- remove(it->path());
- }
- }
-
- // Remove all block files that aren't part of a contiguous set starting at
- // zero by walking the ordered map (keys are block file indices) by
- // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
- // start removing block files.
- int nContigCounter = 0;
- for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
- if (atoi(item.first) == nContigCounter) {
- nContigCounter++;
- continue;
- }
- remove(item.second);
- }
-}
-
#if HAVE_SYSTEM
static void StartupNotify(const ArgsManager& args)
{
diff --git a/src/key.cpp b/src/key.cpp
index 1e59b301cb..5666adebb8 100644
--- a/src/key.cpp
+++ b/src/key.cpp
@@ -293,7 +293,7 @@ bool CKey::Derive(CKey& keyChild, ChainCode &ccChild, unsigned int nChild, const
bool CExtKey::Derive(CExtKey &out, unsigned int _nChild) const {
out.nDepth = nDepth + 1;
CKeyID id = key.GetPubKey().GetID();
- memcpy(&out.vchFingerprint[0], &id, 4);
+ memcpy(out.vchFingerprint, &id, 4);
out.nChild = _nChild;
return key.Derive(out.key, out.chaincode, _nChild, chaincode);
}
@@ -312,7 +312,7 @@ void CExtKey::SetSeed(const unsigned char *seed, unsigned int nSeedLen) {
CExtPubKey CExtKey::Neuter() const {
CExtPubKey ret;
ret.nDepth = nDepth;
- memcpy(&ret.vchFingerprint[0], &vchFingerprint[0], 4);
+ memcpy(ret.vchFingerprint, vchFingerprint, 4);
ret.nChild = nChild;
ret.pubkey = key.GetPubKey();
ret.chaincode = chaincode;
diff --git a/src/key.h b/src/key.h
index 206322956c..3ee49a778b 100644
--- a/src/key.h
+++ b/src/key.h
@@ -151,7 +151,7 @@ struct CExtKey {
friend bool operator==(const CExtKey& a, const CExtKey& b)
{
return a.nDepth == b.nDepth &&
- memcmp(&a.vchFingerprint[0], &b.vchFingerprint[0], sizeof(vchFingerprint)) == 0 &&
+ memcmp(a.vchFingerprint, b.vchFingerprint, sizeof(vchFingerprint)) == 0 &&
a.nChild == b.nChild &&
a.chaincode == b.chaincode &&
a.key == b.key;
diff --git a/src/net.cpp b/src/net.cpp
index fe1a0dfded..ce38c140ce 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -681,19 +681,19 @@ int V1TransportDeserializer::readHeader(Span<const uint8_t> msg_bytes)
hdrbuf >> hdr;
}
catch (const std::exception&) {
- LogPrint(BCLog::NET, "HEADER ERROR - UNABLE TO DESERIALIZE, peer=%d\n", m_node_id);
+ LogPrint(BCLog::NET, "Header error: Unable to deserialize, peer=%d\n", m_node_id);
return -1;
}
// Check start string, network magic
if (memcmp(hdr.pchMessageStart, m_chain_params.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) {
- LogPrint(BCLog::NET, "HEADER ERROR - MESSAGESTART (%s, %u bytes), received %s, peer=%d\n", hdr.GetCommand(), hdr.nMessageSize, HexStr(hdr.pchMessageStart), m_node_id);
+ LogPrint(BCLog::NET, "Header error: Wrong MessageStart %s received, peer=%d\n", HexStr(hdr.pchMessageStart), m_node_id);
return -1;
}
// reject messages larger than MAX_SIZE or MAX_PROTOCOL_MESSAGE_LENGTH
if (hdr.nMessageSize > MAX_SIZE || hdr.nMessageSize > MAX_PROTOCOL_MESSAGE_LENGTH) {
- LogPrint(BCLog::NET, "HEADER ERROR - SIZE (%s, %u bytes), peer=%d\n", hdr.GetCommand(), hdr.nMessageSize, m_node_id);
+ LogPrint(BCLog::NET, "Header error: Size too large (%s, %u bytes), peer=%d\n", SanitizeString(hdr.GetCommand()), hdr.nMessageSize, m_node_id);
return -1;
}
@@ -746,7 +746,7 @@ std::optional<CNetMessage> V1TransportDeserializer::GetMessage(const std::chrono
// Check checksum and header command string
if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) {
- LogPrint(BCLog::NET, "CHECKSUM ERROR (%s, %u bytes), expected %s was %s, peer=%d\n",
+ LogPrint(BCLog::NET, "Header error: Wrong checksum (%s, %u bytes), expected %s was %s, peer=%d\n",
SanitizeString(msg->m_command), msg->m_message_size,
HexStr(Span<uint8_t>(hash.begin(), hash.begin() + CMessageHeader::CHECKSUM_SIZE)),
HexStr(hdr.pchChecksum),
@@ -754,8 +754,8 @@ std::optional<CNetMessage> V1TransportDeserializer::GetMessage(const std::chrono
out_err_raw_size = msg->m_raw_message_size;
msg = std::nullopt;
} else if (!hdr.IsCommandValid()) {
- LogPrint(BCLog::NET, "HEADER ERROR - COMMAND (%s, %u bytes), peer=%d\n",
- hdr.GetCommand(), msg->m_message_size, m_node_id);
+ LogPrint(BCLog::NET, "Header error: Invalid message type (%s, %u bytes), peer=%d\n",
+ SanitizeString(hdr.GetCommand()), msg->m_message_size, m_node_id);
out_err_raw_size = msg->m_raw_message_size;
msg.reset();
}
@@ -1224,19 +1224,10 @@ void CConnman::DisconnectNodes()
std::list<CNode*> vNodesDisconnectedCopy = vNodesDisconnected;
for (CNode* pnode : vNodesDisconnectedCopy)
{
- // wait until threads are done using it
+ // Destroy the object only after other threads have stopped using it.
if (pnode->GetRefCount() <= 0) {
- bool fDelete = false;
- {
- TRY_LOCK(pnode->cs_vSend, lockSend);
- if (lockSend) {
- fDelete = true;
- }
- }
- if (fDelete) {
- vNodesDisconnected.remove(pnode);
- DeleteNode(pnode);
- }
+ vNodesDisconnected.remove(pnode);
+ DeleteNode(pnode);
}
}
}
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 2201caf7d2..27ad9eefb5 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -25,6 +25,7 @@
#include <reverse_iterator.h>
#include <scheduler.h>
#include <streams.h>
+#include <sync.h>
#include <tinyformat.h>
#include <txmempool.h>
#include <txorphanage.h>
@@ -256,6 +257,9 @@ public:
const std::chrono::microseconds time_received, const std::atomic<bool>& interruptMsgProc) override;
private:
+ void _RelayTransaction(const uint256& txid, const uint256& wtxid)
+ EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
/** Consider evicting an outbound peer based on the amount of time they've been behind our tip */
void ConsiderEviction(CNode& pto, int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -1015,7 +1019,7 @@ void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler)
if (tx != nullptr) {
LOCK(cs_main);
- RelayTransaction(txid, tx->GetWitnessHash());
+ _RelayTransaction(txid, tx->GetWitnessHash());
} else {
m_mempool.RemoveUnbroadcastTx(txid, true);
}
@@ -1512,6 +1516,11 @@ void PeerManagerImpl::SendPings()
void PeerManagerImpl::RelayTransaction(const uint256& txid, const uint256& wtxid)
{
+ WITH_LOCK(cs_main, _RelayTransaction(txid, wtxid););
+}
+
+void PeerManagerImpl::_RelayTransaction(const uint256& txid, const uint256& wtxid)
+{
m_connman.ForEachNode([&txid, &wtxid](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
AssertLockHeld(::cs_main);
@@ -2087,7 +2096,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set)
if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString());
- RelayTransaction(orphanHash, porphanTx->GetWitnessHash());
+ _RelayTransaction(orphanHash, porphanTx->GetWitnessHash());
m_orphanage.AddChildrenToWorkSet(*porphanTx, orphan_work_set);
m_orphanage.EraseTx(orphanHash);
for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) {
@@ -3055,7 +3064,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
LogPrintf("Not relaying non-mempool transaction %s from forcerelay peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
} else {
LogPrintf("Force relaying tx %s from peer=%d\n", tx.GetHash().ToString(), pfrom.GetId());
- RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
+ _RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
}
}
return;
@@ -3070,7 +3079,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// requests for it.
m_txrequest.ForgetTxHash(tx.GetHash());
m_txrequest.ForgetTxHash(tx.GetWitnessHash());
- RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
+ _RelayTransaction(tx.GetHash(), tx.GetWitnessHash());
m_orphanage.AddChildrenToWorkSet(tx, peer->m_orphan_work_set);
pfrom.nLastTXTime = GetTime();
@@ -4449,8 +4458,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
}
}
peer->m_blocks_for_inv_relay.clear();
+ }
- if (pto->m_tx_relay != nullptr) {
+ if (pto->m_tx_relay != nullptr) {
LOCK(pto->m_tx_relay->cs_tx_inventory);
// Check whether periodic sends should happen
bool fSendTrickle = pto->HasPermission(PF_NOBAN);
@@ -4578,7 +4588,6 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
}
}
}
- }
}
if (!vInv.empty())
m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::INV, vInv));
diff --git a/src/net_processing.h b/src/net_processing.h
index 67252acbb6..d5801aadd3 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -7,7 +7,6 @@
#define BITCOIN_NET_PROCESSING_H
#include <net.h>
-#include <sync.h>
#include <validationinterface.h>
class CAddrMan;
@@ -15,8 +14,6 @@ class CChainParams;
class CTxMemPool;
class ChainstateManager;
-extern RecursiveMutex cs_main;
-
/** Default for -maxorphantx, maximum number of orphan transactions kept in memory */
static const unsigned int DEFAULT_MAX_ORPHAN_TRANSACTIONS = 100;
/** Default number of orphan+recently-replaced txn to keep around for block reconstruction */
@@ -49,8 +46,7 @@ public:
virtual bool IgnoresIncomingTxs() = 0;
/** Relay transaction to all peers. */
- virtual void RelayTransaction(const uint256& txid, const uint256& wtxid)
- EXCLUSIVE_LOCKS_REQUIRED(cs_main) = 0;
+ virtual void RelayTransaction(const uint256& txid, const uint256& wtxid) = 0;
/** Send ping message to all peers */
virtual void SendPings() = 0;
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index daed6605e8..6c66c565ad 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -6,17 +6,310 @@
#include <chain.h>
#include <chainparams.h>
+#include <clientversion.h>
+#include <consensus/validation.h>
#include <flatfile.h>
#include <fs.h>
+#include <hash.h>
#include <pow.h>
#include <shutdown.h>
#include <signet.h>
#include <streams.h>
+#include <undo.h>
#include <util/system.h>
#include <validation.h>
-// From validation. TODO move here
-bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, CChain& active_chain, uint64_t nTime, bool fKnown = false);
+std::atomic_bool fImporting(false);
+std::atomic_bool fReindex(false);
+bool fHavePruned = false;
+bool fPruneMode = false;
+uint64_t nPruneTarget = 0;
+
+// TODO make namespace {
+RecursiveMutex cs_LastBlockFile;
+std::vector<CBlockFileInfo> vinfoBlockFile;
+int nLastBlockFile = 0;
+/** Global flag to indicate we should check to see if there are
+* block/undo files that should be deleted. Set on startup
+* or if we allocate more file space when we're in prune mode
+*/
+bool fCheckForPruning = false;
+
+/** Dirty block index entries. */
+std::set<CBlockIndex*> setDirtyBlockIndex;
+
+/** Dirty block file entries. */
+std::set<int> setDirtyFileInfo;
+// } // namespace
+
+static FILE* OpenUndoFile(const FlatFilePos& pos, bool fReadOnly = false);
+static FlatFileSeq BlockFileSeq();
+static FlatFileSeq UndoFileSeq();
+
+bool IsBlockPruned(const CBlockIndex* pblockindex)
+{
+ return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
+}
+
+// If we're using -prune with -reindex, then delete block files that will be ignored by the
+// reindex. Since reindexing works by starting at block file 0 and looping until a blockfile
+// is missing, do the same here to delete any later block files after a gap. Also delete all
+// rev files since they'll be rewritten by the reindex anyway. This ensures that vinfoBlockFile
+// is in sync with what's actually on disk by the time we start downloading, so that pruning
+// works correctly.
+void CleanupBlockRevFiles()
+{
+ std::map<std::string, fs::path> mapBlockFiles;
+
+ // Glob all blk?????.dat and rev?????.dat files from the blocks directory.
+ // Remove the rev files immediately and insert the blk file paths into an
+ // ordered map keyed by block file index.
+ LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
+ fs::path blocksdir = gArgs.GetBlocksDirPath();
+ for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) {
+ if (fs::is_regular_file(*it) &&
+ it->path().filename().string().length() == 12 &&
+ it->path().filename().string().substr(8,4) == ".dat")
+ {
+ if (it->path().filename().string().substr(0, 3) == "blk") {
+ mapBlockFiles[it->path().filename().string().substr(3, 5)] = it->path();
+ } else if (it->path().filename().string().substr(0, 3) == "rev") {
+ remove(it->path());
+ }
+ }
+ }
+
+ // Remove all block files that aren't part of a contiguous set starting at
+ // zero by walking the ordered map (keys are block file indices) by
+ // keeping a separate counter. Once we hit a gap (or if 0 doesn't exist)
+ // start removing block files.
+ int nContigCounter = 0;
+ for (const std::pair<const std::string, fs::path>& item : mapBlockFiles) {
+ if (atoi(item.first) == nContigCounter) {
+ nContigCounter++;
+ continue;
+ }
+ remove(item.second);
+ }
+}
+
+std::string CBlockFileInfo::ToString() const
+{
+ return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, FormatISO8601Date(nTimeFirst), FormatISO8601Date(nTimeLast));
+}
+
+CBlockFileInfo* GetBlockFileInfo(size_t n)
+{
+ LOCK(cs_LastBlockFile);
+
+ return &vinfoBlockFile.at(n);
+}
+
+static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
+{
+ // Open history file to append
+ CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
+ if (fileout.IsNull()) {
+ return error("%s: OpenUndoFile failed", __func__);
+ }
+
+ // Write index header
+ unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
+ fileout << messageStart << nSize;
+
+ // Write undo data
+ long fileOutPos = ftell(fileout.Get());
+ if (fileOutPos < 0) {
+ return error("%s: ftell failed", __func__);
+ }
+ pos.nPos = (unsigned int)fileOutPos;
+ fileout << blockundo;
+
+ // calculate & write checksum
+ CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
+ hasher << hashBlock;
+ hasher << blockundo;
+ fileout << hasher.GetHash();
+
+ return true;
+}
+
+bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
+{
+ FlatFilePos pos = pindex->GetUndoPos();
+ if (pos.IsNull()) {
+ return error("%s: no undo data available", __func__);
+ }
+
+ // Open history file to read
+ CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
+ if (filein.IsNull()) {
+ return error("%s: OpenUndoFile failed", __func__);
+ }
+
+ // Read block
+ uint256 hashChecksum;
+ CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data
+ try {
+ verifier << pindex->pprev->GetBlockHash();
+ verifier >> blockundo;
+ filein >> hashChecksum;
+ } catch (const std::exception& e) {
+ return error("%s: Deserialize or I/O error - %s", __func__, e.what());
+ }
+
+ // Verify checksum
+ if (hashChecksum != verifier.GetHash()) {
+ return error("%s: Checksum mismatch", __func__);
+ }
+
+ return true;
+}
+
+static void FlushUndoFile(int block_file, bool finalize = false)
+{
+ FlatFilePos undo_pos_old(block_file, vinfoBlockFile[block_file].nUndoSize);
+ if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
+ AbortNode("Flushing undo file to disk failed. This is likely the result of an I/O error.");
+ }
+}
+
+void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false)
+{
+ LOCK(cs_LastBlockFile);
+ FlatFilePos block_pos_old(nLastBlockFile, vinfoBlockFile[nLastBlockFile].nSize);
+ if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
+ AbortNode("Flushing block file to disk failed. This is likely the result of an I/O error.");
+ }
+ // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
+ // e.g. during IBD or when syncing after the node has been offline
+ if (!fFinalize || finalize_undo) FlushUndoFile(nLastBlockFile, finalize_undo);
+}
+
+uint64_t CalculateCurrentUsage()
+{
+ LOCK(cs_LastBlockFile);
+
+ uint64_t retval = 0;
+ for (const CBlockFileInfo& file : vinfoBlockFile) {
+ retval += file.nSize + file.nUndoSize;
+ }
+ return retval;
+}
+
+void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
+{
+ for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
+ FlatFilePos pos(*it, 0);
+ fs::remove(BlockFileSeq().FileName(pos));
+ fs::remove(UndoFileSeq().FileName(pos));
+ LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
+ }
+}
+
+static FlatFileSeq BlockFileSeq()
+{
+ return FlatFileSeq(gArgs.GetBlocksDirPath(), "blk", gArgs.GetBoolArg("-fastprune", false) ? 0x4000 /* 16 KiB */ : BLOCKFILE_CHUNK_SIZE);
+}
+
+static FlatFileSeq UndoFileSeq()
+{
+ return FlatFileSeq(gArgs.GetBlocksDirPath(), "rev", UNDOFILE_CHUNK_SIZE);
+}
+
+FILE* OpenBlockFile(const FlatFilePos& pos, bool fReadOnly)
+{
+ return BlockFileSeq().Open(pos, fReadOnly);
+}
+
+/** Open an undo file (rev?????.dat) */
+static FILE* OpenUndoFile(const FlatFilePos& pos, bool fReadOnly)
+{
+ return UndoFileSeq().Open(pos, fReadOnly);
+}
+
+fs::path GetBlockPosFilename(const FlatFilePos& pos)
+{
+ return BlockFileSeq().FileName(pos);
+}
+
+bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, CChain& active_chain, uint64_t nTime, bool fKnown = false)
+{
+ LOCK(cs_LastBlockFile);
+
+ unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
+ if (vinfoBlockFile.size() <= nFile) {
+ vinfoBlockFile.resize(nFile + 1);
+ }
+
+ bool finalize_undo = false;
+ if (!fKnown) {
+ while (vinfoBlockFile[nFile].nSize + nAddSize >= (gArgs.GetBoolArg("-fastprune", false) ? 0x10000 /* 64 KiB */ : MAX_BLOCKFILE_SIZE)) {
+ // when the undo file is keeping up with the block file, we want to flush it explicitly;
+ // when it is lagging behind (more blocks arrive than are being connected), we let the
+ // undo block write case handle it
+ assert(std::addressof(::ChainActive()) == std::addressof(active_chain));
+ finalize_undo = (vinfoBlockFile[nFile].nHeightLast == (unsigned int)active_chain.Tip()->nHeight);
+ nFile++;
+ if (vinfoBlockFile.size() <= nFile) {
+ vinfoBlockFile.resize(nFile + 1);
+ }
+ }
+ pos.nFile = nFile;
+ pos.nPos = vinfoBlockFile[nFile].nSize;
+ }
+
+ if ((int)nFile != nLastBlockFile) {
+ if (!fKnown) {
+ LogPrint(BCLog::VALIDATION, "Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
+ }
+ FlushBlockFile(!fKnown, finalize_undo);
+ nLastBlockFile = nFile;
+ }
+
+ vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
+ if (fKnown) {
+ vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
+ } else {
+ vinfoBlockFile[nFile].nSize += nAddSize;
+ }
+
+ if (!fKnown) {
+ bool out_of_space;
+ size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
+ if (out_of_space) {
+ return AbortNode("Disk space is too low!", _("Disk space is too low!"));
+ }
+ if (bytes_allocated != 0 && fPruneMode) {
+ fCheckForPruning = true;
+ }
+ }
+
+ setDirtyFileInfo.insert(nFile);
+ return true;
+}
+
+static bool FindUndoPos(BlockValidationState& state, int nFile, FlatFilePos& pos, unsigned int nAddSize)
+{
+ pos.nFile = nFile;
+
+ LOCK(cs_LastBlockFile);
+
+ pos.nPos = vinfoBlockFile[nFile].nUndoSize;
+ vinfoBlockFile[nFile].nUndoSize += nAddSize;
+ setDirtyFileInfo.insert(nFile);
+
+ bool out_of_space;
+ size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
+ if (out_of_space) {
+ return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
+ }
+ if (bytes_allocated != 0 && fPruneMode) {
+ fCheckForPruning = true;
+ }
+
+ return true;
+}
static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessageHeader::MessageStartChars& messageStart)
{
@@ -41,6 +334,35 @@ static bool WriteBlockToDisk(const CBlock& block, FlatFilePos& pos, const CMessa
return true;
}
+bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
+{
+ // Write undo information to disk
+ if (pindex->GetUndoPos().IsNull()) {
+ FlatFilePos _pos;
+ if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40)) {
+ return error("ConnectBlock(): FindUndoPos failed");
+ }
+ if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart())) {
+ return AbortNode(state, "Failed to write undo data");
+ }
+ // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order).
+ // We want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
+ // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
+ // with the block writes (usually when a synced-up node is getting newly mined blocks) -- this case is caught in
+ // the FindBlockPos function
+ if (_pos.nFile < nLastBlockFile && static_cast<uint32_t>(pindex->nHeight) == vinfoBlockFile[_pos.nFile].nHeightLast) {
+ FlushUndoFile(_pos.nFile, true);
+ }
+
+ // update nUndoPos in block index
+ pindex->nUndoPos = _pos.nPos;
+ pindex->nStatus |= BLOCK_HAVE_UNDO;
+ setDirtyBlockIndex.insert(pindex);
+ }
+
+ return true;
+}
+
bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::Params& consensusParams)
{
block.SetNull();
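
For readers skimming the new blockstorage.cpp code above, the contiguity scan in CleanupBlockRevFiles can be illustrated with a minimal, self-contained C++ sketch. This is an illustration only, not the production code: the std::map keyed by a parsed file index stands in for the real blk?????.dat filename map, and remove_file() is a hypothetical stand-in for fs::remove().

#include <iostream>
#include <map>
#include <string>

// Hypothetical stand-in for fs::remove(); just reports what would be deleted.
static void remove_file(const std::string& path)
{
    std::cout << "would remove " << path << '\n';
}

int main()
{
    // Keyed by block file index, mirroring how mapBlockFiles is keyed by the
    // five-digit suffix of blk?????.dat.
    std::map<int, std::string> block_files{
        {0, "blk00000.dat"}, {1, "blk00001.dat"}, {3, "blk00003.dat"}, {4, "blk00004.dat"}};

    // Keep files only while they form a contiguous run starting at 0; once a
    // gap appears (index 2 is missing here), every later file is removed,
    // matching the reindex-from-zero behaviour described in the comment.
    int contiguous = 0;
    for (const auto& [index, path] : block_files) {
        if (index == contiguous) {
            ++contiguous;
            continue;
        }
        remove_file(path);
    }
    return 0;
}
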
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index 3b546f0719..faf99dea81 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -12,7 +12,9 @@
#include <protocol.h> // For CMessageHeader::MessageStartChars
class ArgsManager;
+class BlockValidationState;
class CBlock;
+class CBlockFileInfo;
class CBlockIndex;
class CBlockUndo;
class CChain;
@@ -25,6 +27,44 @@ struct Params;
static constexpr bool DEFAULT_STOPAFTERBLOCKIMPORT{false};
+/** The pre-allocation chunk size for blk?????.dat files (since 0.8) */
+static const unsigned int BLOCKFILE_CHUNK_SIZE = 0x1000000; // 16 MiB
+/** The pre-allocation chunk size for rev?????.dat files (since 0.8) */
+static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB
+/** The maximum size of a blk?????.dat file (since 0.8) */
+static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB
+
+extern std::atomic_bool fImporting;
+extern std::atomic_bool fReindex;
+/** Pruning-related variables and constants */
+/** True if any block files have ever been pruned. */
+extern bool fHavePruned;
+/** True if we're running in -prune mode. */
+extern bool fPruneMode;
+/** Number of MiB of block files that we're trying to stay below. */
+extern uint64_t nPruneTarget;
+
+//! Check whether the block associated with this index entry is pruned or not.
+bool IsBlockPruned(const CBlockIndex* pblockindex);
+
+void CleanupBlockRevFiles();
+
+/** Open a block file (blk?????.dat) */
+FILE* OpenBlockFile(const FlatFilePos& pos, bool fReadOnly = false);
+/** Translation to a filesystem path */
+fs::path GetBlockPosFilename(const FlatFilePos& pos);
+
+/** Get block file info entry for one block file */
+CBlockFileInfo* GetBlockFileInfo(size_t n);
+
+/** Calculate the amount of disk space the block & undo files currently use */
+uint64_t CalculateCurrentUsage();
+
+/**
+ * Actually unlink the specified files
+ */
+void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune);
+
/** Functions for disk access for blocks */
bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::Params& consensusParams);
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams);
@@ -32,6 +72,7 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c
bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const CBlockIndex* pindex, const CMessageHeader::MessageStartChars& message_start);
bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex);
+bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams);
FlatFilePos SaveBlockToDisk(const CBlock& block, int nHeight, CChain& active_chain, const CChainParams& chainparams, const FlatFilePos* dbp);
diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp
index 691b2791d7..a1e7a71e2c 100644
--- a/src/node/transaction.cpp
+++ b/src/node/transaction.cpp
@@ -100,8 +100,6 @@ TransactionError BroadcastTransaction(NodeContext& node, const CTransactionRef t
// the mempool tracks locally submitted transactions to make a
// best-effort of initial broadcast
node.mempool->AddUnbroadcastTx(hashTx);
-
- LOCK(cs_main);
node.peerman->RelayTransaction(hashTx, tx->GetWitnessHash());
}
diff --git a/src/node/utxo_snapshot.h b/src/node/utxo_snapshot.h
index fe78cb46bd..61292cdcc5 100644
--- a/src/node/utxo_snapshot.h
+++ b/src/node/utxo_snapshot.h
@@ -22,20 +22,15 @@ public:
//! during snapshot load to estimate progress of UTXO set reconstruction.
uint64_t m_coins_count = 0;
- //! Necessary to "fake" the base nChainTx so that we can estimate progress during
- //! initial block download for the assumeutxo chainstate.
- unsigned int m_nchaintx = 0;
-
SnapshotMetadata() { }
SnapshotMetadata(
const uint256& base_blockhash,
uint64_t coins_count,
unsigned int nchaintx) :
m_base_blockhash(base_blockhash),
- m_coins_count(coins_count),
- m_nchaintx(nchaintx) { }
+ m_coins_count(coins_count) { }
- SERIALIZE_METHODS(SnapshotMetadata, obj) { READWRITE(obj.m_base_blockhash, obj.m_coins_count, obj.m_nchaintx); }
+ SERIALIZE_METHODS(SnapshotMetadata, obj) { READWRITE(obj.m_base_blockhash, obj.m_coins_count); }
};
#endif // BITCOIN_NODE_UTXO_SNAPSHOT_H
diff --git a/src/pubkey.cpp b/src/pubkey.cpp
index fc1624af25..a89988cc90 100644
--- a/src/pubkey.cpp
+++ b/src/pubkey.cpp
@@ -293,7 +293,7 @@ void CExtPubKey::Decode(const unsigned char code[BIP32_EXTKEY_SIZE]) {
bool CExtPubKey::Derive(CExtPubKey &out, unsigned int _nChild) const {
out.nDepth = nDepth + 1;
CKeyID id = pubkey.GetID();
- memcpy(&out.vchFingerprint[0], &id, 4);
+ memcpy(out.vchFingerprint, &id, 4);
out.nChild = _nChild;
return pubkey.Derive(out.pubkey, out.chaincode, _nChild, chaincode);
}
diff --git a/src/pubkey.h b/src/pubkey.h
index 12514fc3c9..b653c202fc 100644
--- a/src/pubkey.h
+++ b/src/pubkey.h
@@ -101,7 +101,7 @@ public:
}
//! Construct a public key from a byte vector.
- explicit CPubKey(const std::vector<unsigned char>& _vch)
+ explicit CPubKey(Span<const uint8_t> _vch)
{
Set(_vch.begin(), _vch.end());
}
@@ -247,7 +247,7 @@ struct CExtPubKey {
friend bool operator==(const CExtPubKey &a, const CExtPubKey &b)
{
return a.nDepth == b.nDepth &&
- memcmp(&a.vchFingerprint[0], &b.vchFingerprint[0], sizeof(vchFingerprint)) == 0 &&
+ memcmp(a.vchFingerprint, b.vchFingerprint, sizeof(vchFingerprint)) == 0 &&
a.nChild == b.nChild &&
a.chaincode == b.chaincode &&
a.pubkey == b.pubkey;
diff --git a/src/qt/forms/debugwindow.ui b/src/qt/forms/debugwindow.ui
index 13687a6510..61ecea1cac 100644
--- a/src/qt/forms/debugwindow.ui
+++ b/src/qt/forms/debugwindow.ui
@@ -923,6 +923,9 @@
<property name="tabKeyNavigation">
<bool>false</bool>
</property>
+ <property name="alternatingRowColors">
+ <bool>true</bool>
+ </property>
<property name="textElideMode">
<enum>Qt::ElideMiddle</enum>
</property>
@@ -984,6 +987,9 @@
<property name="tabKeyNavigation">
<bool>false</bool>
</property>
+ <property name="alternatingRowColors">
+ <bool>true</bool>
+ </property>
<property name="sortingEnabled">
<bool>true</bool>
</property>
diff --git a/src/qt/test/rpcnestedtests.cpp b/src/qt/test/rpcnestedtests.cpp
index 9e6e98b274..df3b85ea71 100644
--- a/src/qt/test/rpcnestedtests.cpp
+++ b/src/qt/test/rpcnestedtests.cpp
@@ -35,14 +35,16 @@ static RPCHelpMan rpcNestedTest_rpc()
}
static const CRPCCommand vRPCCommands[] = {
- {"test", &rpcNestedTest_rpc},
+ {"rpcNestedTest", &rpcNestedTest_rpc},
};
void RPCNestedTests::rpcNestedTests()
{
// do some test setup
// could be moved to a more generic place when we add more tests on QT level
- tableRPC.appendCommand("rpcNestedTest", &vRPCCommands[0]);
+ for (const auto& c : vRPCCommands) {
+ tableRPC.appendCommand(c.name, &c);
+ }
TestingSetup test;
m_node.setContext(&test.m_node);
diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp
index c6dd0c2ad6..cc6db8d33e 100644
--- a/src/qt/walletmodel.cpp
+++ b/src/qt/walletmodel.cpp
@@ -248,7 +248,7 @@ WalletModel::SendCoinsReturn WalletModel::sendCoins(WalletModelTransaction &tran
CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION);
ssTx << *newTx;
- transaction_array.append((const char*)&(ssTx[0]), ssTx.size());
+ transaction_array.append((const char*)ssTx.data(), ssTx.size());
}
// Add addresses / update labels that we've sent to the address book,
diff --git a/src/random.cpp b/src/random.cpp
index 9900825abb..174f4cef31 100644
--- a/src/random.cpp
+++ b/src/random.cpp
@@ -628,7 +628,7 @@ std::vector<unsigned char> FastRandomContext::randbytes(size_t len)
if (requires_seed) RandomSeed();
std::vector<unsigned char> ret(len);
if (len > 0) {
- rng.Keystream(&ret[0], len);
+ rng.Keystream(ret.data(), len);
}
return ret;
}
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 59f7657c0e..81cb802357 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -1099,7 +1099,7 @@ static RPCHelpMan gettxoutsetinfo()
"Note this call may take some time if you are not using coinstatsindex.\n",
{
{"hash_type", RPCArg::Type::STR, RPCArg::Default{"hash_serialized_2"}, "Which UTXO set hash should be calculated. Options: 'hash_serialized_2' (the legacy algorithm), 'muhash', 'none'."},
- {"hash_or_height", RPCArg::Type::NUM, RPCArg::Optional::OMITTED, "The block hash or height of the target height (only available with coinstatsindex)", "", {"", "string or numeric"}},
+ {"hash_or_height", RPCArg::Type::NUM, RPCArg::Optional::OMITTED, "The block hash or height of the target height (only available with coinstatsindex).", "", {"", "string or numeric"}},
{"use_index", RPCArg::Type::BOOL, RPCArg::Default{true}, "Use coinstatsindex, if available."},
},
RPCResult{
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index 2bfcaeafc9..382c6f11ff 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -277,7 +277,9 @@ static RPCHelpMan addnode()
"\nAttempts to add or remove a node from the addnode list.\n"
"Or try a connection to a node once.\n"
"Nodes added using addnode (or -connect) are protected from DoS disconnection and are not required to be\n"
- "full nodes/support SegWit as other outbound peers are (though such peers will not be synced from).\n",
+ "full nodes/support SegWit as other outbound peers are (though such peers will not be synced from).\n" +
+ strprintf("Addnode connections are limited to %u at a time", MAX_ADDNODE_CONNECTIONS) +
+ " and are counted separately from the -maxconnections limit.\n",
{
{"node", RPCArg::Type::STR, RPCArg::Optional::NO, "The node (see getpeerinfo for nodes)"},
{"command", RPCArg::Type::STR, RPCArg::Optional::NO, "'add' to add a node to the list, 'remove' to remove a node from the list, 'onetry' to try a connection to the node once"},
diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp
index df3ee9f007..069669bb3b 100644
--- a/src/rpc/util.cpp
+++ b/src/rpc/util.cpp
@@ -231,16 +231,12 @@ CTxDestination AddAndGetMultisigDestination(const int required, const std::vecto
if ((int)pubkeys.size() < required) {
throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("not enough keys supplied (got %u keys, but need at least %d to redeem)", pubkeys.size(), required));
}
- if (pubkeys.size() > 16) {
- throw JSONRPCError(RPC_INVALID_PARAMETER, "Number of keys involved in the multisignature address creation > 16\nReduce the number");
+ if (pubkeys.size() > MAX_PUBKEYS_PER_MULTISIG) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Number of keys involved in the multisignature address creation > %d\nReduce the number", MAX_PUBKEYS_PER_MULTISIG));
}
script_out = GetScriptForMultisig(required, pubkeys);
- if (script_out.size() > MAX_SCRIPT_ELEMENT_SIZE) {
- throw JSONRPCError(RPC_INVALID_PARAMETER, (strprintf("redeemScript exceeds size limit: %d > %d", script_out.size(), MAX_SCRIPT_ELEMENT_SIZE)));
- }
-
// Check if any keys are uncompressed. If so, the type is legacy
for (const CPubKey& pk : pubkeys) {
if (!pk.IsCompressed()) {
@@ -249,6 +245,10 @@ CTxDestination AddAndGetMultisigDestination(const int required, const std::vecto
}
}
+ if (type == OutputType::LEGACY && script_out.size() > MAX_SCRIPT_ELEMENT_SIZE) {
+ throw JSONRPCError(RPC_INVALID_PARAMETER, (strprintf("redeemScript exceeds size limit: %d > %d", script_out.size(), MAX_SCRIPT_ELEMENT_SIZE)));
+ }
+
// Make the address
CTxDestination dest = AddAndGetDestinationForScript(keystore, script_out, type);
diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp
index f1433553bc..b54ba204f0 100644
--- a/src/script/descriptor.cpp
+++ b/src/script/descriptor.cpp
@@ -998,8 +998,8 @@ std::unique_ptr<DescriptorImpl> ParseScript(uint32_t& key_exp_index, Span<const
providers.emplace_back(std::move(pk));
key_exp_index++;
}
- if (providers.empty() || providers.size() > 16) {
- error = strprintf("Cannot have %u keys in multisig; must have between 1 and 16 keys, inclusive", providers.size());
+ if (providers.empty() || providers.size() > MAX_PUBKEYS_PER_MULTISIG) {
+ error = strprintf("Cannot have %u keys in multisig; must have between 1 and %d keys, inclusive", providers.size(), MAX_PUBKEYS_PER_MULTISIG);
return nullptr;
} else if (thres < 1) {
error = strprintf("Multisig threshold cannot be %d, must be at least 1", thres);
@@ -1015,6 +1015,7 @@ std::unique_ptr<DescriptorImpl> ParseScript(uint32_t& key_exp_index, Span<const
}
}
if (ctx == ParseScriptContext::P2SH) {
+ // This limits the maximum number of compressed pubkeys to 15.
if (script_size + 3 > MAX_SCRIPT_ELEMENT_SIZE) {
error = strprintf("P2SH script is too large, %d bytes is larger than %d bytes", script_size + 3, MAX_SCRIPT_ELEMENT_SIZE);
return nullptr;
@@ -1097,7 +1098,7 @@ std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptCo
TxoutType txntype = Solver(script, data);
if (txntype == TxoutType::PUBKEY) {
- CPubKey pubkey(data[0].begin(), data[0].end());
+ CPubKey pubkey(data[0]);
if (pubkey.IsValid()) {
return std::make_unique<PKDescriptor>(InferPubkey(pubkey, ctx, provider));
}
@@ -1121,7 +1122,7 @@ std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptCo
if (txntype == TxoutType::MULTISIG) {
std::vector<std::unique_ptr<PubkeyProvider>> providers;
for (size_t i = 1; i + 1 < data.size(); ++i) {
- CPubKey pubkey(data[i].begin(), data[i].end());
+ CPubKey pubkey(data[i]);
providers.push_back(InferPubkey(pubkey, ctx, provider));
}
return std::make_unique<MultisigDescriptor>((int)data[0][0], std::move(providers));
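
The "maximum of 15 compressed pubkeys" comment added to descriptor.cpp above follows from simple size arithmetic, because a P2SH redeem script must fit into a single 520-byte push. A hedged back-of-the-envelope sketch (plain C++, not Core code):

#include <iostream>

int main()
{
    // MAX_SCRIPT_ELEMENT_SIZE: the largest single push (and therefore the
    // largest P2SH redeem script) allowed, in bytes.
    constexpr int MAX_SCRIPT_ELEMENT_SIZE = 520;

    // A k-of-n CHECKMULTISIG redeem script with compressed keys is roughly:
    //   1 byte (OP_k) + n * (1 push byte + 33 key bytes) + 1 byte (OP_n) + 1 byte (OP_CHECKMULTISIG)
    for (int n = 14; n <= 17; ++n) {
        const int script_size = 3 + 34 * n;
        std::cout << n << " compressed keys -> " << script_size << " bytes: "
                  << (script_size <= MAX_SCRIPT_ELEMENT_SIZE ? "fits in sh()" : "too large for sh()")
                  << '\n';
    }
    // 15 keys -> 513 bytes (fits), 16 keys -> 547 bytes (> 520), which is why
    // 16-20 key multisigs need wsh() (optionally wrapped as sh(wsh(...))).
    return 0;
}
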
diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp
index abc0625bb1..dc0f165be0 100644
--- a/src/script/interpreter.cpp
+++ b/src/script/interpreter.cpp
@@ -225,7 +225,7 @@ bool static CheckPubKeyEncoding(const valtype &vchPubKey, unsigned int flags, co
return true;
}
-bool static CheckMinimalPush(const valtype& data, opcodetype opcode) {
+bool CheckMinimalPush(const valtype& data, opcodetype opcode) {
// Excludes OP_1NEGATE, OP_1-16 since they are by definition minimal
assert(0 <= opcode && opcode <= OP_PUSHDATA4);
if (data.size() == 0) {
@@ -1890,7 +1890,7 @@ static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion,
const valtype& script_bytes = SpanPopBack(stack);
exec_script = CScript(script_bytes.begin(), script_bytes.end());
uint256 hash_exec_script;
- CSHA256().Write(&exec_script[0], exec_script.size()).Finalize(hash_exec_script.begin());
+ CSHA256().Write(exec_script.data(), exec_script.size()).Finalize(hash_exec_script.begin());
if (memcmp(hash_exec_script.begin(), program.data(), 32)) {
return set_error(serror, SCRIPT_ERR_WITNESS_PROGRAM_MISMATCH);
}
diff --git a/src/script/interpreter.h b/src/script/interpreter.h
index c76b3acb22..212de17c7b 100644
--- a/src/script/interpreter.h
+++ b/src/script/interpreter.h
@@ -316,6 +316,8 @@ bool VerifyScript(const CScript& scriptSig, const CScript& scriptPubKey, const C
size_t CountWitnessSigOps(const CScript& scriptSig, const CScript& scriptPubKey, const CScriptWitness* witness, unsigned int flags);
+bool CheckMinimalPush(const std::vector<unsigned char>& data, opcodetype opcode);
+
int FindAndDelete(CScript& script, const CScript& b);
#endif // BITCOIN_SCRIPT_INTERPRETER_H
diff --git a/src/script/sigcache.cpp b/src/script/sigcache.cpp
index c6d898a25a..65867c1c14 100644
--- a/src/script/sigcache.cpp
+++ b/src/script/sigcache.cpp
@@ -53,14 +53,14 @@ public:
ComputeEntryECDSA(uint256& entry, const uint256 &hash, const std::vector<unsigned char>& vchSig, const CPubKey& pubkey) const
{
CSHA256 hasher = m_salted_hasher_ecdsa;
- hasher.Write(hash.begin(), 32).Write(&pubkey[0], pubkey.size()).Write(&vchSig[0], vchSig.size()).Finalize(entry.begin());
+ hasher.Write(hash.begin(), 32).Write(pubkey.data(), pubkey.size()).Write(vchSig.data(), vchSig.size()).Finalize(entry.begin());
}
void
ComputeEntrySchnorr(uint256& entry, const uint256 &hash, Span<const unsigned char> sig, const XOnlyPubKey& pubkey) const
{
CSHA256 hasher = m_salted_hasher_schnorr;
- hasher.Write(hash.begin(), 32).Write(&pubkey[0], pubkey.size()).Write(sig.data(), sig.size()).Finalize(entry.begin());
+ hasher.Write(hash.begin(), 32).Write(pubkey.data(), pubkey.size()).Write(sig.data(), sig.size()).Finalize(entry.begin());
}
bool
diff --git a/src/script/sign.cpp b/src/script/sign.cpp
index 4d9026427e..da0092f9e3 100644
--- a/src/script/sign.cpp
+++ b/src/script/sign.cpp
@@ -167,7 +167,7 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator
return true;
case TxoutType::WITNESS_V0_SCRIPTHASH:
- CRIPEMD160().Write(&vSolutions[0][0], vSolutions[0].size()).Finalize(h160.begin());
+ CRIPEMD160().Write(vSolutions[0].data(), vSolutions[0].size()).Finalize(h160.begin());
if (GetCScript(provider, sigdata, CScriptID{h160}, scriptRet)) {
ret.push_back(std::vector<unsigned char>(scriptRet.begin(), scriptRet.end()));
return true;
diff --git a/src/script/standard.cpp b/src/script/standard.cpp
index 700155c8d4..364fac3c84 100644
--- a/src/script/standard.cpp
+++ b/src/script/standard.cpp
@@ -88,21 +88,53 @@ static constexpr bool IsSmallInteger(opcodetype opcode)
return opcode >= OP_1 && opcode <= OP_16;
}
-static bool MatchMultisig(const CScript& script, unsigned int& required, std::vector<valtype>& pubkeys)
+static constexpr bool IsPushdataOp(opcodetype opcode)
+{
+ return opcode > OP_FALSE && opcode <= OP_PUSHDATA4;
+}
+
+static constexpr bool IsValidMultisigKeyCount(int n_keys)
+{
+ return n_keys > 0 && n_keys <= MAX_PUBKEYS_PER_MULTISIG;
+}
+
+static bool GetMultisigKeyCount(opcodetype opcode, valtype data, int& count)
+{
+ if (IsSmallInteger(opcode)) {
+ count = CScript::DecodeOP_N(opcode);
+ return IsValidMultisigKeyCount(count);
+ }
+
+ if (IsPushdataOp(opcode)) {
+ if (!CheckMinimalPush(data, opcode)) return false;
+ try {
+ count = CScriptNum(data, /* fRequireMinimal = */ true).getint();
+ return IsValidMultisigKeyCount(count);
+ } catch (const scriptnum_error&) {
+ return false;
+ }
+ }
+
+ return false;
+}
+
+static bool MatchMultisig(const CScript& script, int& required_sigs, std::vector<valtype>& pubkeys)
{
opcodetype opcode;
valtype data;
+ int num_keys;
+
CScript::const_iterator it = script.begin();
if (script.size() < 1 || script.back() != OP_CHECKMULTISIG) return false;
- if (!script.GetOp(it, opcode, data) || !IsSmallInteger(opcode)) return false;
- required = CScript::DecodeOP_N(opcode);
+ if (!script.GetOp(it, opcode, data) || !GetMultisigKeyCount(opcode, data, required_sigs)) return false;
while (script.GetOp(it, opcode, data) && CPubKey::ValidSize(data)) {
pubkeys.emplace_back(std::move(data));
}
- if (!IsSmallInteger(opcode)) return false;
- unsigned int keys = CScript::DecodeOP_N(opcode);
- if (pubkeys.size() != keys || keys < required) return false;
+ if (!GetMultisigKeyCount(opcode, data, num_keys)) return false;
+
+ if (pubkeys.size() != static_cast<unsigned long>(num_keys) || num_keys < required_sigs) return false;
+
return (it + 1 == script.end());
}
@@ -163,12 +195,12 @@ TxoutType Solver(const CScript& scriptPubKey, std::vector<std::vector<unsigned c
return TxoutType::PUBKEYHASH;
}
- unsigned int required;
+ int required;
std::vector<std::vector<unsigned char>> keys;
if (MatchMultisig(scriptPubKey, required, keys)) {
- vSolutionsRet.push_back({static_cast<unsigned char>(required)}); // safe as required is in range 1..16
+ vSolutionsRet.push_back({static_cast<unsigned char>(required)}); // safe as required is in range 1..20
vSolutionsRet.insert(vSolutionsRet.end(), keys.begin(), keys.end());
- vSolutionsRet.push_back({static_cast<unsigned char>(keys.size())}); // safe as size is in range 1..16
+ vSolutionsRet.push_back({static_cast<unsigned char>(keys.size())}); // safe as size is in range 1..20
return TxoutType::MULTISIG;
}
@@ -318,10 +350,11 @@ CScript GetScriptForMultisig(int nRequired, const std::vector<CPubKey>& keys)
{
CScript script;
- script << CScript::EncodeOP_N(nRequired);
+ script << nRequired;
for (const CPubKey& key : keys)
script << ToByteVector(key);
- script << CScript::EncodeOP_N(keys.size()) << OP_CHECKMULTISIG;
+ script << keys.size() << OP_CHECKMULTISIG;
+
return script;
}
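
A hedged sketch of the encoding split that the GetMultisigKeyCount/MatchMultisig changes above have to cope with: a key count of 1-16 is serialized as the single opcode OP_1..OP_16 (0x51..0x60), while 17-20 (now reachable through wsh() multisig) ends up as a minimal one-byte data push. The helper below mirrors that using plain byte vectors rather than CScript; it is an illustration, not the library API.

#include <cstdint>
#include <stdexcept>
#include <vector>

// Sketch: how a multisig key (or signature) count n appears in the script.
std::vector<uint8_t> EncodeKeyCount(int n)
{
    if (n >= 1 && n <= 16) {
        return {static_cast<uint8_t>(0x50 + n)}; // OP_1 (0x51) .. OP_16 (0x60)
    }
    if (n >= 17 && n <= 20) {
        return {0x01, static_cast<uint8_t>(n)};  // minimal push of a single byte
    }
    throw std::invalid_argument("key count out of range for multisig");
}

Decoding has to undo both cases, which is why MatchMultisig now falls back to CheckMinimalPush plus CScriptNum when the opcode is not a small integer, instead of relying on DecodeOP_N alone.
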
diff --git a/src/streams.h b/src/streams.h
index e78da31cbc..31407287ae 100644
--- a/src/streams.h
+++ b/src/streams.h
@@ -167,7 +167,7 @@ public:
}
template<typename T>
- VectorReader& operator>>(T& obj)
+ VectorReader& operator>>(T&& obj)
{
// Unserialize from this stream
::Unserialize(*this, obj);
diff --git a/src/test/compress_tests.cpp b/src/test/compress_tests.cpp
index 7b661a0d1d..b7ba46bd30 100644
--- a/src/test/compress_tests.cpp
+++ b/src/test/compress_tests.cpp
@@ -79,7 +79,7 @@ BOOST_AUTO_TEST_CASE(compress_script_to_ckey_id)
// Check compressed script
BOOST_CHECK_EQUAL(out.size(), 21U);
BOOST_CHECK_EQUAL(out[0], 0x00);
- BOOST_CHECK_EQUAL(memcmp(&out[1], &script[3], 20), 0); // compare the 20 relevant chars of the CKeyId in the script
+ BOOST_CHECK_EQUAL(memcmp(out.data() + 1, script.data() + 3, 20), 0); // compare the 20 relevant chars of the CKeyId in the script
}
BOOST_AUTO_TEST_CASE(compress_script_to_cscript_id)
@@ -96,7 +96,7 @@ BOOST_AUTO_TEST_CASE(compress_script_to_cscript_id)
// Check compressed script
BOOST_CHECK_EQUAL(out.size(), 21U);
BOOST_CHECK_EQUAL(out[0], 0x01);
- BOOST_CHECK_EQUAL(memcmp(&out[1], &script[2], 20), 0); // compare the 20 relevant chars of the CScriptId in the script
+ BOOST_CHECK_EQUAL(memcmp(out.data() + 1, script.data() + 2, 20), 0); // compare the 20 relevant chars of the CScriptId in the script
}
BOOST_AUTO_TEST_CASE(compress_script_to_compressed_pubkey_id)
@@ -113,8 +113,8 @@ BOOST_AUTO_TEST_CASE(compress_script_to_compressed_pubkey_id)
// Check compressed script
BOOST_CHECK_EQUAL(out.size(), 33U);
- BOOST_CHECK_EQUAL(memcmp(&out[0], &script[1], 1), 0);
- BOOST_CHECK_EQUAL(memcmp(&out[1], &script[2], 32), 0); // compare the 32 chars of the compressed CPubKey
+ BOOST_CHECK_EQUAL(memcmp(out.data(), script.data() + 1, 1), 0);
+ BOOST_CHECK_EQUAL(memcmp(out.data() + 1, script.data() + 2, 32), 0); // compare the 32 chars of the compressed CPubKey
}
BOOST_AUTO_TEST_CASE(compress_script_to_uncompressed_pubkey_id)
@@ -130,7 +130,7 @@ BOOST_AUTO_TEST_CASE(compress_script_to_uncompressed_pubkey_id)
// Check compressed script
BOOST_CHECK_EQUAL(out.size(), 33U);
- BOOST_CHECK_EQUAL(memcmp(&out[1], &script[2], 32), 0); // first 32 chars of CPubKey are copied into out[1:]
+ BOOST_CHECK_EQUAL(memcmp(out.data() + 1, script.data() + 2, 32), 0); // first 32 chars of CPubKey are copied into out[1:]
BOOST_CHECK_EQUAL(out[0], 0x04 | (script[65] & 0x01)); // least significant bit (lsb) of last char of pubkey is mapped into out[0]
}
diff --git a/src/test/crypto_tests.cpp b/src/test/crypto_tests.cpp
index 7358b246b6..edec5f0a31 100644
--- a/src/test/crypto_tests.cpp
+++ b/src/test/crypto_tests.cpp
@@ -33,7 +33,7 @@ static void TestVector(const Hasher &h, const In &in, const Out &out) {
hash.resize(out.size());
{
// Test that writing the whole input string at once works.
- Hasher(h).Write((unsigned char*)&in[0], in.size()).Finalize(&hash[0]);
+ Hasher(h).Write((const uint8_t*)in.data(), in.size()).Finalize(hash.data());
BOOST_CHECK(hash == out);
}
for (int i=0; i<32; i++) {
@@ -42,15 +42,15 @@ static void TestVector(const Hasher &h, const In &in, const Out &out) {
size_t pos = 0;
while (pos < in.size()) {
size_t len = InsecureRandRange((in.size() - pos + 1) / 2 + 1);
- hasher.Write((unsigned char*)&in[pos], len);
+ hasher.Write((const uint8_t*)in.data() + pos, len);
pos += len;
if (pos > 0 && pos + 2 * out.size() > in.size() && pos < in.size()) {
// Test that writing the rest at once to a copy of a hasher works.
- Hasher(hasher).Write((unsigned char*)&in[pos], in.size() - pos).Finalize(&hash[0]);
+ Hasher(hasher).Write((const uint8_t*)in.data() + pos, in.size() - pos).Finalize(hash.data());
BOOST_CHECK(hash == out);
}
}
- hasher.Finalize(&hash[0]);
+ hasher.Finalize(hash.data());
BOOST_CHECK(hash == out);
}
}
diff --git a/src/test/data/tx_invalid.json b/src/test/data/tx_invalid.json
index 41bebab2a0..a47bc8f366 100644
--- a/src/test/data/tx_invalid.json
+++ b/src/test/data/tx_invalid.json
@@ -311,6 +311,10 @@
[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x00 0x20 0x34b6c399093e06cf9f0f7f660a1abcfe78fcf7b576f43993208edd9518a0ae9b", 1000]],
"0100000000010100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff01e803000000000000015101045102010100000000", "P2SH,WITNESS"],
+["P2WSH with an empty redeem should fail due to empty stack"],
+[[["3d4da21b04a67a54c8a58df1c53a0534b0a7f0864fb3d19abd43b8f6934e785f", 0, "0x00 0x20 0xe3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", 1337]],
+"020000000001015f784e93f6b843bd9ad1b34f86f0a7b034053ac5f18da5c8547aa6041ba24d3d0000000000ffffffff013905000000000000220020e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855010000000000", "P2SH,WITNESS"],
+
["33 bytes push should be considered a witness scriptPubKey"],
[[["0000000000000000000000000000000000000000000000000000000000000100", 0, "0x60 0x21 0xff25429251b5a84f452230a3c75fd886b7fc5a7865ce4a7bb7a9d7c5be6da3dbff", 1000]],
"010000000100010000000000000000000000000000000000000000000000000000000000000000000000ffffffff01e803000000000000015100000000", "P2SH,WITNESS,DISCOURAGE_UPGRADABLE_WITNESS_PROGRAM"],
diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp
index ea41a03728..36e2dac3ff 100644
--- a/src/test/descriptor_tests.cpp
+++ b/src/test/descriptor_tests.cpp
@@ -2,6 +2,7 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <pubkey.h>
#include <script/descriptor.h>
#include <script/sign.h>
#include <script/standard.h>
@@ -27,6 +28,14 @@ void CheckUnparsable(const std::string& prv, const std::string& pub, const std::
BOOST_CHECK_EQUAL(error, expected_error);
}
+/** Check that the script can only be inferred as a raw() (non-standard) descriptor */
+void CheckInferRaw(const CScript& script)
+{
+ FlatSigningProvider dummy_provider;
+ std::unique_ptr<Descriptor> desc = InferDescriptor(script, dummy_provider);
+ BOOST_CHECK(desc->ToString().rfind("raw(", 0) == 0);
+}
+
constexpr int DEFAULT = 0;
constexpr int RANGE = 1; // Expected to be ranged descriptor
constexpr int HARDENED = 2; // Derivation needs access to private keys
@@ -351,8 +360,9 @@ BOOST_AUTO_TEST_CASE(descriptor_test)
CheckUnparsable("multi(0,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "multi(0,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "Multisig threshold cannot be 0, must be at least 1"); // Threshold of 0
CheckUnparsable("multi(3,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "multi(3,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "Multisig threshold cannot be larger than the number of keys; threshold is 3 but only 2 keys specified"); // Threshold larger than number of keys
CheckUnparsable("multi(3,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f)", "multi(3,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8)", "Cannot have 4 pubkeys in bare multisig; only at most 3 pubkeys"); // Threshold larger than number of keys
- CheckUnparsable("sh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))","sh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", "Cannot have 17 keys in multisig; must have between 1 and 16 keys, inclusive"); // Cannot have more than 16 keys in a multisig
-
+ CheckUnparsable("sh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))","sh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", "P2SH script is too large, 581 bytes is larger than 520 bytes"); // Cannot have more than 15 keys in a P2SH multisig, or we exceed maximum push size
+ Check("wsh(multi(20,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9,KzRedjSwMggebB3VufhbzpYJnvHfHe9kPJSjCU5QpJdAW3NSZxYS,Kyjtp5858xL7JfeV4PNRCKy2t6XvgqNNepArGY9F9F1SSPqNEMs3,L2D4RLHPiHBidkHS8ftx11jJk1hGFELvxh8LoxNQheaGT58dKenW,KyLPZdwY4td98bKkXqEXTEBX3vwEYTQo1yyLjX2jKXA63GBpmSjv))","wsh(multi(20,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232,02bc2feaa536991d269aae46abb8f3772a5b3ad592314945e51543e7da84c4af6e,0318bf32e5217c1eb771a6d5ce1cd39395dff7ff665704f175c9a5451d95a2f2ca,02c681a6243f16208c2004bb81f5a8a67edfdd3e3711534eadeec3dcf0b010c759,0249fdd6b69768b8d84b4893f8ff84b36835c50183de20fcae8f366a45290d01fd))", 
"wsh(multi(20,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9,KzRedjSwMggebB3VufhbzpYJnvHfHe9kPJSjCU5QpJdAW3NSZxYS,Kyjtp5858xL7JfeV4PNRCKy2t6XvgqNNepArGY9F9F1SSPqNEMs3,L2D4RLHPiHBidkHS8ftx11jJk1hGFELvxh8LoxNQheaGT58dKenW,KyLPZdwY4td98bKkXqEXTEBX3vwEYTQo1yyLjX2jKXA63GBpmSjv))","wsh(multi(20,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232,02bc2feaa536991d269aae46abb8f3772a5b3ad592314945e51543e7da84c4af6e,0318bf32e5217c1eb771a6d5ce1cd39395dff7ff665704f175c9a5451d95a2f2ca,02c681a6243f16208c2004bb81f5a8a67edfdd3e3711534eadeec3dcf0b010c759,0249fdd6b69768b8d84b4893f8ff84b36835c50183de20fcae8f366a45290d01fd))", SIGNABLE, {{"0020376bd8344b8b6ebe504ff85ef743eaa1aa9272178223bcb6887e9378efb341ac"}}, OutputType::BECH32); // In P2WSH we can have up to 20 keys
+Check("sh(wsh(multi(20,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9,KzRedjSwMggebB3VufhbzpYJnvHfHe9kPJSjCU5QpJdAW3NSZxYS,Kyjtp5858xL7JfeV4PNRCKy2t6XvgqNNepArGY9F9F1SSPqNEMs3,L2D4RLHPiHBidkHS8ftx11jJk1hGFELvxh8LoxNQheaGT58dKenW,KyLPZdwY4td98bKkXqEXTEBX3vwEYTQo1yyLjX2jKXA63GBpmSjv)))","sh(wsh(multi(20,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232,02bc2feaa536991d269aae46abb8f3772a5b3ad592314945e51543e7da84c4af6e,0318bf32e5217c1eb771a6d5ce1cd39395dff7ff665704f175c9a5451d95a2f2ca,02c681a6243f16208c2004bb81f5a8a67edfdd3e3711534eadeec3dcf0b010c759,0249fdd6b69768b8d84b4893f8ff84b36835c50183de20fcae8f366a45290d01fd)))", 
"sh(wsh(multi(20,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9,KzRedjSwMggebB3VufhbzpYJnvHfHe9kPJSjCU5QpJdAW3NSZxYS,Kyjtp5858xL7JfeV4PNRCKy2t6XvgqNNepArGY9F9F1SSPqNEMs3,L2D4RLHPiHBidkHS8ftx11jJk1hGFELvxh8LoxNQheaGT58dKenW,KyLPZdwY4td98bKkXqEXTEBX3vwEYTQo1yyLjX2jKXA63GBpmSjv)))","sh(wsh(multi(20,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232,02bc2feaa536991d269aae46abb8f3772a5b3ad592314945e51543e7da84c4af6e,0318bf32e5217c1eb771a6d5ce1cd39395dff7ff665704f175c9a5451d95a2f2ca,02c681a6243f16208c2004bb81f5a8a67edfdd3e3711534eadeec3dcf0b010c759,0249fdd6b69768b8d84b4893f8ff84b36835c50183de20fcae8f366a45290d01fd)))", SIGNABLE, {{"a914c2c9c510e9d7f92fd6131e94803a8d34a8ef675e87"}}, OutputType::P2SH_SEGWIT); // Even if it's wrapped into P2SH
// Check for invalid nesting of structures
CheckUnparsable("sh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "sh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "A function is needed within P2SH"); // P2SH needs a script, not a key
CheckUnparsable("sh(combo(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(combo(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", "Can only have combo() at top level"); // Old must be top level
@@ -376,6 +386,27 @@ BOOST_AUTO_TEST_CASE(descriptor_test)
CheckUnparsable("", "addr(asdf)", "Address is not valid"); // Invalid address
CheckUnparsable("", "raw(asdf)", "Raw script is not hex"); // Invalid script
CheckUnparsable("", "raw(Ü)#00000000", "Invalid characters in payload"); // Invalid chars
+
+ // A 2of4 but using a direct push rather than OP_2
+ CScript nonminimalmultisig;
+ CKey keys[4];
+ nonminimalmultisig << std::vector<unsigned char>{2};
+ for (int i = 0; i < 4; i++) {
+ keys[i].MakeNewKey(true);
+ nonminimalmultisig << ToByteVector(keys[i].GetPubKey());
+ }
+ nonminimalmultisig << 4 << OP_CHECKMULTISIG;
+ CheckInferRaw(nonminimalmultisig);
+
+ // A 2of4 but using a direct push rather than OP_4
+ nonminimalmultisig.clear();
+ nonminimalmultisig << 2;
+ for (int i = 0; i < 4; i++) {
+ keys[i].MakeNewKey(true);
+ nonminimalmultisig << ToByteVector(keys[i].GetPubKey());
+ }
+ nonminimalmultisig << std::vector<unsigned char>{4} << OP_CHECKMULTISIG;
+ CheckInferRaw(nonminimalmultisig);
}
BOOST_AUTO_TEST_SUITE_END()
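
The "direct push rather than OP_2/OP_4" test scripts above exercise the minimal-push rule that the newly exported CheckMinimalPush enforces. As a reminder of those rules, here is a standalone, hedged sketch working at the byte level; it is not the production implementation.

#include <cstdint>
#include <vector>

// Returns true if `data` pushed with `opcode` uses the shortest possible encoding.
bool IsMinimalPush(const std::vector<uint8_t>& data, uint8_t opcode)
{
    if (data.empty()) return opcode == 0x00;                             // empty element must use OP_0
    if (data.size() == 1 && data[0] >= 1 && data[0] <= 16) return false; // must use OP_1..OP_16 instead of a push
    if (data.size() == 1 && data[0] == 0x81) return false;               // must use OP_1NEGATE
    if (data.size() <= 75) return opcode == data.size();                 // direct push: opcode equals length
    if (data.size() <= 255) return opcode == 0x4c;                       // OP_PUSHDATA1
    if (data.size() <= 65535) return opcode == 0x4d;                     // OP_PUSHDATA2
    return true;                                                         // larger pushes may use OP_PUSHDATA4
}

Pushing the single byte 0x02 instead of emitting OP_2 therefore fails this check, so Solver() refuses to classify such a script as TxoutType::MULTISIG and InferDescriptor falls back to raw(), which is exactly what CheckInferRaw asserts.
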
diff --git a/src/test/fuzz/danger_link_all.sh b/src/test/fuzz/danger_link_all.sh
deleted file mode 100755
index 2ddd00c658..0000000000
--- a/src/test/fuzz/danger_link_all.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env bash
-# Copyright (c) 2020 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-export LC_ALL=C.UTF-8
-
-set -e
-
-ROOT_DIR="$(git rev-parse --show-toplevel)"
-
-# Run only once (break make recursion)
-if [ -d "${ROOT_DIR}/lock_fuzz_link_all" ]; then
- exit
-fi
-mkdir "${ROOT_DIR}/lock_fuzz_link_all"
-
-echo "Linking each fuzz target separately."
-for FUZZING_HARNESS in $(PRINT_ALL_FUZZ_TARGETS_AND_ABORT=1 "${ROOT_DIR}/src/test/fuzz/fuzz" | sort -u); do
- echo "Building src/test/fuzz/${FUZZING_HARNESS} ..."
- git checkout -- "${ROOT_DIR}/src/test/fuzz/fuzz.cpp"
- sed -i "s/std::getenv(\"FUZZ\")/\"${FUZZING_HARNESS}\"/g" "${ROOT_DIR}/src/test/fuzz/fuzz.cpp"
- make
- mv "${ROOT_DIR}/src/test/fuzz/fuzz" "${ROOT_DIR}/src/test/fuzz/${FUZZING_HARNESS}"
-done
-git checkout -- "${ROOT_DIR}/src/test/fuzz/fuzz.cpp"
-rmdir "${ROOT_DIR}/lock_fuzz_link_all"
-echo "Successfully built all fuzz targets."
diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp
index 1fab46ff13..0d8d960d56 100644
--- a/src/test/fuzz/fuzz.cpp
+++ b/src/test/fuzz/fuzz.cpp
@@ -29,13 +29,24 @@ static TypeTestOneInput* g_test_one_input{nullptr};
void initialize()
{
+ bool should_abort{false};
if (std::getenv("PRINT_ALL_FUZZ_TARGETS_AND_ABORT")) {
for (const auto& t : FuzzTargets()) {
if (std::get<2>(t.second)) continue;
std::cout << t.first << std::endl;
}
- Assert(false);
+ should_abort = true;
}
+ if (const char* out_path = std::getenv("WRITE_ALL_FUZZ_TARGETS_AND_ABORT")) {
+ std::cout << "Writing all fuzz target names to '" << out_path << "'." << std::endl;
+ std::ofstream out_stream(out_path, std::ios::binary);
+ for (const auto& t : FuzzTargets()) {
+ if (std::get<2>(t.second)) continue;
+ out_stream << t.first << std::endl;
+ }
+ should_abort = true;
+ }
+ Assert(!should_abort);
std::string_view fuzz_target{Assert(std::getenv("FUZZ"))};
const auto it = FuzzTargets().find(fuzz_target);
Assert(it != FuzzTargets().end());
diff --git a/src/test/fuzz/parse_iso8601.cpp b/src/test/fuzz/parse_iso8601.cpp
index dcb24ac127..a56f2aa48d 100644
--- a/src/test/fuzz/parse_iso8601.cpp
+++ b/src/test/fuzz/parse_iso8601.cpp
@@ -15,7 +15,7 @@ FUZZ_TARGET(parse_iso8601)
{
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
- const int64_t random_time = fuzzed_data_provider.ConsumeIntegral<int64_t>();
+ const int64_t random_time = fuzzed_data_provider.ConsumeIntegral<int32_t>();
const std::string random_string = fuzzed_data_provider.ConsumeRemainingBytesAsString();
const std::string iso8601_datetime = FormatISO8601DateTime(random_time);
diff --git a/src/test/fuzz/rpc.cpp b/src/test/fuzz/rpc.cpp
index dae6f6b6a7..cf32a79932 100644
--- a/src/test/fuzz/rpc.cpp
+++ b/src/test/fuzz/rpc.cpp
@@ -3,9 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <base58.h>
-#include <chainparamsbase.h>
#include <core_io.h>
-#include <interfaces/chain.h>
#include <key.h>
#include <key_io.h>
#include <node/context.h>
@@ -70,18 +68,15 @@ const std::vector<std::string> RPC_COMMANDS_NOT_SAFE_FOR_FUZZING{
"addpeeraddress", // avoid DNS lookups
"analyzepsbt", // avoid signed integer overflow in CFeeRate::GetFee(unsigned long) (https://github.com/bitcoin/bitcoin/issues/20607)
"dumptxoutset", // avoid writing to disk
-#ifdef ENABLE_WALLET
"dumpwallet", // avoid writing to disk
-#endif
- "echoipc", // avoid assertion failure (Assertion `"EnsureAnyNodeContext(request.context).init" && check' failed.)
- "generatetoaddress", // avoid timeout
- "gettxoutproof", // avoid slow execution
-#ifdef ENABLE_WALLET
+ "echoipc", // avoid assertion failure (Assertion `"EnsureAnyNodeContext(request.context).init" && check' failed.)
+ "generatetoaddress", // avoid prohibitively slow execution (when `num_blocks` is large)
+ "generatetodescriptor", // avoid prohibitively slow execution (when `nblocks` is large)
+ "gettxoutproof", // avoid prohibitively slow execution
"importwallet", // avoid reading from disk
"loadwallet", // avoid reading from disk
-#endif
- "mockscheduler", // avoid assertion failure (Assertion `delta_seconds.count() > 0 && delta_seconds < std::chrono::hours{1}' failed.)
"prioritisetransaction", // avoid signed integer overflow in CTxMemPool::PrioritiseTransaction(uint256 const&, long const&) (https://github.com/bitcoin/bitcoin/issues/20626)
+ "savemempool", // disabled as a precautionary measure: may take a file path argument in the future
"setban", // avoid DNS lookups
"stop", // avoid shutdown state
};
@@ -107,7 +102,6 @@ const std::vector<std::string> RPC_COMMANDS_SAFE_FOR_FUZZING{
"finalizepsbt",
"generate",
"generateblock",
- "generatetodescriptor",
"getaddednodeinfo",
"getbestblockhash",
"getblock",
@@ -145,11 +139,11 @@ const std::vector<std::string> RPC_COMMANDS_SAFE_FOR_FUZZING{
"joinpsbts",
"listbanned",
"logging",
+ "mockscheduler",
"ping",
"preciousblock",
"pruneblockchain",
"reconsiderblock",
- "savemempool",
"scantxoutset",
"sendrawtransaction",
"setmocktime",
@@ -334,20 +328,6 @@ void initialize_rpc()
std::terminate();
}
}
- for (const std::string& rpc_command : RPC_COMMANDS_SAFE_FOR_FUZZING) {
- const bool supported_rpc_command = std::find(supported_rpc_commands.begin(), supported_rpc_commands.end(), rpc_command) != supported_rpc_commands.end();
- if (!supported_rpc_command) {
- std::cerr << "Error: Unknown RPC command \"" << rpc_command << "\" found in RPC_COMMANDS_SAFE_FOR_FUZZING. Please update " << __FILE__ << ".\n";
- std::terminate();
- }
- }
- for (const std::string& rpc_command : RPC_COMMANDS_NOT_SAFE_FOR_FUZZING) {
- const bool supported_rpc_command = std::find(supported_rpc_commands.begin(), supported_rpc_commands.end(), rpc_command) != supported_rpc_commands.end();
- if (!supported_rpc_command) {
- std::cerr << "Error: Unknown RPC command \"" << rpc_command << "\" found in RPC_COMMANDS_NOT_SAFE_FOR_FUZZING. Please update " << __FILE__ << ".\n";
- std::terminate();
- }
- }
const char* limit_to_rpc_command_env = std::getenv("LIMIT_TO_RPC_COMMAND");
if (limit_to_rpc_command_env != nullptr) {
g_limit_to_rpc_command = std::string{limit_to_rpc_command_env};
diff --git a/src/test/fuzz/script_assets_test_minimizer.cpp b/src/test/fuzz/script_assets_test_minimizer.cpp
index cec5212f42..a80338b965 100644
--- a/src/test/fuzz/script_assets_test_minimizer.cpp
+++ b/src/test/fuzz/script_assets_test_minimizer.cpp
@@ -133,8 +133,7 @@ unsigned int ParseScriptFlags(const std::string& str)
std::vector<std::string> words;
boost::algorithm::split(words, str, boost::algorithm::is_any_of(","));
- for (const std::string& word : words)
- {
+ for (const std::string& word : words) {
auto it = FLAG_NAMES.find(word);
if (it == FLAG_NAMES.end()) throw std::runtime_error("Unknown verification flag " + word);
flags |= it->second;
@@ -186,15 +185,19 @@ void Test(const std::string& str)
}
}
-ECCVerifyHandle handle;
-
-} // namespace
+void test_init()
+{
+ static ECCVerifyHandle handle;
+}
-FUZZ_TARGET_INIT_HIDDEN(script_assets_test_minimizer, FuzzFrameworkEmptyInitFun, /* hidden */ true)
+FUZZ_TARGET_INIT_HIDDEN(script_assets_test_minimizer, test_init, /* hidden */ true)
{
if (buffer.size() < 2 || buffer.back() != '\n' || buffer[buffer.size() - 2] != ',') return;
const std::string str((const char*)buffer.data(), buffer.size() - 2);
try {
Test(str);
- } catch (const std::runtime_error&) {}
+ } catch (const std::runtime_error&) {
+ }
}
+
+} // namespace
diff --git a/src/test/fuzz/tx_pool.cpp b/src/test/fuzz/tx_pool.cpp
index fe8d17b24a..59229987ba 100644
--- a/src/test/fuzz/tx_pool.cpp
+++ b/src/test/fuzz/tx_pool.cpp
@@ -3,6 +3,7 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <consensus/validation.h>
+#include <miner.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
@@ -77,13 +78,44 @@ void SetMempoolConstraints(ArgsManager& args, FuzzedDataProvider& fuzzed_data_pr
ToString(fuzzed_data_provider.ConsumeIntegralInRange<unsigned>(0, 999)));
}
+void Finish(FuzzedDataProvider& fuzzed_data_provider, MockedTxPool& tx_pool, CChainState& chainstate)
+{
+ WITH_LOCK(::cs_main, tx_pool.check(chainstate));
+ {
+ BlockAssembler::Options options;
+ options.nBlockMaxWeight = fuzzed_data_provider.ConsumeIntegralInRange(0U, MAX_BLOCK_WEIGHT);
+ options.blockMinFeeRate = CFeeRate{ConsumeMoney(fuzzed_data_provider)};
+ auto assembler = BlockAssembler{chainstate, *static_cast<CTxMemPool*>(&tx_pool), ::Params(), options};
+ auto block_template = assembler.CreateNewBlock(CScript{} << OP_TRUE);
+ Assert(block_template->block.vtx.size() >= 1);
+ }
+ const auto info_all = tx_pool.infoAll();
+ if (!info_all.empty()) {
+ const auto& tx_to_remove = *PickValue(fuzzed_data_provider, info_all).tx;
+ WITH_LOCK(tx_pool.cs, tx_pool.removeRecursive(tx_to_remove, /* dummy */ MemPoolRemovalReason::BLOCK));
+ std::vector<uint256> all_txids;
+ tx_pool.queryHashes(all_txids);
+ assert(all_txids.size() < info_all.size());
+ WITH_LOCK(::cs_main, tx_pool.check(chainstate));
+ }
+ SyncWithValidationInterfaceQueue();
+}
+
+void MockTime(FuzzedDataProvider& fuzzed_data_provider, const CChainState& chainstate)
+{
+ const auto time = ConsumeTime(fuzzed_data_provider,
+ chainstate.m_chain.Tip()->GetMedianTimePast() + 1,
+ std::numeric_limits<decltype(chainstate.m_chain.Tip()->nTime)>::max());
+ SetMockTime(time);
+}
+
FUZZ_TARGET_INIT(tx_pool_standard, initialize_tx_pool)
{
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
const auto& node = g_setup->m_node;
auto& chainstate = node.chainman->ActiveChainstate();
- SetMockTime(ConsumeTime(fuzzed_data_provider));
+ MockTime(fuzzed_data_provider, chainstate);
SetMempoolConstraints(*node.args, fuzzed_data_provider);
// All RBF-spendable outpoints
@@ -163,7 +195,7 @@ FUZZ_TARGET_INIT(tx_pool_standard, initialize_tx_pool)
}();
if (fuzzed_data_provider.ConsumeBool()) {
- SetMockTime(ConsumeTime(fuzzed_data_provider));
+ MockTime(fuzzed_data_provider, chainstate);
}
if (fuzzed_data_provider.ConsumeBool()) {
SetMempoolConstraints(*node.args, fuzzed_data_provider);
@@ -237,23 +269,17 @@ FUZZ_TARGET_INIT(tx_pool_standard, initialize_tx_pool)
}
}
}
- WITH_LOCK(::cs_main, tx_pool.check(chainstate));
- const auto info_all = tx_pool.infoAll();
- if (!info_all.empty()) {
- const auto& tx_to_remove = *PickValue(fuzzed_data_provider, info_all).tx;
- WITH_LOCK(tx_pool.cs, tx_pool.removeRecursive(tx_to_remove, /* dummy */ MemPoolRemovalReason::BLOCK));
- std::vector<uint256> all_txids;
- tx_pool.queryHashes(all_txids);
- assert(all_txids.size() < info_all.size());
- WITH_LOCK(::cs_main, tx_pool.check(chainstate));
- }
- SyncWithValidationInterfaceQueue();
+ Finish(fuzzed_data_provider, tx_pool, chainstate);
}
FUZZ_TARGET_INIT(tx_pool, initialize_tx_pool)
{
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
const auto& node = g_setup->m_node;
+ auto& chainstate = node.chainman->ActiveChainstate();
+
+ MockTime(fuzzed_data_provider, chainstate);
+ SetMempoolConstraints(*node.args, fuzzed_data_provider);
std::vector<uint256> txids;
for (const auto& outpoint : g_outpoints_coinbase_init_mature) {
@@ -265,11 +291,29 @@ FUZZ_TARGET_INIT(tx_pool, initialize_tx_pool)
txids.push_back(ConsumeUInt256(fuzzed_data_provider));
}
- CTxMemPool tx_pool{/* estimator */ nullptr, /* check_ratio */ 1};
+ CTxMemPool tx_pool_{/* estimator */ nullptr, /* check_ratio */ 1};
+ MockedTxPool& tx_pool = *static_cast<MockedTxPool*>(&tx_pool_);
while (fuzzed_data_provider.ConsumeBool()) {
const auto mut_tx = ConsumeTransaction(fuzzed_data_provider, txids);
+ if (fuzzed_data_provider.ConsumeBool()) {
+ MockTime(fuzzed_data_provider, chainstate);
+ }
+ if (fuzzed_data_provider.ConsumeBool()) {
+ SetMempoolConstraints(*node.args, fuzzed_data_provider);
+ }
+ if (fuzzed_data_provider.ConsumeBool()) {
+ tx_pool.RollingFeeUpdate();
+ }
+ if (fuzzed_data_provider.ConsumeBool()) {
+ const auto& txid = fuzzed_data_provider.ConsumeBool() ?
+ mut_tx.GetHash() :
+ PickValue(fuzzed_data_provider, txids);
+ const auto delta = fuzzed_data_provider.ConsumeIntegralInRange<CAmount>(-50 * COIN, +50 * COIN);
+ tx_pool.PrioritiseTransaction(txid, delta);
+ }
+
const auto tx = MakeTransactionRef(mut_tx);
const bool bypass_limits = fuzzed_data_provider.ConsumeBool();
::fRequireStandard = fuzzed_data_provider.ConsumeBool();
@@ -278,8 +322,7 @@ FUZZ_TARGET_INIT(tx_pool, initialize_tx_pool)
if (accepted) {
txids.push_back(tx->GetHash());
}
-
- SyncWithValidationInterfaceQueue();
}
+ Finish(fuzzed_data_provider, tx_pool, chainstate);
}
} // namespace
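In tx_pool.cpp the end-of-run logic moves into Finish(), which both targets now share: it checks mempool consistency, assembles a block template over whatever the fuzzer left in the pool, and exercises a random recursive removal. MockTime() replaces the unconstrained ConsumeTime() call and draws the mocked clock from [tip median-time-past + 1, max representable nTime], presumably so the fuzzed clock never runs behind the chain tip while blocks are being assembled. The bounding itself is just FuzzedDataProvider's range helper; a minimal sketch, assuming the vendored FuzzedDataProvider header used elsewhere in these harnesses:

    #include <test/fuzz/FuzzedDataProvider.h>
    #include <cstdint>

    // Clamp a fuzzed value into a caller-supplied range so the harness does
    // not waste iterations on inputs that validation would reject outright.
    int64_t ConsumeBoundedTime(FuzzedDataProvider& provider, int64_t lower, int64_t upper)
    {
        return provider.ConsumeIntegralInRange<int64_t>(lower, upper);
    }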
diff --git a/src/test/fuzz/util.cpp b/src/test/fuzz/util.cpp
index b8d846a995..574b694d1a 100644
--- a/src/test/fuzz/util.cpp
+++ b/src/test/fuzz/util.cpp
@@ -5,6 +5,7 @@
#include <test/fuzz/util.h>
#include <test/util/script.h>
#include <util/rbf.h>
+#include <util/time.h>
#include <version.h>
FuzzedSock::FuzzedSock(FuzzedDataProvider& fuzzed_data_provider)
@@ -216,6 +217,14 @@ void FillNode(FuzzedDataProvider& fuzzed_data_provider, CNode& node, bool init_v
}
}
+int64_t ConsumeTime(FuzzedDataProvider& fuzzed_data_provider, const std::optional<int64_t>& min, const std::optional<int64_t>& max) noexcept
+{
+ // Avoid t=0 (1970-01-01T00:00:00Z) since SetMockTime(0) disables mocktime.
+ static const int64_t time_min = ParseISO8601DateTime("1970-01-01T00:00:01Z");
+ static const int64_t time_max = ParseISO8601DateTime("9999-12-31T23:59:59Z");
+ return fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(min.value_or(time_min), max.value_or(time_max));
+}
+
CMutableTransaction ConsumeTransaction(FuzzedDataProvider& fuzzed_data_provider, const std::optional<std::vector<uint256>>& prevout_txids, const int max_num_in, const int max_num_out) noexcept
{
CMutableTransaction tx_mut;
@@ -267,13 +276,13 @@ CScriptWitness ConsumeScriptWitness(FuzzedDataProvider& fuzzed_data_provider, co
return ret;
}
-CScript ConsumeScript(FuzzedDataProvider& fuzzed_data_provider, const size_t max_length, const bool maybe_p2wsh) noexcept
+CScript ConsumeScript(FuzzedDataProvider& fuzzed_data_provider, const std::optional<size_t>& max_length, const bool maybe_p2wsh) noexcept
{
const std::vector<uint8_t> b = ConsumeRandomLengthByteVector(fuzzed_data_provider, max_length);
CScript r_script{b.begin(), b.end()};
if (maybe_p2wsh && fuzzed_data_provider.ConsumeBool()) {
uint256 script_hash;
- CSHA256().Write(&r_script[0], r_script.size()).Finalize(script_hash.begin());
+ CSHA256().Write(r_script.data(), r_script.size()).Finalize(script_hash.begin());
r_script.clear();
r_script << OP_0 << ToByteVector(script_hash);
}
diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h
index 8f4f87fbdc..1a83f7a57a 100644
--- a/src/test/fuzz/util.h
+++ b/src/test/fuzz/util.h
@@ -26,7 +26,6 @@
#include <test/util/net.h>
#include <txmempool.h>
#include <uint256.h>
-#include <util/time.h>
#include <version.h>
#include <algorithm>
@@ -58,18 +57,20 @@ auto& PickValue(FuzzedDataProvider& fuzzed_data_provider, Collection& col)
return *it;
}
-[[nodiscard]] inline std::vector<uint8_t> ConsumeRandomLengthByteVector(FuzzedDataProvider& fuzzed_data_provider, const size_t max_length = 4096) noexcept
+[[nodiscard]] inline std::vector<uint8_t> ConsumeRandomLengthByteVector(FuzzedDataProvider& fuzzed_data_provider, const std::optional<size_t>& max_length = std::nullopt) noexcept
{
- const std::string s = fuzzed_data_provider.ConsumeRandomLengthString(max_length);
+ const std::string s = max_length ?
+ fuzzed_data_provider.ConsumeRandomLengthString(*max_length) :
+ fuzzed_data_provider.ConsumeRandomLengthString();
return {s.begin(), s.end()};
}
-[[nodiscard]] inline std::vector<bool> ConsumeRandomLengthBitVector(FuzzedDataProvider& fuzzed_data_provider, const size_t max_length = 4096) noexcept
+[[nodiscard]] inline std::vector<bool> ConsumeRandomLengthBitVector(FuzzedDataProvider& fuzzed_data_provider, const std::optional<size_t>& max_length = std::nullopt) noexcept
{
return BytesToBits(ConsumeRandomLengthByteVector(fuzzed_data_provider, max_length));
}
-[[nodiscard]] inline CDataStream ConsumeDataStream(FuzzedDataProvider& fuzzed_data_provider, const size_t max_length = 4096) noexcept
+[[nodiscard]] inline CDataStream ConsumeDataStream(FuzzedDataProvider& fuzzed_data_provider, const std::optional<size_t>& max_length = std::nullopt) noexcept
{
return CDataStream{ConsumeRandomLengthByteVector(fuzzed_data_provider, max_length), SER_NETWORK, INIT_PROTO_VERSION};
}
@@ -96,7 +97,7 @@ template <typename T>
}
template <typename T>
-[[nodiscard]] inline std::optional<T> ConsumeDeserializable(FuzzedDataProvider& fuzzed_data_provider, const size_t max_length = 4096) noexcept
+[[nodiscard]] inline std::optional<T> ConsumeDeserializable(FuzzedDataProvider& fuzzed_data_provider, const std::optional<size_t>& max_length = std::nullopt) noexcept
{
const std::vector<uint8_t> buffer = ConsumeRandomLengthByteVector(fuzzed_data_provider, max_length);
CDataStream ds{buffer, SER_NETWORK, INIT_PROTO_VERSION};
@@ -127,19 +128,13 @@ template <typename WeakEnumType, size_t size>
return fuzzed_data_provider.ConsumeIntegralInRange<CAmount>(0, MAX_MONEY);
}
-[[nodiscard]] inline int64_t ConsumeTime(FuzzedDataProvider& fuzzed_data_provider) noexcept
-{
- // Avoid t=0 (1970-01-01T00:00:00Z) since SetMockTime(0) is a no-op.
- static const int64_t time_min = ParseISO8601DateTime("1970-01-01T00:00:01Z");
- static const int64_t time_max = ParseISO8601DateTime("9999-12-31T23:59:59Z");
- return fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(time_min, time_max);
-}
+[[nodiscard]] int64_t ConsumeTime(FuzzedDataProvider& fuzzed_data_provider, const std::optional<int64_t>& min = std::nullopt, const std::optional<int64_t>& max = std::nullopt) noexcept;
[[nodiscard]] CMutableTransaction ConsumeTransaction(FuzzedDataProvider& fuzzed_data_provider, const std::optional<std::vector<uint256>>& prevout_txids, const int max_num_in = 10, const int max_num_out = 10) noexcept;
[[nodiscard]] CScriptWitness ConsumeScriptWitness(FuzzedDataProvider& fuzzed_data_provider, const size_t max_stack_elem_size = 32) noexcept;
-[[nodiscard]] CScript ConsumeScript(FuzzedDataProvider& fuzzed_data_provider, const size_t max_length = 4096, const bool maybe_p2wsh = false) noexcept;
+[[nodiscard]] CScript ConsumeScript(FuzzedDataProvider& fuzzed_data_provider, const std::optional<size_t>& max_length = std::nullopt, const bool maybe_p2wsh = false) noexcept;
[[nodiscard]] uint32_t ConsumeSequence(FuzzedDataProvider& fuzzed_data_provider) noexcept;
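The fuzz utility helpers switch their max_length parameters from a hard 4096 default to std::optional<size_t>, where std::nullopt now means "no explicit cap" (ConsumeRandomLengthString() with no argument, which may consume all remaining bytes) rather than a fixed limit. A minimal sketch of that parameter style, using a hypothetical Truncate() helper:

    #include <cstddef>
    #include <optional>
    #include <string>

    // std::nullopt means "no explicit limit"; a value means "cap at that length".
    std::string Truncate(const std::string& s, const std::optional<std::size_t>& max_length = std::nullopt)
    {
        if (!max_length) return s;           // no cap requested
        return s.substr(0, *max_length);     // apply the cap
    }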
diff --git a/src/test/script_standard_tests.cpp b/src/test/script_standard_tests.cpp
index 4dc0dd5f51..44fbfa5970 100644
--- a/src/test/script_standard_tests.cpp
+++ b/src/test/script_standard_tests.cpp
@@ -98,7 +98,7 @@ BOOST_AUTO_TEST_CASE(script_standard_Solver_success)
// TxoutType::WITNESS_V0_SCRIPTHASH
uint256 scriptHash;
- CSHA256().Write(&redeemScript[0], redeemScript.size())
+ CSHA256().Write(redeemScript.data(), redeemScript.size())
.Finalize(scriptHash.begin());
s.clear();
@@ -199,23 +199,20 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestination)
s.clear();
s << ToByteVector(pubkey) << OP_CHECKSIG;
BOOST_CHECK(ExtractDestination(s, address));
- BOOST_CHECK(std::get_if<PKHash>(&address) &&
- *std::get_if<PKHash>(&address) == PKHash(pubkey));
+ BOOST_CHECK(std::get<PKHash>(address) == PKHash(pubkey));
// TxoutType::PUBKEYHASH
s.clear();
s << OP_DUP << OP_HASH160 << ToByteVector(pubkey.GetID()) << OP_EQUALVERIFY << OP_CHECKSIG;
BOOST_CHECK(ExtractDestination(s, address));
- BOOST_CHECK(std::get_if<PKHash>(&address) &&
- *std::get_if<PKHash>(&address) == PKHash(pubkey));
+ BOOST_CHECK(std::get<PKHash>(address) == PKHash(pubkey));
// TxoutType::SCRIPTHASH
CScript redeemScript(s); // initialize with leftover P2PKH script
s.clear();
s << OP_HASH160 << ToByteVector(CScriptID(redeemScript)) << OP_EQUAL;
BOOST_CHECK(ExtractDestination(s, address));
- BOOST_CHECK(std::get_if<ScriptHash>(&address) &&
- *std::get_if<ScriptHash>(&address) == ScriptHash(redeemScript));
+ BOOST_CHECK(std::get<ScriptHash>(address) == ScriptHash(redeemScript));
// TxoutType::MULTISIG
s.clear();
@@ -233,7 +230,7 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestination)
BOOST_CHECK(ExtractDestination(s, address));
WitnessV0KeyHash keyhash;
CHash160().Write(pubkey).Finalize(keyhash);
- BOOST_CHECK(std::get_if<WitnessV0KeyHash>(&address) && *std::get_if<WitnessV0KeyHash>(&address) == keyhash);
+ BOOST_CHECK(std::get<WitnessV0KeyHash>(address) == keyhash);
// TxoutType::WITNESS_V0_SCRIPTHASH
s.clear();
@@ -241,7 +238,7 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestination)
CSHA256().Write(redeemScript.data(), redeemScript.size()).Finalize(scripthash.begin());
s << OP_0 << ToByteVector(scripthash);
BOOST_CHECK(ExtractDestination(s, address));
- BOOST_CHECK(std::get_if<WitnessV0ScriptHash>(&address) && *std::get_if<WitnessV0ScriptHash>(&address) == scripthash);
+ BOOST_CHECK(std::get<WitnessV0ScriptHash>(address) == scripthash);
// TxoutType::WITNESS_UNKNOWN with unknown version
s.clear();
@@ -251,7 +248,7 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestination)
unk.length = 33;
unk.version = 1;
std::copy(pubkey.begin(), pubkey.end(), unk.program);
- BOOST_CHECK(std::get_if<WitnessUnknown>(&address) && *std::get_if<WitnessUnknown>(&address) == unk);
+ BOOST_CHECK(std::get<WitnessUnknown>(address) == unk);
}
BOOST_AUTO_TEST_CASE(script_standard_ExtractDestinations)
@@ -275,8 +272,7 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestinations)
BOOST_CHECK_EQUAL(whichType, TxoutType::PUBKEY);
BOOST_CHECK_EQUAL(addresses.size(), 1U);
BOOST_CHECK_EQUAL(nRequired, 1);
- BOOST_CHECK(std::get_if<PKHash>(&addresses[0]) &&
- *std::get_if<PKHash>(&addresses[0]) == PKHash(pubkeys[0]));
+ BOOST_CHECK(std::get<PKHash>(addresses[0]) == PKHash(pubkeys[0]));
// TxoutType::PUBKEYHASH
s.clear();
@@ -285,8 +281,7 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestinations)
BOOST_CHECK_EQUAL(whichType, TxoutType::PUBKEYHASH);
BOOST_CHECK_EQUAL(addresses.size(), 1U);
BOOST_CHECK_EQUAL(nRequired, 1);
- BOOST_CHECK(std::get_if<PKHash>(&addresses[0]) &&
- *std::get_if<PKHash>(&addresses[0]) == PKHash(pubkeys[0]));
+ BOOST_CHECK(std::get<PKHash>(addresses[0]) == PKHash(pubkeys[0]));
// TxoutType::SCRIPTHASH
CScript redeemScript(s); // initialize with leftover P2PKH script
@@ -296,8 +291,7 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestinations)
BOOST_CHECK_EQUAL(whichType, TxoutType::SCRIPTHASH);
BOOST_CHECK_EQUAL(addresses.size(), 1U);
BOOST_CHECK_EQUAL(nRequired, 1);
- BOOST_CHECK(std::get_if<ScriptHash>(&addresses[0]) &&
- *std::get_if<ScriptHash>(&addresses[0]) == ScriptHash(redeemScript));
+ BOOST_CHECK(std::get<ScriptHash>(addresses[0]) == ScriptHash(redeemScript));
// TxoutType::MULTISIG
s.clear();
@@ -309,10 +303,8 @@ BOOST_AUTO_TEST_CASE(script_standard_ExtractDestinations)
BOOST_CHECK_EQUAL(whichType, TxoutType::MULTISIG);
BOOST_CHECK_EQUAL(addresses.size(), 2U);
BOOST_CHECK_EQUAL(nRequired, 2);
- BOOST_CHECK(std::get_if<PKHash>(&addresses[0]) &&
- *std::get_if<PKHash>(&addresses[0]) == PKHash(pubkeys[0]));
- BOOST_CHECK(std::get_if<PKHash>(&addresses[1]) &&
- *std::get_if<PKHash>(&addresses[1]) == PKHash(pubkeys[1]));
+ BOOST_CHECK(std::get<PKHash>(addresses[0]) == PKHash(pubkeys[0]));
+ BOOST_CHECK(std::get<PKHash>(addresses[1]) == PKHash(pubkeys[1]));
// TxoutType::NULL_DATA
s.clear();
@@ -378,7 +370,7 @@ BOOST_AUTO_TEST_CASE(script_standard_GetScriptFor_)
witnessScript << OP_1 << ToByteVector(pubkeys[0]) << OP_1 << OP_CHECKMULTISIG;
uint256 scriptHash;
- CSHA256().Write(&witnessScript[0], witnessScript.size())
+ CSHA256().Write(witnessScript.data(), witnessScript.size())
.Finalize(scriptHash.begin());
expected.clear();
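The checks in script_standard_tests.cpp switch from std::get_if (which returns nullptr when the variant holds a different alternative) to std::get (which throws std::bad_variant_access on mismatch). That collapses each two-part check into one line while still failing the test on a wrong alternative, only via an escaping exception instead of a null-pointer check. A minimal standard-library sketch of the two access styles, independent of the Bitcoin Core destination types:

    #include <cassert>
    #include <variant>

    int main()
    {
        std::variant<int, double> v{42};
        // get_if: returns nullptr if the alternative does not match.
        assert(std::get_if<double>(&v) == nullptr);
        // get: returns a reference, or throws std::bad_variant_access on mismatch.
        assert(std::get<int>(v) == 42);
        try {
            (void)std::get<double>(v);
        } catch (const std::bad_variant_access&) {
            // the mismatch surfaces as an exception rather than a null pointer
        }
    }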
diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp
index 14cfadf75d..171234f745 100644
--- a/src/test/script_tests.cpp
+++ b/src/test/script_tests.cpp
@@ -155,10 +155,10 @@ void DoTest(const CScript& scriptPubKey, const CScript& scriptSig, const CScript
if (libconsensus_flags == flags) {
int expectedSuccessCode = expect ? 1 : 0;
if (flags & bitcoinconsensus_SCRIPT_FLAGS_VERIFY_WITNESS) {
- BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), txCredit.vout[0].nValue, (const unsigned char*)&stream[0], stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
+ BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), txCredit.vout[0].nValue, stream.data(), stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
} else {
- BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), 0, (const unsigned char*)&stream[0], stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
- BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), (const unsigned char*)&stream[0], stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
+ BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script_with_amount(scriptPubKey.data(), scriptPubKey.size(), 0, stream.data(), stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
+ BOOST_CHECK_MESSAGE(bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), 0, libconsensus_flags, nullptr) == expectedSuccessCode, message);
}
}
#endif
@@ -224,7 +224,7 @@ struct KeyData
pubkey0 = key0.GetPubKey();
pubkey0H = key0.GetPubKey();
pubkey0C = key0C.GetPubKey();
- *const_cast<unsigned char*>(&pubkey0H[0]) = 0x06 | (pubkey0H[64] & 1);
+ *const_cast<unsigned char*>(pubkey0H.data()) = 0x06 | (pubkey0H[64] & 1);
key1.Set(vchKey1, vchKey1 + 32, false);
key1C.Set(vchKey1, vchKey1 + 32, true);
@@ -290,7 +290,7 @@ public:
} else if (wm == WitnessMode::SH) {
witscript = scriptPubKey;
uint256 hash;
- CSHA256().Write(&witscript[0], witscript.size()).Finalize(hash.begin());
+ CSHA256().Write(witscript.data(), witscript.size()).Finalize(hash.begin());
scriptPubKey = CScript() << witnessversion << ToByteVector(hash);
}
if (P2SH) {
@@ -774,7 +774,7 @@ BOOST_AUTO_TEST_CASE(script_build)
{
CScript witscript = CScript() << ToByteVector(keys.pubkey0);
uint256 hash;
- CSHA256().Write(&witscript[0], witscript.size()).Finalize(hash.begin());
+ CSHA256().Write(witscript.data(), witscript.size()).Finalize(hash.begin());
std::vector<unsigned char> hashBytes = ToByteVector(hash);
hashBytes.pop_back();
tests.push_back(TestBuilder(CScript() << OP_0 << hashBytes,
@@ -1520,7 +1520,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_returns_true)
stream << spendTx;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), (const unsigned char*)&stream[0], stream.size(), nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 1);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_OK);
}
@@ -1543,7 +1543,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_tx_index_err)
stream << spendTx;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), (const unsigned char*)&stream[0], stream.size(), nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_TX_INDEX);
}
@@ -1566,7 +1566,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_tx_size)
stream << spendTx;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), (const unsigned char*)&stream[0], stream.size() * 2, nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size() * 2, nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_TX_SIZE_MISMATCH);
}
@@ -1589,7 +1589,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_tx_serialization)
stream << 0xffffffff;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), (const unsigned char*)&stream[0], stream.size(), nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_TX_DESERIALIZE);
}
@@ -1612,7 +1612,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_amount_required_err)
stream << spendTx;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), (const unsigned char*)&stream[0], stream.size(), nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_AMOUNT_REQUIRED);
}
@@ -1635,7 +1635,7 @@ BOOST_AUTO_TEST_CASE(bitcoinconsensus_verify_script_invalid_flags)
stream << spendTx;
bitcoinconsensus_error err;
- int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), (const unsigned char*)&stream[0], stream.size(), nIn, libconsensus_flags, &err);
+ int result = bitcoinconsensus_verify_script(scriptPubKey.data(), scriptPubKey.size(), stream.data(), stream.size(), nIn, libconsensus_flags, &err);
BOOST_CHECK_EQUAL(result, 0);
BOOST_CHECK_EQUAL(err, bitcoinconsensus_ERR_INVALID_FLAGS);
}
@@ -1733,7 +1733,7 @@ BOOST_AUTO_TEST_CASE(script_assets_test)
size_t length = file.tellg();
file.seekg(0, std::ios::beg);
std::string data(length, '\0');
- file.read(&data[0], data.size());
+ file.read(data.data(), data.size());
UniValue tests = read_json(data);
BOOST_CHECK(tests.isArray());
BOOST_CHECK(tests.size() > 0);
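The recurring substitution of &container[0] with container.data() in this file (and in util.cpp, the wallet code, and the ZMQ notifier further down) is more than style: indexing an empty container with operator[] is undefined behaviour, while data() is well defined even when size() == 0, and since C++17 std::string::data() also yields a mutable pointer, so file.read(data.data(), data.size()) needs no cast. A minimal sketch of why the .data() form is safer:

    #include <cstddef>
    #include <vector>

    std::size_t Checksum(const std::vector<unsigned char>& bytes)
    {
        // bytes.data() is valid even when bytes.empty(); &bytes[0] would be
        // undefined behaviour in that case.
        const unsigned char* p = bytes.data();
        std::size_t sum = 0;
        for (std::size_t i = 0; i < bytes.size(); ++i) sum += p[i];
        return sum;
    }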
diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp
index 3079c9ff29..7af2b79f37 100644
--- a/src/test/streams_tests.cpp
+++ b/src/test/streams_tests.cpp
@@ -112,6 +112,17 @@ BOOST_AUTO_TEST_CASE(streams_vector_reader)
BOOST_CHECK_THROW(new_reader >> d, std::ios_base::failure);
}
+BOOST_AUTO_TEST_CASE(streams_vector_reader_rvalue)
+{
+ std::vector<uint8_t> data{0x82, 0xa7, 0x31};
+ VectorReader reader(SER_NETWORK, INIT_PROTO_VERSION, data, /* pos= */ 0);
+ uint32_t varint = 0;
+ // Deserialize into r-value
+ reader >> VARINT(varint);
+ BOOST_CHECK_EQUAL(varint, 54321);
+ BOOST_CHECK(reader.empty());
+}
+
BOOST_AUTO_TEST_CASE(bitstream_reader_writer)
{
CDataStream data(SER_NETWORK, INIT_PROTO_VERSION);
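The new streams_vector_reader_rvalue test deserializes the bytes {0x82, 0xa7, 0x31} through VARINT and expects 54321. Assuming the MSB-continuation format used by Bitcoin Core's ReadVarInt, the arithmetic works out as: (0x82 & 0x7F) = 2, continuation bit set so +1 gives 3; 3·128 + (0xa7 & 0x7F) = 423, +1 gives 424; 424·128 + 0x31 = 54321, high bit clear so the decode stops. A minimal sketch of that decode loop:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    uint32_t DecodeVarInt(const std::vector<uint8_t>& bytes)
    {
        uint32_t n = 0;
        for (uint8_t b : bytes) {
            n = (n << 7) | (b & 0x7F); // 7 payload bits per byte
            if (b & 0x80) {
                ++n;                   // continuation bytes carry an implicit +1
            } else {
                break;                 // high bit clear: last byte
            }
        }
        return n;
    }

    int main()
    {
        assert(DecodeVarInt({0x82, 0xa7, 0x31}) == 54321);
    }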
diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp
index ab31662f97..82e70b5cdc 100644
--- a/src/test/validation_chainstatemanager_tests.cpp
+++ b/src/test/validation_chainstatemanager_tests.cpp
@@ -233,6 +233,7 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_activate_snapshot, TestChain100Setup)
// Mine 10 more blocks, putting us at height 110 where a valid assumeutxo value can
// be found.
+ constexpr int snapshot_height = 110;
mineBlocks(10);
initial_size += 10;
initial_total_coins += 10;
@@ -273,6 +274,11 @@ BOOST_FIXTURE_TEST_CASE(chainstatemanager_activate_snapshot, TestChain100Setup)
chainman.ActiveChainstate().m_from_snapshot_blockhash,
*chainman.SnapshotBlockhash());
+ const AssumeutxoData& au_data = *ExpectedAssumeutxo(snapshot_height, ::Params());
+ const CBlockIndex* tip = chainman.ActiveTip();
+
+ BOOST_CHECK_EQUAL(tip->nChainTx, au_data.nChainTx);
+
// To be checked against later when we try loading a subsequent snapshot.
uint256 loaded_snapshot_blockhash{*chainman.SnapshotBlockhash()};
diff --git a/src/util/asmap.cpp b/src/util/asmap.cpp
index bd77d74218..bacc3690a2 100644
--- a/src/util/asmap.cpp
+++ b/src/util/asmap.cpp
@@ -93,8 +93,7 @@ uint32_t Interpret(const std::vector<bool> &asmap, const std::vector<bool> &ip)
jump = DecodeJump(pos, endpos);
if (jump == INVALID) break; // Jump offset straddles EOF
if (bits == 0) break; // No input bits left
- if (pos + jump < pos) break; // overflow
- if (pos + jump >= endpos) break; // Jumping past EOF
+ if (int64_t{jump} >= int64_t{endpos - pos}) break; // Jumping past EOF
if (ip[ip.size() - bits]) {
pos += jump;
}
@@ -156,8 +155,7 @@ bool SanityCheckASMap(const std::vector<bool>& asmap, int bits)
} else if (opcode == Instruction::JUMP) {
uint32_t jump = DecodeJump(pos, endpos);
if (jump == INVALID) return false; // Jump offset straddles EOF
- if (pos + jump < pos) return false; // overflow
- if (pos + jump > endpos) return false; // Jump out of range
+ if (int64_t{jump} > int64_t{endpos - pos}) return false; // Jump out of range
if (bits == 0) return false; // Consuming bits past the end of the input
--bits;
uint32_t jump_offset = pos - begin + jump;
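In asmap.cpp, pos and endpos are vector<bool> const_iterators, so the old guard computed pos + jump before comparing, and advancing an iterator past the end is already undefined behaviour regardless of the subsequent "overflow" check. The replacement instead compares the requested jump against the remaining distance endpos - pos, widened to a signed 64-bit type, which stays entirely within defined behaviour. A minimal sketch of the bounds-check pattern the Interpret hunk switches to:

    #include <cstdint>
    #include <vector>

    bool CanJump(std::vector<bool>::const_iterator pos,
                 std::vector<bool>::const_iterator end,
                 uint32_t jump)
    {
        // end - pos is the number of elements left; widening both sides to
        // int64_t avoids signed/unsigned surprises in the comparison.
        return int64_t{jump} < int64_t{end - pos};
    }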
diff --git a/src/validation.cpp b/src/validation.cpp
index bf774a0791..52bdee699c 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -66,10 +66,6 @@
static const unsigned int EXTRA_DESCENDANT_TX_SIZE_LIMIT = 10000;
/** Maximum kilobytes for transactions to store for processing during reorg */
static const unsigned int MAX_DISCONNECTED_TX_POOL_SIZE = 20000;
-/** The pre-allocation chunk size for blk?????.dat files (since 0.8) */
-static const unsigned int BLOCKFILE_CHUNK_SIZE = 0x1000000; // 16 MiB
-/** The pre-allocation chunk size for rev?????.dat files (since 0.8) */
-static const unsigned int UNDOFILE_CHUNK_SIZE = 0x100000; // 1 MiB
/** Time to wait between writing blocks/block index to disk. */
static constexpr std::chrono::hours DATABASE_WRITE_INTERVAL{1};
/** Time to wait between flushing chainstate to disk. */
@@ -135,14 +131,9 @@ Mutex g_best_block_mutex;
std::condition_variable g_best_block_cv;
uint256 g_best_block;
bool g_parallel_script_checks{false};
-std::atomic_bool fImporting(false);
-std::atomic_bool fReindex(false);
-bool fHavePruned = false;
-bool fPruneMode = false;
bool fRequireStandard = true;
bool fCheckBlockIndex = false;
bool fCheckpointsEnabled = DEFAULT_CHECKPOINTS_ENABLED;
-uint64_t nPruneTarget = 0;
int64_t nMaxTipAge = DEFAULT_MAX_TIP_AGE;
uint256 hashAssumeValid;
@@ -153,22 +144,17 @@ CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
// Internal stuff
namespace {
CBlockIndex* pindexBestInvalid = nullptr;
-
- RecursiveMutex cs_LastBlockFile;
- std::vector<CBlockFileInfo> vinfoBlockFile;
- int nLastBlockFile = 0;
- /** Global flag to indicate we should check to see if there are
- * block/undo files that should be deleted. Set on startup
- * or if we allocate more file space when we're in prune mode
- */
- bool fCheckForPruning = false;
-
- /** Dirty block index entries. */
- std::set<CBlockIndex*> setDirtyBlockIndex;
-
- /** Dirty block file entries. */
- std::set<int> setDirtyFileInfo;
-} // anon namespace
+} // namespace
+
+// Internal stuff from blockstorage ...
+extern RecursiveMutex cs_LastBlockFile;
+extern std::vector<CBlockFileInfo> vinfoBlockFile;
+extern int nLastBlockFile;
+extern bool fCheckForPruning;
+extern std::set<CBlockIndex*> setDirtyBlockIndex;
+extern std::set<int> setDirtyFileInfo;
+void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false);
+// ... TODO move fully to blockstorage
CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
{
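The block-file bookkeeping globals leave validation.cpp's anonymous namespace; the extern declarations keep the remaining users in this file compiling while the definitions appear to move into node/blockstorage.cpp (added elsewhere in this patch), with the TODO marking the split as transitional. A minimal sketch of the declaration/definition split, using a made-up identifier and collapsing both sides into one file so it stays self-contained:

    #include <cstdio>

    // Declaration: tells this translation unit the object exists elsewhere
    // (normally this line would live in a header or, as in the patch, be a
    // temporary extern in the consuming .cpp file).
    extern int nLastBlockFile_example;

    int NextBlockFile() { return nLastBlockFile_example + 1; }

    // Definition: exactly one translation unit owns the storage.
    int nLastBlockFile_example = 0;

    int main() { std::printf("%d\n", NextBlockFile()); }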
@@ -205,9 +191,6 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
bool cacheFullScriptStore, PrecomputedTransactionData& txdata,
std::vector<CScriptCheck>* pvChecks = nullptr)
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false);
-static FlatFileSeq BlockFileSeq();
-static FlatFileSeq UndoFileSeq();
bool CheckFinalTx(const CBlockIndex* active_chain_tip, const CTransaction &tx, int flags)
{
@@ -1462,65 +1445,7 @@ bool CheckInputScripts(const CTransaction& tx, TxValidationState& state,
return true;
}
-static bool UndoWriteToDisk(const CBlockUndo& blockundo, FlatFilePos& pos, const uint256& hashBlock, const CMessageHeader::MessageStartChars& messageStart)
-{
- // Open history file to append
- CAutoFile fileout(OpenUndoFile(pos), SER_DISK, CLIENT_VERSION);
- if (fileout.IsNull())
- return error("%s: OpenUndoFile failed", __func__);
-
- // Write index header
- unsigned int nSize = GetSerializeSize(blockundo, fileout.GetVersion());
- fileout << messageStart << nSize;
-
- // Write undo data
- long fileOutPos = ftell(fileout.Get());
- if (fileOutPos < 0)
- return error("%s: ftell failed", __func__);
- pos.nPos = (unsigned int)fileOutPos;
- fileout << blockundo;
-
- // calculate & write checksum
- CHashWriter hasher(SER_GETHASH, PROTOCOL_VERSION);
- hasher << hashBlock;
- hasher << blockundo;
- fileout << hasher.GetHash();
-
- return true;
-}
-
-bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex)
-{
- FlatFilePos pos = pindex->GetUndoPos();
- if (pos.IsNull()) {
- return error("%s: no undo data available", __func__);
- }
-
- // Open history file to read
- CAutoFile filein(OpenUndoFile(pos, true), SER_DISK, CLIENT_VERSION);
- if (filein.IsNull())
- return error("%s: OpenUndoFile failed", __func__);
-
- // Read block
- uint256 hashChecksum;
- CHashVerifier<CAutoFile> verifier(&filein); // We need a CHashVerifier as reserializing may lose data
- try {
- verifier << pindex->pprev->GetBlockHash();
- verifier >> blockundo;
- filein >> hashChecksum;
- }
- catch (const std::exception& e) {
- return error("%s: Deserialize or I/O error - %s", __func__, e.what());
- }
-
- // Verify checksum
- if (hashChecksum != verifier.GetHash())
- return error("%s: Checksum mismatch", __func__);
-
- return true;
-}
-
-static bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage = bilingual_str())
+bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage)
{
AbortNode(strMessage, userMessage);
return state.Error(strMessage);
@@ -1620,55 +1545,6 @@ DisconnectResult CChainState::DisconnectBlock(const CBlock& block, const CBlockI
return fClean ? DISCONNECT_OK : DISCONNECT_UNCLEAN;
}
-static void FlushUndoFile(int block_file, bool finalize = false)
-{
- FlatFilePos undo_pos_old(block_file, vinfoBlockFile[block_file].nUndoSize);
- if (!UndoFileSeq().Flush(undo_pos_old, finalize)) {
- AbortNode("Flushing undo file to disk failed. This is likely the result of an I/O error.");
- }
-}
-
-static void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false)
-{
- LOCK(cs_LastBlockFile);
- FlatFilePos block_pos_old(nLastBlockFile, vinfoBlockFile[nLastBlockFile].nSize);
- if (!BlockFileSeq().Flush(block_pos_old, fFinalize)) {
- AbortNode("Flushing block file to disk failed. This is likely the result of an I/O error.");
- }
- // we do not always flush the undo file, as the chain tip may be lagging behind the incoming blocks,
- // e.g. during IBD or a sync after a node going offline
- if (!fFinalize || finalize_undo) FlushUndoFile(nLastBlockFile, finalize_undo);
-}
-
-static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize);
-
-static bool WriteUndoDataForBlock(const CBlockUndo& blockundo, BlockValidationState& state, CBlockIndex* pindex, const CChainParams& chainparams)
-{
- // Write undo information to disk
- if (pindex->GetUndoPos().IsNull()) {
- FlatFilePos _pos;
- if (!FindUndoPos(state, pindex->nFile, _pos, ::GetSerializeSize(blockundo, CLIENT_VERSION) + 40))
- return error("ConnectBlock(): FindUndoPos failed");
- if (!UndoWriteToDisk(blockundo, _pos, pindex->pprev->GetBlockHash(), chainparams.MessageStart()))
- return AbortNode(state, "Failed to write undo data");
- // rev files are written in block height order, whereas blk files are written as blocks come in (often out of order)
- // we want to flush the rev (undo) file once we've written the last block, which is indicated by the last height
- // in the block file info as below; note that this does not catch the case where the undo writes are keeping up
- // with the block writes (usually when a synced up node is getting newly mined blocks) -- this case is caught in
- // the FindBlockPos function
- if (_pos.nFile < nLastBlockFile && static_cast<uint32_t>(pindex->nHeight) == vinfoBlockFile[_pos.nFile].nHeightLast) {
- FlushUndoFile(_pos.nFile, true);
- }
-
- // update nUndoPos in block index
- pindex->nUndoPos = _pos.nPos;
- pindex->nStatus |= BLOCK_HAVE_UNDO;
- setDirtyBlockIndex.insert(pindex);
- }
-
- return true;
-}
-
static CCheckQueue<CScriptCheck> scriptcheckqueue(128);
void StartScriptCheckWorkerThreads(int threads_num)
@@ -3102,84 +2978,6 @@ void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pi
}
}
-// TODO move to blockstorage
-bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, CChain& active_chain, uint64_t nTime, bool fKnown = false)
-{
- LOCK(cs_LastBlockFile);
-
- unsigned int nFile = fKnown ? pos.nFile : nLastBlockFile;
- if (vinfoBlockFile.size() <= nFile) {
- vinfoBlockFile.resize(nFile + 1);
- }
-
- bool finalize_undo = false;
- if (!fKnown) {
- while (vinfoBlockFile[nFile].nSize + nAddSize >= (gArgs.GetBoolArg("-fastprune", false) ? 0x10000 /* 64kb */ : MAX_BLOCKFILE_SIZE)) {
- // when the undo file is keeping up with the block file, we want to flush it explicitly
- // when it is lagging behind (more blocks arrive than are being connected), we let the
- // undo block write case handle it
- assert(std::addressof(::ChainActive()) == std::addressof(active_chain));
- finalize_undo = (vinfoBlockFile[nFile].nHeightLast == (unsigned int)active_chain.Tip()->nHeight);
- nFile++;
- if (vinfoBlockFile.size() <= nFile) {
- vinfoBlockFile.resize(nFile + 1);
- }
- }
- pos.nFile = nFile;
- pos.nPos = vinfoBlockFile[nFile].nSize;
- }
-
- if ((int)nFile != nLastBlockFile) {
- if (!fKnown) {
- LogPrint(BCLog::VALIDATION, "Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
- }
- FlushBlockFile(!fKnown, finalize_undo);
- nLastBlockFile = nFile;
- }
-
- vinfoBlockFile[nFile].AddBlock(nHeight, nTime);
- if (fKnown)
- vinfoBlockFile[nFile].nSize = std::max(pos.nPos + nAddSize, vinfoBlockFile[nFile].nSize);
- else
- vinfoBlockFile[nFile].nSize += nAddSize;
-
- if (!fKnown) {
- bool out_of_space;
- size_t bytes_allocated = BlockFileSeq().Allocate(pos, nAddSize, out_of_space);
- if (out_of_space) {
- return AbortNode("Disk space is too low!", _("Disk space is too low!"));
- }
- if (bytes_allocated != 0 && fPruneMode) {
- fCheckForPruning = true;
- }
- }
-
- setDirtyFileInfo.insert(nFile);
- return true;
-}
-
-static bool FindUndoPos(BlockValidationState &state, int nFile, FlatFilePos &pos, unsigned int nAddSize)
-{
- pos.nFile = nFile;
-
- LOCK(cs_LastBlockFile);
-
- pos.nPos = vinfoBlockFile[nFile].nUndoSize;
- vinfoBlockFile[nFile].nUndoSize += nAddSize;
- setDirtyFileInfo.insert(nFile);
-
- bool out_of_space;
- size_t bytes_allocated = UndoFileSeq().Allocate(pos, nAddSize, out_of_space);
- if (out_of_space) {
- return AbortNode(state, "Disk space is too low!", _("Disk space is too low!"));
- }
- if (bytes_allocated != 0 && fPruneMode) {
- fCheckForPruning = true;
- }
-
- return true;
-}
-
static bool CheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true)
{
// Check proof of work matches claimed amount
@@ -3737,18 +3535,6 @@ bool TestBlockValidity(BlockValidationState& state,
* BLOCK PRUNING CODE
*/
-/* Calculate the amount of disk space the block & undo files currently use */
-uint64_t CalculateCurrentUsage()
-{
- LOCK(cs_LastBlockFile);
-
- uint64_t retval = 0;
- for (const CBlockFileInfo &file : vinfoBlockFile) {
- retval += file.nSize + file.nUndoSize;
- }
- return retval;
-}
-
void BlockManager::PruneOneBlockFile(const int fileNumber)
{
AssertLockHeld(cs_main);
@@ -3783,17 +3569,6 @@ void BlockManager::PruneOneBlockFile(const int fileNumber)
setDirtyFileInfo.insert(fileNumber);
}
-
-void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
-{
- for (std::set<int>::iterator it = setFilesToPrune.begin(); it != setFilesToPrune.end(); ++it) {
- FlatFilePos pos(*it, 0);
- fs::remove(BlockFileSeq().FileName(pos));
- fs::remove(UndoFileSeq().FileName(pos));
- LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
- }
-}
-
void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height)
{
assert(fPruneMode && nManualPruneHeight > 0);
@@ -3888,30 +3663,6 @@ void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPr
nLastBlockWeCanPrune, count);
}
-static FlatFileSeq BlockFileSeq()
-{
- return FlatFileSeq(gArgs.GetBlocksDirPath(), "blk", gArgs.GetBoolArg("-fastprune", false) ? 0x4000 /* 16kb */ : BLOCKFILE_CHUNK_SIZE);
-}
-
-static FlatFileSeq UndoFileSeq()
-{
- return FlatFileSeq(gArgs.GetBlocksDirPath(), "rev", UNDOFILE_CHUNK_SIZE);
-}
-
-FILE* OpenBlockFile(const FlatFilePos &pos, bool fReadOnly) {
- return BlockFileSeq().Open(pos, fReadOnly);
-}
-
-/** Open an undo file (rev?????.dat) */
-static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly) {
- return UndoFileSeq().Open(pos, fReadOnly);
-}
-
-fs::path GetBlockPosFilename(const FlatFilePos &pos)
-{
- return BlockFileSeq().FileName(pos);
-}
-
CBlockIndex * BlockManager::InsertBlockIndex(const uint256& hash)
{
AssertLockHeld(cs_main);
@@ -4744,18 +4495,6 @@ bool CChainState::ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size)
return ret;
}
-std::string CBlockFileInfo::ToString() const
-{
- return strprintf("CBlockFileInfo(blocks=%u, size=%u, heights=%u...%u, time=%s...%s)", nBlocks, nSize, nHeightFirst, nHeightLast, FormatISO8601Date(nTimeFirst), FormatISO8601Date(nTimeLast));
-}
-
-CBlockFileInfo* GetBlockFileInfo(size_t n)
-{
- LOCK(cs_LastBlockFile);
-
- return &vinfoBlockFile.at(n);
-}
-
static const uint64_t MEMPOOL_DUMP_VERSION = 1;
bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate, FopenFn mockable_fopen_function)
@@ -5236,7 +4975,7 @@ bool ChainstateManager::PopulateAndValidateSnapshot(
}
assert(index);
- index->nChainTx = metadata.m_nchaintx;
+ index->nChainTx = au_data.nChainTx;
snapshot_chainstate.setBlockIndexCandidates.insert(snapshot_start_block);
LogPrintf("[snapshot] validated snapshot (%.2f MB)\n",
diff --git a/src/validation.h b/src/validation.h
index 63a8bdc096..8fa45c3f77 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -27,6 +27,7 @@
#include <serialize.h>
#include <util/check.h>
#include <util/hasher.h>
+#include <util/translation.h>
#include <atomic>
#include <map>
@@ -70,8 +71,6 @@ static const unsigned int DEFAULT_DESCENDANT_LIMIT = 25;
static const unsigned int DEFAULT_DESCENDANT_SIZE_LIMIT = 101;
/** Default for -mempoolexpiry, expiration time for mempool transactions in hours */
static const unsigned int DEFAULT_MEMPOOL_EXPIRY = 336;
-/** The maximum size of a blk?????.dat file (since 0.8) */
-static const unsigned int MAX_BLOCKFILE_SIZE = 0x8000000; // 128 MiB
/** Maximum number of dedicated script-checking threads allowed */
static const int MAX_SCRIPTCHECK_THREADS = 15;
/** -par default (number of script-checking threads, 0 = auto) */
@@ -113,8 +112,6 @@ typedef std::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap;
extern Mutex g_best_block_mutex;
extern std::condition_variable g_best_block_cv;
extern uint256 g_best_block;
-extern std::atomic_bool fImporting;
-extern std::atomic_bool fReindex;
/** Whether there are dedicated script-checking threads running.
* False indicates all script checking is done on the ThreadMessageHandler thread.
*/
@@ -136,20 +133,9 @@ extern arith_uint256 nMinimumChainWork;
/** Best header we've seen so far (used for getheaders queries' starting points). */
extern CBlockIndex *pindexBestHeader;
-/** Pruning-related variables and constants */
-/** True if any block files have ever been pruned. */
-extern bool fHavePruned;
-/** True if we're running in -prune mode. */
-extern bool fPruneMode;
-/** Number of MiB of block files that we're trying to stay below. */
-extern uint64_t nPruneTarget;
/** Documentation for argument 'checklevel'. */
extern const std::vector<std::string> CHECKLEVEL_DOC;
-/** Open a block file (blk?????.dat) */
-FILE* OpenBlockFile(const FlatFilePos &pos, bool fReadOnly = false);
-/** Translation to a filesystem path */
-fs::path GetBlockPosFilename(const FlatFilePos &pos);
/** Unload database information */
void UnloadBlockIndex(CTxMemPool* mempool, ChainstateManager& chainman);
/** Run instances of script checking worker threads */
@@ -171,17 +157,11 @@ void StopScriptCheckWorkerThreads();
CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock);
CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams);
+bool AbortNode(BlockValidationState& state, const std::string& strMessage, const bilingual_str& userMessage = bilingual_str{});
+
/** Guess verification progress (as a fraction between 0.0=genesis and 1.0=current tip). */
double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex* pindex);
-/** Calculate the amount of disk space the block & undo files currently use */
-uint64_t CalculateCurrentUsage();
-
-/**
- * Actually unlink the specified files
- */
-void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune);
-
/** Prune block files up to a given height */
void PruneBlockFilesManual(CChainState& active_chainstate, int nManualPruneHeight);
@@ -1009,9 +989,6 @@ extern VersionBitsCache versionbitscache;
*/
int32_t ComputeBlockVersion(const CBlockIndex* pindexPrev, const Consensus::Params& params);
-/** Get block file info entry for one block file */
-CBlockFileInfo* GetBlockFileInfo(size_t n);
-
using FopenFn = std::function<FILE*(const fs::path&, const char*)>;
/** Dump the mempool to disk. */
@@ -1020,12 +997,6 @@ bool DumpMempool(const CTxMemPool& pool, FopenFn mockable_fopen_function = fsbri
/** Load the mempool from disk. */
bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate, FopenFn mockable_fopen_function = fsbridge::fopen);
-//! Check whether the block associated with this index entry is pruned or not.
-inline bool IsBlockPruned(const CBlockIndex* pblockindex)
-{
- return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
-}
-
/**
* Return the expected assumeutxo value for a given height, if one exists.
*
diff --git a/src/wallet/crypter.cpp b/src/wallet/crypter.cpp
index b50f00e7d1..251778e401 100644
--- a/src/wallet/crypter.cpp
+++ b/src/wallet/crypter.cpp
@@ -78,7 +78,7 @@ bool CCrypter::Encrypt(const CKeyingMaterial& vchPlaintext, std::vector<unsigned
vchCiphertext.resize(vchPlaintext.size() + AES_BLOCKSIZE);
AES256CBCEncrypt enc(vchKey.data(), vchIV.data(), true);
- size_t nLen = enc.Encrypt(&vchPlaintext[0], vchPlaintext.size(), vchCiphertext.data());
+ size_t nLen = enc.Encrypt(vchPlaintext.data(), vchPlaintext.size(), vchCiphertext.data());
if(nLen < vchPlaintext.size())
return false;
vchCiphertext.resize(nLen);
@@ -97,7 +97,7 @@ bool CCrypter::Decrypt(const std::vector<unsigned char>& vchCiphertext, CKeyingM
vchPlaintext.resize(nLen);
AES256CBCDecrypt dec(vchKey.data(), vchIV.data(), true);
- nLen = dec.Decrypt(vchCiphertext.data(), vchCiphertext.size(), &vchPlaintext[0]);
+ nLen = dec.Decrypt(vchCiphertext.data(), vchCiphertext.size(), vchPlaintext.data());
if(nLen == 0)
return false;
vchPlaintext.resize(nLen);
@@ -121,7 +121,7 @@ bool DecryptSecret(const CKeyingMaterial& vMasterKey, const std::vector<unsigned
memcpy(chIV.data(), &nIV, WALLET_CRYPTO_IV_SIZE);
if(!cKeyCrypter.SetKey(vMasterKey, chIV))
return false;
- return cKeyCrypter.Decrypt(vchCiphertext, *((CKeyingMaterial*)&vchPlaintext));
+ return cKeyCrypter.Decrypt(vchCiphertext, vchPlaintext);
}
bool DecryptKey(const CKeyingMaterial& vMasterKey, const std::vector<unsigned char>& vchCryptedSecret, const CPubKey& vchPubKey, CKey& key)
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index 653dbdfc1d..a851765c57 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -469,7 +469,7 @@ RPCHelpMan importpubkey()
if (!IsHex(request.params[0].get_str()))
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Pubkey must be a hex string");
std::vector<unsigned char> data(ParseHex(request.params[0].get_str()));
- CPubKey pubKey(data.begin(), data.end());
+ CPubKey pubKey(data);
if (!pubKey.IsFullyValid())
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Pubkey is not a valid public key");
@@ -871,7 +871,7 @@ static std::string RecurseImportData(const CScript& script, ImportData& import_d
switch (script_type) {
case TxoutType::PUBKEY: {
- CPubKey pubkey(solverdata[0].begin(), solverdata[0].end());
+ CPubKey pubkey(solverdata[0]);
import_data.used_keys.emplace(pubkey.GetID(), false);
return "";
}
@@ -893,7 +893,7 @@ static std::string RecurseImportData(const CScript& script, ImportData& import_d
}
case TxoutType::MULTISIG: {
for (size_t i = 1; i + 1 < solverdata.size(); ++i) {
- CPubKey pubkey(solverdata[i].begin(), solverdata[i].end());
+ CPubKey pubkey(solverdata[i]);
import_data.used_keys.emplace(pubkey.GetID(), false);
}
return "";
@@ -997,7 +997,7 @@ static UniValue ProcessImportLegacy(ImportData& import_data, std::map<CKeyID, CP
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Pubkey \"" + str + "\" must be a hex string");
}
auto parsed_pubkey = ParseHex(str);
- CPubKey pubkey(parsed_pubkey.begin(), parsed_pubkey.end());
+ CPubKey pubkey(parsed_pubkey);
if (!pubkey.IsFullyValid()) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Pubkey \"" + str + "\" is not a valid public key");
}
diff --git a/src/wallet/salvage.cpp b/src/wallet/salvage.cpp
index 6d912be019..ea045eb6d8 100644
--- a/src/wallet/salvage.cpp
+++ b/src/wallet/salvage.cpp
@@ -154,8 +154,8 @@ bool RecoverDatabaseFile(const fs::path& file_path, bilingual_str& error, std::v
warnings.push_back(strprintf(Untranslated("WARNING: WalletBatch::Recover skipping %s: %s"), strType, strErr));
continue;
}
- Dbt datKey(&row.first[0], row.first.size());
- Dbt datValue(&row.second[0], row.second.size());
+ Dbt datKey(row.first.data(), row.first.size());
+ Dbt datValue(row.second.data(), row.second.size());
int ret2 = pdbCopy->put(ptxn, &datKey, &datValue, DB_NOOVERWRITE);
if (ret2 > 0)
fSuccess = false;
diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp
index 149549410c..2eb9ca5c6d 100644
--- a/src/wallet/scriptpubkeyman.cpp
+++ b/src/wallet/scriptpubkeyman.cpp
@@ -162,7 +162,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s
break;
}
uint160 hash;
- CRIPEMD160().Write(&vSolutions[0][0], vSolutions[0].size()).Finalize(hash.begin());
+ CRIPEMD160().Write(vSolutions[0].data(), vSolutions[0].size()).Finalize(hash.begin());
CScriptID scriptID = CScriptID(hash);
CScript subscript;
if (keystore.GetCScript(scriptID, subscript)) {
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index 011d59ecaf..c8e3c8f819 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -11,6 +11,7 @@
#include <vector>
#include <interfaces/chain.h>
+#include <node/blockstorage.h>
#include <node/context.h>
#include <policy/policy.h>
#include <rpc/server.h>
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index f0aaee7e4e..3d0ef9f8c0 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -603,12 +603,12 @@ bool CWallet::EncryptWallet(const SecureString& strWalletPassphrase)
CKeyingMaterial _vMasterKey;
_vMasterKey.resize(WALLET_CRYPTO_KEY_SIZE);
- GetStrongRandBytes(&_vMasterKey[0], WALLET_CRYPTO_KEY_SIZE);
+ GetStrongRandBytes(_vMasterKey.data(), WALLET_CRYPTO_KEY_SIZE);
CMasterKey kMasterKey;
kMasterKey.vchSalt.resize(WALLET_CRYPTO_SALT_SIZE);
- GetStrongRandBytes(&kMasterKey.vchSalt[0], WALLET_CRYPTO_SALT_SIZE);
+ GetStrongRandBytes(kMasterKey.vchSalt.data(), WALLET_CRYPTO_SALT_SIZE);
CCrypter crypter;
int64_t nStartTime = GetTimeMillis();
diff --git a/src/zmq/zmqpublishnotifier.cpp b/src/zmq/zmqpublishnotifier.cpp
index 25afa94d0f..6ae866cc07 100644
--- a/src/zmq/zmqpublishnotifier.cpp
+++ b/src/zmq/zmqpublishnotifier.cpp
@@ -168,7 +168,7 @@ bool CZMQAbstractPublishNotifier::SendZmqMessage(const char *command, const void
/* send three parts, command & data & a LE 4byte sequence number */
unsigned char msgseq[sizeof(uint32_t)];
- WriteLE32(&msgseq[0], nSequence);
+ WriteLE32(msgseq, nSequence);
int rc = zmq_send_multipart(psocket, command, strlen(command), data, size, msgseq, (size_t)sizeof(uint32_t), nullptr);
if (rc == -1)
return false;
diff --git a/test/README.md b/test/README.md
index 17bf8a1406..f25cbb3f8d 100644
--- a/test/README.md
+++ b/test/README.md
@@ -273,7 +273,7 @@ Please be aware that on Linux distributions all dependencies are usually availab
Individual tests can be run by directly calling the test script, e.g.:
```
-test/lint/lint-filenames.sh
+test/lint/lint-files.sh
```
You can run all the shell-based lint tests by running:
diff --git a/test/functional/feature_cltv.py b/test/functional/feature_cltv.py
index f2130fb588..6c51944d81 100755
--- a/test/functional/feature_cltv.py
+++ b/test/functional/feature_cltv.py
@@ -11,11 +11,9 @@ Test that the CHECKLOCKTIMEVERIFY soft-fork activates at (regtest) block height
from test_framework.blocktools import (
create_block,
create_coinbase,
- create_transaction,
)
from test_framework.messages import (
CTransaction,
- ToHex,
msg_block,
)
from test_framework.p2p import P2PInterface
@@ -27,12 +25,8 @@ from test_framework.script import (
OP_DROP,
)
from test_framework.test_framework import BitcoinTestFramework
-from test_framework.util import (
- assert_equal,
- hex_str_to_bytes,
-)
-
-from io import BytesIO
+from test_framework.util import assert_equal
+from test_framework.wallet import MiniWallet
CLTV_HEIGHT = 1351
@@ -41,19 +35,14 @@ CLTV_HEIGHT = 1351
# 1) prepending a given script to the scriptSig of vin 0 and
# 2) (optionally) modify the nSequence of vin 0 and the tx's nLockTime
def cltv_modify_tx(node, tx, prepend_scriptsig, nsequence=None, nlocktime=None):
+ assert_equal(len(tx.vin), 1)
if nsequence is not None:
tx.vin[0].nSequence = nsequence
tx.nLockTime = nlocktime
- # Need to re-sign, since nSequence and nLockTime changed
- signed_result = node.signrawtransactionwithwallet(ToHex(tx))
- new_tx = CTransaction()
- new_tx.deserialize(BytesIO(hex_str_to_bytes(signed_result['hex'])))
- else:
- new_tx = tx
-
- new_tx.vin[0].scriptSig = CScript(prepend_scriptsig + list(CScript(new_tx.vin[0].scriptSig)))
- return new_tx
+ tx.vin[0].scriptSig = CScript(prepend_scriptsig + list(CScript(tx.vin[0].scriptSig)))
+ tx.rehash()
+ return tx
def cltv_invalidate(node, tx, failure_reason):
@@ -98,9 +87,6 @@ class BIP65Test(BitcoinTestFramework):
self.setup_clean_chain = True
self.rpc_timeout = 480
- def skip_test_if_missing_module(self):
- self.skip_if_no_wallet()
-
def test_cltv_info(self, *, is_active):
assert_equal(self.nodes[0].getblockchaininfo()['softforks']['bip65'], {
"active": is_active,
@@ -111,22 +97,21 @@ class BIP65Test(BitcoinTestFramework):
def run_test(self):
peer = self.nodes[0].add_p2p_connection(P2PInterface())
+ wallet = MiniWallet(self.nodes[0], raw_script=True)
self.test_cltv_info(is_active=False)
self.log.info("Mining %d blocks", CLTV_HEIGHT - 2)
- self.coinbase_txids = [self.nodes[0].getblock(b)['tx'][0] for b in self.nodes[0].generate(CLTV_HEIGHT - 2)]
- self.nodeaddress = self.nodes[0].getnewaddress()
+ wallet.generate(10)
+ self.nodes[0].generate(CLTV_HEIGHT - 2 - 10)
self.log.info("Test that invalid-according-to-CLTV transactions can still appear in a block")
# create one invalid tx per CLTV failure reason (5 in total) and collect them
invalid_ctlv_txs = []
for i in range(5):
- spendtx = create_transaction(self.nodes[0], self.coinbase_txids[i],
- self.nodeaddress, amount=1.0)
+ spendtx = wallet.create_self_transfer(from_node=self.nodes[0])['tx']
spendtx = cltv_invalidate(self.nodes[0], spendtx, i)
- spendtx.rehash()
invalid_ctlv_txs.append(spendtx)
tip = self.nodes[0].getbestblockhash()
@@ -160,10 +145,8 @@ class BIP65Test(BitcoinTestFramework):
# create and test one invalid tx per CLTV failure reason (5 in total)
for i in range(5):
- spendtx = create_transaction(self.nodes[0], self.coinbase_txids[10+i],
- self.nodeaddress, amount=1.0)
+ spendtx = wallet.create_self_transfer(from_node=self.nodes[0])['tx']
spendtx = cltv_invalidate(self.nodes[0], spendtx, i)
- spendtx.rehash()
expected_cltv_reject_reason = [
"non-mandatory-script-verify-flag (Operation not valid with the current stack size)",
@@ -197,7 +180,6 @@ class BIP65Test(BitcoinTestFramework):
self.log.info("Test that a version 4 block with a valid-according-to-CLTV transaction is accepted")
spendtx = cltv_validate(self.nodes[0], spendtx, CLTV_HEIGHT - 1)
- spendtx.rehash()
block.vtx.pop(1)
block.vtx.append(spendtx)
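
For illustration (a standalone sketch, not part of the patch; the dummy outpoint and values are made up): the reworked cltv_modify_tx no longer round-trips through the wallet, because MiniWallet's raw_script outputs carry no signature the helper can splice opcodes straight into vin[0].scriptSig and rehash.

from test_framework.messages import COutPoint, CTransaction, CTxIn, CTxOut
from test_framework.script import CScript, OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE

def prepend_scriptsig(tx, prepend):
    # Same idea as the reworked cltv_modify_tx: splice opcodes in front of the
    # existing scriptSig of the single input and recompute the txid in place.
    assert len(tx.vin) == 1
    tx.vin[0].scriptSig = CScript(prepend + list(CScript(tx.vin[0].scriptSig)))
    tx.rehash()
    return tx

tx = CTransaction()
tx.vin = [CTxIn(COutPoint(0xffff, 0))]
tx.vout = [CTxOut(1000, CScript([OP_TRUE]))]
# e.g. make the input invalid according to CLTV via a negative locktime argument:
prepend_scriptsig(tx, [OP_1NEGATE, OP_CHECKLOCKTIMEVERIFY, OP_DROP])
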
diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py
index a0bcd9f12a..de9d0d2e80 100755
--- a/test/functional/feature_config_args.py
+++ b/test/functional/feature_config_args.py
@@ -165,6 +165,7 @@ class ConfArgsTest(BitcoinTestFramework):
with self.nodes[0].assert_debug_log(expected_msgs=[
"Loaded 0 addresses from peers.dat",
"0 addresses found from DNS seeds",
+ "opencon thread start", # Ensure ThreadOpenConnections::start time is properly set
]):
self.start_node(0, extra_args=['-dnsseed=1', '-fixedseeds=1', f'-mocktime={start}'])
with self.nodes[0].assert_debug_log(expected_msgs=[
@@ -206,6 +207,7 @@ class ConfArgsTest(BitcoinTestFramework):
with self.nodes[0].assert_debug_log(expected_msgs=[
"Loaded 0 addresses from peers.dat",
"DNS seeding disabled",
+ "opencon thread start", # Ensure ThreadOpenConnections::start time is properly set
]):
self.start_node(0, extra_args=['-dnsseed=0', '-fixedseeds=1', '-addnode=fakenodeaddr', f'-mocktime={start}'])
with self.nodes[0].assert_debug_log(expected_msgs=[
diff --git a/test/functional/interface_bitcoin_cli.py b/test/functional/interface_bitcoin_cli.py
index 2cf0ef2251..b5ce18a48b 100755
--- a/test/functional/interface_bitcoin_cli.py
+++ b/test/functional/interface_bitcoin_cli.py
@@ -151,7 +151,7 @@ class TestBitcoinCli(BitcoinTestFramework):
assert_equal(cli_get_info['balance'], amounts[1])
self.log.info("Test -getinfo with -rpcwallet=unloaded wallet returns no balances")
- cli_get_info = self.nodes[0].cli('-getinfo', rpcwallet3).send_cli()
+ cli_get_info_keys = self.nodes[0].cli('-getinfo', rpcwallet3).send_cli().keys()
assert 'balance' not in cli_get_info_keys
assert 'balances' not in cli_get_info_keys
diff --git a/test/functional/p2p_blocksonly.py b/test/functional/p2p_blocksonly.py
index 445cea6186..ab2556cd72 100755
--- a/test/functional/p2p_blocksonly.py
+++ b/test/functional/p2p_blocksonly.py
@@ -6,22 +6,25 @@
import time
-from test_framework.blocktools import create_transaction
from test_framework.messages import msg_tx
from test_framework.p2p import P2PInterface, P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
+from test_framework.wallet import MiniWallet
class P2PBlocksOnly(BitcoinTestFramework):
def set_test_params(self):
+ self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-blocksonly"]]
- def skip_test_if_missing_module(self):
- self.skip_if_no_wallet()
-
def run_test(self):
+ self.miniwallet = MiniWallet(self.nodes[0])
+ # Add enough mature utxos to the wallet, so that all txs spend confirmed coins
+ self.miniwallet.generate(2)
+ self.nodes[0].generate(100)
+
self.blocksonly_mode_tests()
self.blocks_relay_conn_tests()
@@ -30,14 +33,14 @@ class P2PBlocksOnly(BitcoinTestFramework):
assert_equal(self.nodes[0].getnetworkinfo()['localrelay'], False)
self.nodes[0].add_p2p_connection(P2PInterface())
- tx, txid, tx_hex = self.check_p2p_tx_violation()
+ tx, txid, wtxid, tx_hex = self.check_p2p_tx_violation()
self.log.info('Check that txs from rpc are not rejected and relayed to other peers')
tx_relay_peer = self.nodes[0].add_p2p_connection(P2PInterface())
assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], True)
assert_equal(self.nodes[0].testmempoolaccept([tx_hex])[0]['allowed'], True)
- with self.nodes[0].assert_debug_log(['received getdata for: wtx {} peer=1'.format(txid)]):
+ with self.nodes[0].assert_debug_log(['received getdata for: wtx {} peer=1'.format(wtxid)]):
self.nodes[0].sendrawtransaction(tx_hex)
tx_relay_peer.wait_for_tx(txid)
assert_equal(self.nodes[0].getmempoolinfo()['size'], 1)
@@ -79,7 +82,7 @@ class P2PBlocksOnly(BitcoinTestFramework):
# Ensure we disconnect if a block-relay-only connection sends us a transaction
self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="block-relay-only")
assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], False)
- _, txid, tx_hex = self.check_p2p_tx_violation(index=2)
+ _, txid, _, tx_hex = self.check_p2p_tx_violation(index=2)
self.log.info("Check that txs from RPC are not sent to blockrelay connection")
conn = self.nodes[0].add_outbound_p2p_connection(P2PTxInvStore(), p2p_idx=1, connection_type="block-relay-only")
@@ -95,19 +98,18 @@ class P2PBlocksOnly(BitcoinTestFramework):
def check_p2p_tx_violation(self, index=1):
self.log.info('Check that txs from P2P are rejected and result in disconnect')
input_txid = self.nodes[0].getblock(self.nodes[0].getblockhash(index), 2)['tx'][0]['txid']
- tx = create_transaction(self.nodes[0], input_txid, self.nodes[0].getnewaddress(), amount=(50 - 0.001))
- txid = tx.rehash()
- tx_hex = tx.serialize().hex()
+ utxo_to_spend = self.miniwallet.get_utxo(txid=input_txid)
+ spendtx = self.miniwallet.create_self_transfer(from_node=self.nodes[0], utxo_to_spend=utxo_to_spend)
with self.nodes[0].assert_debug_log(['transaction sent in violation of protocol peer=0']):
- self.nodes[0].p2ps[0].send_message(msg_tx(tx))
+ self.nodes[0].p2ps[0].send_message(msg_tx(spendtx['tx']))
self.nodes[0].p2ps[0].wait_for_disconnect()
assert_equal(self.nodes[0].getmempoolinfo()['size'], 0)
# Remove the disconnected peer
del self.nodes[0].p2ps[0]
- return tx, txid, tx_hex
+ return spendtx['tx'], spendtx['txid'], spendtx['wtxid'], spendtx['hex']
if __name__ == '__main__':
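
For background (a standalone sketch, not part of the patch; it assumes the getwtxid() helper in test_framework.messages): the "getdata for: wtx" log line carries the wtxid, which only coincides with the txid for transactions without witness data, hence the extra value returned by check_p2p_tx_violation.

from test_framework.messages import COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut
from test_framework.script import CScript, OP_TRUE

tx = CTransaction()
tx.vin = [CTxIn(COutPoint(0xffff, 0))]
tx.vout = [CTxOut(1000, CScript([OP_TRUE]))]
txid = tx.rehash()   # hash of the serialization without witness data
tx.wit.vtxinwit = [CTxInWitness()]
tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
wtxid = tx.getwtxid()  # hash of the full serialization, including the witness
assert txid != wtxid
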
diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py
index c0b3c2cb12..788a81d4af 100755
--- a/test/functional/p2p_invalid_messages.py
+++ b/test/functional/p2p_invalid_messages.py
@@ -37,7 +37,7 @@ VALID_DATA_LIMIT = MAX_PROTOCOL_MESSAGE_LENGTH - 5 # Account for the 5-byte len
class msg_unrecognized:
"""Nonsensical message. Modeled after similar types in test_framework.messages."""
- msgtype = b'badmsg'
+ msgtype = b'badmsg\x01'
def __init__(self, *, str_data):
self.str_data = str_data.encode() if not isinstance(str_data, bytes) else str_data
@@ -104,7 +104,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
def test_magic_bytes(self):
self.log.info("Test message with invalid magic bytes disconnects peer")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
- with self.nodes[0].assert_debug_log(['HEADER ERROR - MESSAGESTART (badmsg, 2 bytes), received ffffffff']):
+ with self.nodes[0].assert_debug_log(['Header error: Wrong MessageStart ffffffff received']):
msg = conn.build_message(msg_unrecognized(str_data="d"))
# modify magic bytes
msg = b'\xff' * 4 + msg[4:]
@@ -115,7 +115,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
def test_checksum(self):
self.log.info("Test message with invalid checksum logs an error")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
- with self.nodes[0].assert_debug_log(['CHECKSUM ERROR (badmsg, 2 bytes), expected 78df0a04 was ffffffff']):
+ with self.nodes[0].assert_debug_log(['Header error: Wrong checksum (badmsg, 2 bytes), expected 78df0a04 was ffffffff']):
msg = conn.build_message(msg_unrecognized(str_data="d"))
# Checksum is after start bytes (4B), message type (12B), len (4B)
cut_len = 4 + 12 + 4
@@ -130,7 +130,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
def test_size(self):
self.log.info("Test message with oversized payload disconnects peer")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
- with self.nodes[0].assert_debug_log(['HEADER ERROR - SIZE (badmsg, 4000001 bytes)']):
+ with self.nodes[0].assert_debug_log(['Header error: Size too large (badmsg, 4000001 bytes)']):
msg = msg_unrecognized(str_data="d" * (VALID_DATA_LIMIT + 1))
msg = conn.build_message(msg)
conn.send_raw_message(msg)
@@ -140,7 +140,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
def test_msgtype(self):
self.log.info("Test message with invalid message type logs an error")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
- with self.nodes[0].assert_debug_log(['HEADER ERROR - COMMAND']):
+ with self.nodes[0].assert_debug_log(['Header error: Invalid message type']):
msg = msg_unrecognized(str_data="d")
msg = conn.build_message(msg)
# Modify msgtype
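
For orientation (a standalone, stdlib-only sketch; conn.build_message(...) used above does the real construction): the v1 transport header these tests slice into is laid out as magic(4) | command(12, NUL-padded) | length(4, little-endian) | checksum(4), with the checksum being the first four bytes of the double-SHA256 of the payload. That is why test_checksum cuts at 4 + 12 + 4 bytes.

import hashlib

def build_header(magic: bytes, msgtype: bytes, payload: bytes) -> bytes:
    # magic(4) + command(12, NUL-padded) + payload length (4, little-endian) + checksum(4)
    checksum = hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]
    return magic + msgtype.ljust(12, b"\x00") + len(payload).to_bytes(4, "little") + checksum
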
diff --git a/test/functional/rpc_addresses_deprecation.py b/test/functional/rpc_addresses_deprecation.py
index bc0559f3b5..bc0559f3b5 100644..100755
--- a/test/functional/rpc_addresses_deprecation.py
+++ b/test/functional/rpc_addresses_deprecation.py
diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py
index ac53a280b4..00324347ed 100755
--- a/test/functional/rpc_blockchain.py
+++ b/test/functional/rpc_blockchain.py
@@ -275,7 +275,7 @@ class BlockchainTest(BitcoinTestFramework):
assert 'muhash' in res6
assert(res['hash_serialized_2'] != res6['muhash'])
- # muhash should not be included in gettxoutset unless requested.
+ # muhash should not be returned unless requested.
for r in [res, res2, res3, res4, res5]:
assert 'muhash' not in r
diff --git a/test/functional/rpc_dumptxoutset.py b/test/functional/rpc_dumptxoutset.py
index e65787ce08..dc469ba552 100755
--- a/test/functional/rpc_dumptxoutset.py
+++ b/test/functional/rpc_dumptxoutset.py
@@ -41,7 +41,7 @@ class DumptxoutsetTest(BitcoinTestFramework):
digest = hashlib.sha256(f.read()).hexdigest()
# UTXO snapshot hash should be deterministic based on mocked time.
assert_equal(
- digest, 'be032e5f248264ba08e11099ac09dbd001f6f87ffc68bf0f87043d8146d50664')
+ digest, '7ae82c986fa5445678d2a21453bb1c86d39e47af13da137640c2b1cf8093691c')
# Specifying a path to an existing file will fail.
assert_raises_rpc_error(
diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py
index 5129ecb895..2e1b1e878f 100755
--- a/test/functional/rpc_fundrawtransaction.py
+++ b/test/functional/rpc_fundrawtransaction.py
@@ -728,10 +728,10 @@ class RawTransactionsTest(BitcoinTestFramework):
result6 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 0})
result_fee_rate = result['fee'] * 1000 / count_bytes(result['hex'])
- assert_fee_amount(result1['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
+ assert_fee_amount(result1['fee'], count_bytes(result1['hex']), 2 * result_fee_rate)
assert_fee_amount(result2['fee'], count_bytes(result2['hex']), 2 * result_fee_rate)
assert_fee_amount(result3['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
- assert_fee_amount(result4['fee'], count_bytes(result3['hex']), 10 * result_fee_rate)
+ assert_fee_amount(result4['fee'], count_bytes(result4['hex']), 10 * result_fee_rate)
assert_fee_amount(result5['fee'], count_bytes(result5['hex']), 0)
assert_fee_amount(result6['fee'], count_bytes(result6['hex']), 0)
diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py
index 59ef18635b..395b50c4d8 100644
--- a/test/functional/test_framework/wallet.py
+++ b/test/functional/test_framework/wallet.py
@@ -17,6 +17,7 @@ from test_framework.messages import (
from test_framework.script import (
CScript,
OP_TRUE,
+ OP_NOP,
)
from test_framework.util import (
assert_equal,
@@ -26,11 +27,15 @@ from test_framework.util import (
class MiniWallet:
- def __init__(self, test_node):
+ def __init__(self, test_node, *, raw_script=False):
self._test_node = test_node
self._utxos = []
- self._address = ADDRESS_BCRT1_P2WSH_OP_TRUE
- self._scriptPubKey = hex_str_to_bytes(self._test_node.validateaddress(self._address)['scriptPubKey'])
+ if raw_script:
+ self._address = None
+ self._scriptPubKey = bytes(CScript([OP_TRUE]))
+ else:
+ self._address = ADDRESS_BCRT1_P2WSH_OP_TRUE
+ self._scriptPubKey = hex_str_to_bytes(self._test_node.validateaddress(self._address)['scriptPubKey'])
def scan_blocks(self, *, start=1, num):
"""Scan the blocks for self._address outputs and add them to self._utxos"""
@@ -47,7 +52,7 @@ class MiniWallet:
def generate(self, num_blocks):
"""Generate blocks with coinbase outputs to the internal address, and append the outputs to the internal list"""
- blocks = self._test_node.generatetoaddress(num_blocks, self._address)
+ blocks = self._test_node.generatetodescriptor(num_blocks, f'raw({self._scriptPubKey.hex()})')
for b in blocks:
cb_tx = self._test_node.getblock(blockhash=b, verbosity=2)['tx'][0]
self._utxos.append({'txid': cb_tx['txid'], 'vout': 0, 'value': cb_tx['vout'][0]['value']})
@@ -89,8 +94,12 @@ class MiniWallet:
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(utxo_to_spend['txid'], 16), utxo_to_spend['vout']))]
tx.vout = [CTxOut(int(send_value * COIN), self._scriptPubKey)]
- tx.wit.vtxinwit = [CTxInWitness()]
- tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
+ if not self._address:
+ # raw script
+ tx.vin[0].scriptSig = CScript([OP_NOP] * 35) # pad to identical size
+ else:
+ tx.wit.vtxinwit = [CTxInWitness()]
+ tx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])]
tx_hex = tx.serialize().hex()
tx_info = from_node.testmempoolaccept([tx_hex])[0]
@@ -98,7 +107,7 @@ class MiniWallet:
if mempool_valid:
assert_equal(tx_info['vsize'], vsize)
assert_equal(tx_info['fees']['base'], fee)
- return {'txid': tx_info['txid'], 'wtxid': tx_info['wtxid'], 'hex': tx_hex}
+ return {'txid': tx_info['txid'], 'wtxid': tx_info['wtxid'], 'hex': tx_hex, 'tx': tx}
def sendrawtransaction(self, *, from_node, tx_hex):
from_node.sendrawtransaction(tx_hex)
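
A brief usage sketch of the new raw_script mode (illustrative only; the node handle and block counts are assumptions, not part of the patch): outputs pay to a bare OP_TRUE script, so spends need neither a witness nor a wallet, which is what lets feature_cltv.py mangle the scriptSig directly.

from test_framework.wallet import MiniWallet

wallet = MiniWallet(node, raw_script=True)  # outputs use a bare CScript([OP_TRUE])
wallet.generate(1)                          # mine a coinbase paying to the raw script
node.generate(100)                          # let the coinbase mature
spend = wallet.create_self_transfer(from_node=node)
node.sendrawtransaction(spend['hex'])       # anyone-can-spend, no signing involved
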
diff --git a/test/functional/wallet_importdescriptors.py b/test/functional/wallet_importdescriptors.py
index ed62ef0e9d..0a3dd56620 100755
--- a/test/functional/wallet_importdescriptors.py
+++ b/test/functional/wallet_importdescriptors.py
@@ -459,6 +459,77 @@ class ImportDescriptorsTest(BitcoinTestFramework):
assert_equal(tx_signed_2['complete'], True)
self.nodes[1].sendrawtransaction(tx_signed_2['hex'])
+ self.log.info("We can create and use a huge multisig under P2WSH")
+ self.nodes[1].createwallet(wallet_name='wmulti_priv_big', blank=True, descriptors=True)
+ wmulti_priv_big = self.nodes[1].get_wallet_rpc('wmulti_priv_big')
+ xkey = "tprv8ZgxMBicQKsPeZSeYx7VXDDTs3XrTcmZQpRLbAeSQFCQGgKwR4gKpcxHaKdoTNHniv4EPDJNdzA3KxRrrBHcAgth8fU5X4oCndkkxk39iAt/*"
+ xkey_int = "tprv8ZgxMBicQKsPeZSeYx7VXDDTs3XrTcmZQpRLbAeSQFCQGgKwR4gKpcxHaKdoTNHniv4EPDJNdzA3KxRrrBHcAgth8fU5X4oCndkkxk39iAt/1/*"
+ res = wmulti_priv_big.importdescriptors([
+ {
+ "desc": descsum_create(f"wsh(sortedmulti(20,{(xkey + ',') * 19}{xkey}))"),
+ "active": True,
+ "range": 1000,
+ "next_index": 0,
+ "timestamp": "now"
+ },
+ {
+ "desc": descsum_create(f"wsh(sortedmulti(20,{(xkey_int + ',') * 19}{xkey_int}))"),
+ "active": True,
+ "internal": True,
+ "range": 1000,
+ "next_index": 0,
+ "timestamp": "now"
+ }])
+ assert_equal(res[0]['success'], True)
+ assert_equal(res[1]['success'], True)
+
+ addr = wmulti_priv_big.getnewaddress()
+ w0.sendtoaddress(addr, 10)
+ self.nodes[0].generate(1)
+ self.sync_all()
+ # It is standard and would relay.
+ txid = wmulti_priv_big.sendtoaddress(w0.getnewaddress(), 9.999)
+ decoded = wmulti_priv_big.decoderawtransaction(wmulti_priv_big.gettransaction(txid)['hex'])
+ # 20 sigs + dummy + witness script
+ assert_equal(len(decoded['vin'][0]['txinwitness']), 22)
+
+
+ self.log.info("Under P2SH, multisigs are standard with up to 15 "
+ "compressed keys")
+ self.nodes[1].createwallet(wallet_name='multi_priv_big_legacy',
+ blank=True, descriptors=True)
+ multi_priv_big = self.nodes[1].get_wallet_rpc('multi_priv_big_legacy')
+ res = multi_priv_big.importdescriptors([
+ {
+ "desc": descsum_create(f"sh(multi(15,{(xkey + ',') * 14}{xkey}))"),
+ "active": True,
+ "range": 1000,
+ "next_index": 0,
+ "timestamp": "now"
+ },
+ {
+ "desc": descsum_create(f"sh(multi(15,{(xkey_int + ',') * 14}{xkey_int}))"),
+ "active": True,
+ "internal": True,
+ "range": 1000,
+ "next_index": 0,
+ "timestamp": "now"
+ }])
+ assert_equal(res[0]['success'], True)
+ assert_equal(res[1]['success'], True)
+
+ addr = multi_priv_big.getnewaddress("", "legacy")
+ w0.sendtoaddress(addr, 10)
+ self.nodes[0].generate(6)
+ self.sync_all()
+ # It is standard and would relay.
+ txid = multi_priv_big.sendtoaddress(w0.getnewaddress(), 10, "", "",
+ True)
+ decoded = multi_priv_big.decoderawtransaction(
+ multi_priv_big.gettransaction(txid)['hex']
+ )
+
+
self.log.info("Combo descriptors cannot be active")
self.test_importdesc({"desc": descsum_create("combo(tpubDCJtdt5dgJpdhW4MtaVYDhG4T4tF6jcLR1PxL43q9pq1mxvXgMS9Mzw1HnXG15vxUGQJMMSqCQHMTy3F1eW5VkgVroWzchsPD5BUojrcWs8/*)"),
"active": True,
diff --git a/test/lint/lint-filenames.sh b/test/lint/lint-filenames.sh
deleted file mode 100755
index 3f7491cd2b..0000000000
--- a/test/lint/lint-filenames.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-#
-# Copyright (c) 2018-2019 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#
-# Make sure only lowercase alphanumerics (a-z0-9), underscores (_),
-# hyphens (-) and dots (.) are used in source code filenames.
-
-export LC_ALL=C
-
-EXIT_CODE=0
-OUTPUT=$(git ls-files --full-name -- "*.[cC][pP][pP]" "*.[hH]" "*.[pP][yY]" "*.[sS][hH]" | \
- grep -vE '^[a-z0-9_./-]+$' | \
- grep -vE '^src/(secp256k1/|univalue/|test/fuzz/FuzzedDataProvider.h)')
-
-if [[ ${OUTPUT} != "" ]]; then
- echo "Use only lowercase alphanumerics (a-z0-9), underscores (_), hyphens (-) and dots (.)"
- echo "in source code filenames:"
- echo
- echo "${OUTPUT}"
- EXIT_CODE=1
-fi
-exit ${EXIT_CODE}
diff --git a/test/lint/lint-files.py b/test/lint/lint-files.py
new file mode 100755
index 0000000000..400921e5f3
--- /dev/null
+++ b/test/lint/lint-files.py
@@ -0,0 +1,203 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+"""
+This checks that all files in the repository have correct filenames and permissions
+"""
+
+import os
+import re
+import sys
+from subprocess import check_output
+from typing import Optional, NoReturn
+
+CMD_ALL_FILES = "git ls-files -z --full-name"
+CMD_SOURCE_FILES = 'git ls-files -z --full-name -- "*.[cC][pP][pP]" "*.[hH]" "*.[pP][yY]" "*.[sS][hH]"'
+CMD_SHEBANG_FILES = "git grep --full-name --line-number -I '^#!'"
+ALLOWED_FILENAME_REGEXP = "^[a-zA-Z0-9/_.@][a-zA-Z0-9/_.@-]*$"
+ALLOWED_SOURCE_FILENAME_REGEXP = "^[a-z0-9_./-]+$"
+ALLOWED_SOURCE_FILENAME_EXCEPTION_REGEXP = (
+ "^src/(secp256k1/|univalue/|test/fuzz/FuzzedDataProvider.h)"
+)
+ALLOWED_PERMISSION_NON_EXECUTABLES = 644
+ALLOWED_PERMISSION_EXECUTABLES = 755
+ALLOWED_EXECUTABLE_SHEBANG = {
+ "py": [b"#!/usr/bin/env python3"],
+ "sh": [b"#!/usr/bin/env bash", b"#!/bin/sh"],
+}
+
+
+class FileMeta(object):
+ def __init__(self, file_path: str):
+ self.file_path = file_path
+
+ @property
+ def extension(self) -> Optional[str]:
+ """
+ Returns the file extension for a given filename string.
+ eg:
+ 'ci/lint_run_all.sh' -> 'sh'
+ 'ci/retry/retry' -> None
+ 'contrib/devtools/split-debug.sh.in' -> 'in'
+ """
+ return os.path.splitext(self.file_path)[1].strip(".") or None
+
+ @property
+ def full_extension(self) -> Optional[str]:
+ """
+ Returns the full file extension for a given filename string.
+ eg:
+ 'ci/lint_run_all.sh' -> 'sh'
+ 'ci/retry/retry' -> None
+ 'contrib/devtools/split-debug.sh.in' -> 'sh.in'
+ """
+ filename_parts = self.file_path.split(os.extsep, 1)
+ try:
+ return filename_parts[1]
+ except IndexError:
+ return None
+
+ @property
+ def permissions(self) -> int:
+ """
+ Returns the octal file permission of the file
+ """
+ return int(oct(os.stat(self.file_path).st_mode)[-3:])
+
+
+def check_all_filenames() -> int:
+ """
+ Checks every file in the repository against an allowed regexp to make sure only lowercase or uppercase
+ alphanumerics (a-zA-Z0-9), underscores (_), hyphens (-), at (@) and dots (.) are used in repository filenames.
+ """
+ filenames = check_output(CMD_ALL_FILES, shell=True).decode("utf8").rstrip("\0").split("\0")
+ filename_regex = re.compile(ALLOWED_FILENAME_REGEXP)
+ failed_tests = 0
+ for filename in filenames:
+ if not filename_regex.match(filename):
+ print(
+ f"""File {repr(filename)} does not match the allowed filename regexp ('{ALLOWED_FILENAME_REGEXP}')."""
+ )
+ failed_tests += 1
+ return failed_tests
+
+
+def check_source_filenames() -> int:
+ """
+ Checks only source files (*.cpp, *.h, *.py, *.sh) against a stricter allowed regexp to make sure only lowercase
+ alphanumerics (a-z0-9), underscores (_), hyphens (-) and dots (.) are used in source code filenames.
+
+ Additionally there is an exception regexp for directories or files which are excepted from matching this regexp.
+ """
+ filenames = check_output(CMD_SOURCE_FILES, shell=True).decode("utf8").rstrip("\0").split("\0")
+ filename_regex = re.compile(ALLOWED_SOURCE_FILENAME_REGEXP)
+ filename_exception_regex = re.compile(ALLOWED_SOURCE_FILENAME_EXCEPTION_REGEXP)
+ failed_tests = 0
+ for filename in filenames:
+ if not filename_regex.match(filename) and not filename_exception_regex.match(filename):
+ print(
+ f"""File {repr(filename)} does not match the allowed source filename regexp ('{ALLOWED_SOURCE_FILENAME_REGEXP}'), or the exception regexp ({ALLOWED_SOURCE_FILENAME_EXCEPTION_REGEXP})."""
+ )
+ failed_tests += 1
+ return failed_tests
+
+
+def check_all_file_permissions() -> int:
+ """
+ Checks all files in the repository match an allowed executable or non-executable file permission octal.
+
+ Additionally checks that for executable files, the file contains a shebang line
+ """
+ filenames = check_output(CMD_ALL_FILES, shell=True).decode("utf8").rstrip("\0").split("\0")
+ failed_tests = 0
+ for filename in filenames:
+ file_meta = FileMeta(filename)
+ if file_meta.permissions == ALLOWED_PERMISSION_EXECUTABLES:
+ with open(filename, "rb") as f:
+ shebang = f.readline().rstrip(b"\n")
+
+ # For any file with executable permissions the first line must contain a shebang
+ if not shebang.startswith(b"#!"):
+ print(
+ f"""File "{filename}" has permission {ALLOWED_PERMISSION_EXECUTABLES} (executable) and is thus expected to contain a shebang '#!'. Add shebang or do "chmod {ALLOWED_PERMISSION_NON_EXECUTABLES} {filename}" to make it non-executable."""
+ )
+ failed_tests += 1
+
+ # For certain file extensions that have been defined, we also check that the shebang conforms to a specific
+ # allowable set of shebangs
+ if file_meta.extension in ALLOWED_EXECUTABLE_SHEBANG.keys():
+ if shebang not in ALLOWED_EXECUTABLE_SHEBANG[file_meta.extension]:
+ print(
+ f"""File "{filename}" is missing expected shebang """
+ + " or ".join(
+ [
+ x.decode("utf-8")
+ for x in ALLOWED_EXECUTABLE_SHEBANG[file_meta.extension]
+ ]
+ )
+ )
+ failed_tests += 1
+
+ elif file_meta.permissions == ALLOWED_PERMISSION_NON_EXECUTABLES:
+ continue
+ else:
+ print(
+ f"""File "{filename}" has unexpected permission {file_meta.permissions}. Do "chmod {ALLOWED_PERMISSION_NON_EXECUTABLES} {filename}" (if non-executable) or "chmod {ALLOWED_PERMISSION_EXECUTABLES} {filename}" (if executable)."""
+ )
+ failed_tests += 1
+
+ return failed_tests
+
+
+def check_shebang_file_permissions() -> int:
+ """
+ Checks every file that contains a shebang line to ensure it has an executable permission
+ """
+ filenames = check_output(CMD_SHEBANG_FILES, shell=True).decode("utf8").strip().split("\n")
+
+ # The git grep command we use returns files which contain a shebang on any line within the file
+ # so we need to filter the list to only files with the shebang on the first line
+ filenames = [filename.split(":1:")[0] for filename in filenames if ":1:" in filename]
+
+ failed_tests = 0
+ for filename in filenames:
+ file_meta = FileMeta(filename)
+ if file_meta.permissions != ALLOWED_PERMISSION_EXECUTABLES:
+ # These file types are typically expected to be sourced and not executed directly
+ if file_meta.full_extension in ["bash", "init", "openrc", "sh.in"]:
+ continue
+
+ # *.py files which don't contain an `if __name__ == '__main__'` are not expected to be executed directly
+ if file_meta.extension == "py":
+ with open(filename, "r", encoding="utf8") as f:
+ file_data = f.read()
+ if not re.search("""if __name__ == ['"]__main__['"]:""", file_data):
+ continue
+
+ print(
+ f"""File "{filename}" contains a shebang line, but has the file permission {file_meta.permissions} instead of the expected executable permission {ALLOWED_PERMISSION_EXECUTABLES}. Do "chmod {ALLOWED_PERMISSION_EXECUTABLES} {filename}" (or remove the shebang line)."""
+ )
+ failed_tests += 1
+ return failed_tests
+
+
+def main() -> NoReturn:
+ failed_tests = 0
+ failed_tests += check_all_filenames()
+ failed_tests += check_source_filenames()
+ failed_tests += check_all_file_permissions()
+ failed_tests += check_shebang_file_permissions()
+
+ if failed_tests:
+ print(
+ f"ERROR: There were {failed_tests} failed tests in the lint-files.py lint test. Please resolve the above errors."
+ )
+ sys.exit(1)
+ else:
+ sys.exit(0)
+
+
+if __name__ == "__main__":
+ main()
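
For reference (a standalone, stdlib-only sketch; the temporary file is only for demonstration): the permissions property compares the last three octal digits of st_mode as a plain integer, so a chmod 755 file yields 755.

import os
import tempfile

with tempfile.NamedTemporaryFile(delete=False) as f:
    path = f.name
os.chmod(path, 0o755)
assert int(oct(os.stat(path).st_mode)[-3:]) == 755  # '0o100755' -> '755' -> 755
os.unlink(path)
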
diff --git a/test/lint/lint-files.sh b/test/lint/lint-files.sh
new file mode 100755
index 0000000000..1e115778bd
--- /dev/null
+++ b/test/lint/lint-files.sh
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+
+export LC_ALL=C
+
+set -e
+cd "$(dirname "$0")/../.."
+test/lint/lint-files.py
diff --git a/test/lint/lint-locale-dependence.sh b/test/lint/lint-locale-dependence.sh
index e5657f7555..737d35a397 100755
--- a/test/lint/lint-locale-dependence.sh
+++ b/test/lint/lint-locale-dependence.sh
@@ -43,7 +43,7 @@ KNOWN_VIOLATIONS=(
"src/dbwrapper.cpp.*stoul"
"src/dbwrapper.cpp:.*vsnprintf"
"src/httprpc.cpp.*trim"
- "src/init.cpp:.*atoi"
+ "src/node/blockstorage.cpp:.*atoi"
"src/qt/rpcconsole.cpp:.*atoi"
"src/rest.cpp:.*strtol"
"src/test/dbwrapper_tests.cpp:.*snprintf"
diff --git a/test/lint/lint-shebang.sh b/test/lint/lint-shebang.sh
deleted file mode 100755
index 13ebdfd78a..0000000000
--- a/test/lint/lint-shebang.sh
+++ /dev/null
@@ -1,24 +0,0 @@
-#!/usr/bin/env bash
-# Copyright (c) 2018-2020 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-# Assert expected shebang lines
-
-export LC_ALL=C
-EXIT_CODE=0
-for PYTHON_FILE in $(git ls-files -- "*.py"); do
- if [[ $(head -c 2 "${PYTHON_FILE}") == "#!" &&
- $(head -n 1 "${PYTHON_FILE}") != "#!/usr/bin/env python3" ]]; then
- echo "Missing shebang \"#!/usr/bin/env python3\" in ${PYTHON_FILE} (do not use python or python2)"
- EXIT_CODE=1
- fi
-done
-for SHELL_FILE in $(git ls-files -- "*.sh"); do
- if [[ $(head -n 1 "${SHELL_FILE}") != "#!/usr/bin/env bash" &&
- $(head -n 1 "${SHELL_FILE}") != "#!/bin/sh" ]]; then
- echo "Missing expected shebang \"#!/usr/bin/env bash\" or \"#!/bin/sh\" in ${SHELL_FILE}"
- EXIT_CODE=1
- fi
-done
-exit ${EXIT_CODE}
diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan
index 3bcc554acb..b66ca00376 100644
--- a/test/sanitizer_suppressions/ubsan
+++ b/test/sanitizer_suppressions/ubsan
@@ -5,6 +5,8 @@
# names can be used.
# See https://github.com/google/sanitizers/issues/1364
signed-integer-overflow:txmempool.cpp
+# https://github.com/bitcoin/bitcoin/pull/21798#issuecomment-829180719
+signed-integer-overflow:policy/feerate.cpp
# -fsanitize=integer suppressions
# ===============================
@@ -85,6 +87,7 @@ implicit-signed-integer-truncation:chain.h
implicit-signed-integer-truncation:crypto/
implicit-signed-integer-truncation:cuckoocache.h
implicit-signed-integer-truncation:leveldb/
+implicit-signed-integer-truncation:miner.cpp
implicit-signed-integer-truncation:net.cpp
implicit-signed-integer-truncation:net_processing.cpp
implicit-signed-integer-truncation:streams.h