-rw-r--r--  .cirrus.yml | 4
-rw-r--r--  Makefile.am | 15
-rw-r--r--  build-aux/m4/bitcoin_find_bdb48.m4 | 19
-rw-r--r--  build_msvc/README.md | 66
-rw-r--r--  build_msvc/common.qt.init.vcxproj | 2
-rwxr-xr-x  ci/lint/04_install.sh | 3
-rwxr-xr-x  ci/test/00_setup_env_android.sh | 2
-rwxr-xr-x  ci/test/06_script_a.sh | 2
-rw-r--r--  configure.ac | 42
-rw-r--r--  contrib/devtools/pixie.py | 323
-rwxr-xr-x  contrib/devtools/security-check.py | 183
-rwxr-xr-x  contrib/devtools/symbol-check.py | 163
-rwxr-xr-x  contrib/devtools/test-security-check.py | 12
-rwxr-xr-x  contrib/devtools/test-symbol-check.py | 15
-rwxr-xr-x  contrib/guix/libexec/build.sh | 4
-rwxr-xr-x  contrib/signet/getcoins.py | 94
-rw-r--r--  contrib/tracing/README.md | 7
-rwxr-xr-x  contrib/tracing/connectblock_benchmark.bt | 22
-rw-r--r--  depends/packages/expat.mk | 2
-rw-r--r--  depends/packages/fontconfig.mk | 2
-rw-r--r--  depends/packages/freetype.mk | 2
-rw-r--r--  depends/packages/libXau.mk | 2
-rw-r--r--  depends/packages/qt.mk | 1
-rw-r--r--  doc/descriptors.md | 41
-rw-r--r--  doc/developer-notes.md | 30
-rw-r--r--  doc/psbt.md | 3
-rw-r--r--  doc/release-notes-23093.md | 11
-rw-r--r--  doc/release-notes.md | 6
-rw-r--r--  doc/tracing.md | 9
-rw-r--r--  src/Makefile.am | 15
-rw-r--r--  src/Makefile.qt.include | 14
-rw-r--r--  src/Makefile.test.include | 3
-rw-r--r--  src/addrdb.cpp | 30
-rw-r--r--  src/addrdb.h | 8
-rw-r--r--  src/addrman.cpp | 535
-rw-r--r--  src/addrman.h | 374
-rw-r--r--  src/addrman_impl.h | 271
-rw-r--r--  src/bench/addrman.cpp | 20
-rw-r--r--  src/bitcoin-cli.cpp | 2
-rw-r--r--  src/bitcoin-tx.cpp | 19
-rw-r--r--  src/chainparams.cpp | 28
-rw-r--r--  src/core_read.cpp | 53
-rw-r--r--  src/dbwrapper.cpp | 26
-rw-r--r--  src/flatfile.cpp | 4
-rw-r--r--  src/fs.cpp | 8
-rw-r--r--  src/fs.h | 132
-rw-r--r--  src/hash.cpp | 5
-rw-r--r--  src/i2p.cpp | 2
-rw-r--r--  src/init.cpp | 26
-rw-r--r--  src/init/common.cpp | 14
-rw-r--r--  src/interfaces/chain.h | 2
-rw-r--r--  src/ipc/process.cpp | 4
-rw-r--r--  src/key.cpp | 5
-rw-r--r--  src/logging.cpp | 1
-rw-r--r--  src/logging.h | 1
-rw-r--r--  src/net.cpp | 15
-rw-r--r--  src/net.h | 4
-rw-r--r--  src/net_processing.cpp | 10
-rw-r--r--  src/net_processing.h | 4
-rw-r--r--  src/netaddress.cpp | 2
-rw-r--r--  src/netaddress.h | 2
-rw-r--r--  src/node/blockstorage.cpp | 19
-rw-r--r--  src/node/context.h | 4
-rw-r--r--  src/node/interfaces.cpp | 2
-rw-r--r--  src/policy/fees.cpp | 4
-rw-r--r--  src/policy/policy.cpp | 7
-rw-r--r--  src/pubkey.cpp | 5
-rw-r--r--  src/qt/bitcoin.cpp | 3
-rw-r--r--  src/qt/guiutil.cpp | 10
-rw-r--r--  src/qt/guiutil.h | 5
-rw-r--r--  src/qt/intro.cpp | 2
-rw-r--r--  src/qt/rpcconsole.cpp | 9
-rw-r--r--  src/qt/walletcontroller.cpp | 24
-rw-r--r--  src/qt/walletcontroller.h | 2
-rw-r--r--  src/rest.cpp | 12
-rw-r--r--  src/rpc/blockchain.cpp | 8
-rw-r--r--  src/rpc/request.cpp | 8
-rw-r--r--  src/rpc/server.cpp | 2
-rw-r--r--  src/test/addrman_tests.cpp | 211
-rw-r--r--  src/test/fs_tests.cpp | 27
-rw-r--r--  src/test/fuzz/addrman.cpp | 194
-rw-r--r--  src/test/fuzz/banman.cpp | 4
-rw-r--r--  src/test/fuzz/connman.cpp | 2
-rw-r--r--  src/test/fuzz/deserialize.cpp | 5
-rw-r--r--  src/test/script_parse_tests.cpp | 55
-rw-r--r--  src/test/settings_tests.cpp | 6
-rw-r--r--  src/test/util/chainstate.h | 2
-rw-r--r--  src/test/util/setup_common.cpp | 6
-rw-r--r--  src/test/util_tests.cpp | 26
-rw-r--r--  src/test/validation_chainstate_tests.cpp | 23
-rw-r--r--  src/torcontrol.cpp | 8
-rw-r--r--  src/util/asmap.cpp | 4
-rw-r--r--  src/util/settings.cpp | 12
-rw-r--r--  src/util/strencodings.h | 4
-rw-r--r--  src/util/syscall_sandbox.cpp | 116
-rw-r--r--  src/util/system.cpp | 28
-rw-r--r--  src/validation.cpp | 34
-rw-r--r--  src/wallet/bdb.cpp | 38
-rw-r--r--  src/wallet/bdb.h | 4
-rw-r--r--  src/wallet/coincontrol.h | 2
-rw-r--r--  src/wallet/db.cpp | 15
-rw-r--r--  src/wallet/dump.cpp | 10
-rw-r--r--  src/wallet/interfaces.cpp | 4
-rw-r--r--  src/wallet/load.cpp | 16
-rw-r--r--  src/wallet/rpcdump.cpp | 8
-rw-r--r--  src/wallet/rpcwallet.cpp | 153
-rw-r--r--  src/wallet/scriptpubkeyman.cpp | 2
-rw-r--r--  src/wallet/spend.cpp | 4
-rw-r--r--  src/wallet/sqlite.cpp | 4
-rw-r--r--  src/wallet/test/db_tests.cpp | 8
-rw-r--r--  src/wallet/test/init_test_fixture.cpp | 8
-rw-r--r--  src/wallet/test/init_tests.cpp | 8
-rw-r--r--  src/wallet/test/wallet_tests.cpp | 2
-rw-r--r--  src/wallet/wallet.cpp | 6
-rw-r--r--  src/wallet/walletdb.cpp | 16
-rw-r--r--  src/wallet/wallettool.cpp | 2
-rw-r--r--  src/wallet/walletutil.cpp | 2
-rw-r--r--  test/README.md | 15
-rwxr-xr-x  test/functional/combine_logs.py | 2
-rw-r--r--  test/functional/data/__init__.py | 0
-rwxr-xr-x  test/functional/feature_addrman.py | 4
-rwxr-xr-x  test/functional/feature_bip68_sequence.py | 3
-rwxr-xr-x  test/functional/feature_fee_estimation.py | 149
-rwxr-xr-x  test/functional/feature_segwit.py | 6
-rwxr-xr-x  test/functional/feature_taproot.py | 3
-rwxr-xr-x  test/functional/interface_rest.py | 7
-rwxr-xr-x  test/functional/interface_zmq.py | 2
-rwxr-xr-x  test/functional/mempool_package_limits.py | 2
-rwxr-xr-x  test/functional/mempool_packages.py | 9
-rwxr-xr-x  test/functional/mining_prioritisetransaction.py | 2
-rwxr-xr-x  test/functional/p2p_compactblocks_blocksonly.py | 2
-rwxr-xr-x  test/functional/p2p_segwit.py | 5
-rwxr-xr-x  test/functional/rpc_fundrawtransaction.py | 3
-rwxr-xr-x  test/functional/rpc_invalid_address_message.py | 12
-rwxr-xr-x  test/functional/rpc_misc.py | 2
-rwxr-xr-x  test/functional/rpc_psbt.py | 3
-rwxr-xr-x  test/functional/rpc_signrawtransaction.py | 4
-rw-r--r--  test/functional/test_framework/blocktools.py | 4
-rwxr-xr-x  test/functional/test_framework/script_util.py | 13
-rwxr-xr-x  test/functional/test_framework/test_framework.py | 8
-rwxr-xr-x  test/functional/test_framework/test_node.py | 16
-rw-r--r--  test/functional/test_framework/wallet.py | 23
-rwxr-xr-x  test/functional/test_runner.py | 3
-rwxr-xr-x  test/functional/wallet_abandonconflict.py | 37
-rwxr-xr-x  test/functional/wallet_descriptor.py | 2
-rwxr-xr-x  test/functional/wallet_importdescriptors.py | 4
-rwxr-xr-x  test/functional/wallet_keypool.py | 14
-rwxr-xr-x  test/functional/wallet_multisig_descriptor_psbt.py | 163
-rwxr-xr-x  test/functional/wallet_send.py | 3
-rwxr-xr-x  test/functional/wallet_transactiontime_rescan.py | 8
-rwxr-xr-x  test/functional/wallet_upgradewallet.py | 20
-rwxr-xr-x  test/lint/lint-locale-dependence.sh | 5
-rwxr-xr-x  test/lint/lint-python.sh | 2
-rw-r--r--  test/util/data/bitcoin-util-test.json | 54
154 files changed, 2564 insertions, 1981 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 48747ac138..05b264fb73 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -79,7 +79,7 @@ task:
<< : *FILTER_TEMPLATE
windows_container:
cpu: 4
- memory: 16G
+ memory: 8G
image: cirrusci/windowsservercore:visualstudio2019
timeout_in: 120m
env:
@@ -90,7 +90,7 @@ task:
QT_DOWNLOAD_URL: 'https://download.qt.io/official_releases/qt/5.12/5.12.11/single/qt-everywhere-src-5.12.11.zip'
QT_LOCAL_PATH: 'C:\qt-everywhere-src-5.12.11.zip'
QT_SOURCE_DIR: 'C:\qt-everywhere-src-5.12.11'
- QTBASEDIR: 'C:\Qt5.12.11_x64_static_vs2019_160900'
+ QTBASEDIR: 'C:\Qt_static'
x64_NATIVE_TOOLS: '"C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Auxiliary\Build\vcvars64.bat"'
IgnoreWarnIntDirInTempDetected: 'true'
merge_script:
diff --git a/Makefile.am b/Makefile.am
index ce66331910..af63cf0cbb 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -58,8 +58,7 @@ DIST_SHARE = \
BIN_CHECKS=$(top_srcdir)/contrib/devtools/symbol-check.py \
$(top_srcdir)/contrib/devtools/security-check.py \
- $(top_srcdir)/contrib/devtools/utils.py \
- $(top_srcdir)/contrib/devtools/pixie.py
+ $(top_srcdir)/contrib/devtools/utils.py
WINDOWS_PACKAGING = $(top_srcdir)/share/pixmaps/bitcoin.ico \
$(top_srcdir)/share/pixmaps/nsis-header.bmp \
@@ -367,14 +366,14 @@ clean-local: clean-docs
test-security-check:
if TARGET_DARWIN
- $(AM_V_at) CC='$(CC)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_MACHO
- $(AM_V_at) CC='$(CC)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_MACHO
+ $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_MACHO
+ $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_MACHO
endif
if TARGET_WINDOWS
- $(AM_V_at) CC='$(CC)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_PE
- $(AM_V_at) CC='$(CC)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_PE
+ $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_PE
+ $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_PE
endif
if TARGET_LINUX
- $(AM_V_at) CC='$(CC)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_ELF
- $(AM_V_at) CC='$(CC)' CPPFILT='$(CPPFILT)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_ELF
+ $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_ELF
+ $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_ELF
endif
diff --git a/build-aux/m4/bitcoin_find_bdb48.m4 b/build-aux/m4/bitcoin_find_bdb48.m4
index 5fc5b493d3..3d6c8210ed 100644
--- a/build-aux/m4/bitcoin_find_bdb48.m4
+++ b/build-aux/m4/bitcoin_find_bdb48.m4
@@ -48,15 +48,22 @@ AC_DEFUN([BITCOIN_FIND_BDB48],[
if test "x$bdbpath" = "xX"; then
use_bdb=no
AC_MSG_RESULT([no])
- AC_MSG_ERROR([libdb_cxx headers missing, ]AC_PACKAGE_NAME[ requires this library for BDB wallet support (--without-bdb to disable BDB wallet support)])
+ AC_MSG_WARN([libdb_cxx headers missing])
+ AC_MSG_WARN(AC_PACKAGE_NAME[ requires this library for BDB (legacy) wallet support])
+ AC_MSG_WARN([Passing --without-bdb will suppress this warning])
elif test "x$bdb48path" = "xX"; then
BITCOIN_SUBDIR_TO_INCLUDE(BDB_CPPFLAGS,[${bdbpath}],db_cxx)
AC_ARG_WITH([incompatible-bdb],[AS_HELP_STRING([--with-incompatible-bdb], [allow using a bdb version other than 4.8])],[
- AC_MSG_WARN([Found Berkeley DB other than 4.8; BDB wallets opened by this build will not be portable!])
+ AC_MSG_WARN([Found Berkeley DB other than 4.8])
+ AC_MSG_WARN([BDB (legacy) wallets opened by this build will not be portable!])
+ use_bdb=yes
],[
- AC_MSG_ERROR([Found Berkeley DB other than 4.8, required for portable BDB wallets (--with-incompatible-bdb to ignore or --without-bdb to disable BDB wallet support)])
+ AC_MSG_WARN([Found Berkeley DB other than 4.8])
+ AC_MSG_WARN([BDB (legacy) wallets opened by this build would not be portable!])
+ AC_MSG_WARN([If this is intended, pass --with-incompatible-bdb])
+ AC_MSG_WARN([Passing --without-bdb will suppress this warning])
+ use_bdb=no
])
- use_bdb=yes
else
BITCOIN_SUBDIR_TO_INCLUDE(BDB_CPPFLAGS,[${bdb48path}],db_cxx)
bdbpath="${bdb48path}"
@@ -78,7 +85,9 @@ AC_DEFUN([BITCOIN_FIND_BDB48],[
])
done
if test "x$BDB_LIBS" = "x"; then
- AC_MSG_ERROR([libdb_cxx missing, ]AC_PACKAGE_NAME[ requires this library for BDB wallet support (--without-bdb to disable BDB wallet support)])
+ AC_MSG_WARN([libdb_cxx headers missing])
+ AC_MSG_WARN(AC_PACKAGE_NAME[ requires this library for BDB (legacy) wallet support])
+ AC_MSG_WARN([Passing --without-bdb will suppress this warning])
fi
fi
if test "x$use_bdb" != "xno"; then
diff --git a/build_msvc/README.md b/build_msvc/README.md
index c3705f6b03..36fb942c8e 100644
--- a/build_msvc/README.md
+++ b/build_msvc/README.md
@@ -3,67 +3,65 @@ Building Bitcoin Core with Visual Studio
Introduction
---------------------
-Solution and project files to build the Bitcoin Core applications `msbuild` or Visual Studio can be found in the `build_msvc` directory. The build has been tested with Visual Studio 2019 (building with earlier versions of Visual Studio should not be expected to work).
+Solution and project files to build Bitcoin Core with `msbuild` or Visual Studio can be found in the `build_msvc` directory. The build has been tested with Visual Studio 2019 (building with earlier versions of Visual Studio should not be expected to work).
-Building with Visual Studio is an alternative to the Linux based [cross-compiler build](https://github.com/bitcoin/bitcoin/blob/master/doc/build-windows.md).
+To build Bitcoin Core from the command-line, it is sufficient to only install the Visual Studio Build Tools component.
-Quick Start
----------------------
-The minimal steps required to build Bitcoin Core with the msbuild toolchain are below. More detailed instructions are contained in the following sections.
+Building with Visual Studio is an alternative to the Linux based [cross-compiler build](../doc/build-windows.md).
-```
-cd build_msvc
-py -3 msvc-autogen.py
-msbuild /m bitcoin.sln /p:Platform=x64 /p:Configuration=Release /t:build
-```
-Dependencies
+Prerequisites
---------------------
-A number of [open source libraries](https://github.com/bitcoin/bitcoin/blob/master/doc/dependencies.md) are required in order to be able to build Bitcoin Core.
+To build [dependencies](../doc/dependencies.md) (except for [Qt](#qt)),
+the default approach is to use the [vcpkg](https://docs.microsoft.com/en-us/cpp/vcpkg) package manager from Microsoft:
-Options for installing the dependencies in a Visual Studio compatible manner are:
+1. [Install](https://vcpkg.io/en/getting-started.html) vcpkg.
-- Use Microsoft's [vcpkg](https://docs.microsoft.com/en-us/cpp/vcpkg) to download the source packages and build locally. This is the recommended approach.
-- Download the source code, build each dependency, add the required include paths, link libraries and binary tools to the Visual Studio project files.
-- Use [nuget](https://www.nuget.org/) packages with the understanding that any binary files have been compiled by an untrusted third party.
+2. By default, vcpkg makes both `release` and `debug` builds for each package.
+To save build time and disk space, one could skip `debug` builds (example uses PowerShell):
+```powershell
-The [external dependencies](https://github.com/bitcoin/bitcoin/blob/master/doc/dependencies.md) required for building are listed in the `build_msvc/vcpkg.json` file. To ensure `msbuild` project files automatically install the `vcpkg` dependencies use:
-
-```
-vcpkg integrate install
+Add-Content -Path "vcpkg\triplets\x64-windows-static.cmake" -Value "set(VCPKG_BUILD_TYPE release)"
```
Qt
---------------------
-In order to build Bitcoin Core a static build of Qt is required. The runtime library version (e.g. v142) and platform type (x86 or x64) must also match.
+To build Bitcoin Core with the GUI, a static build of Qt is required.
+
+1. Download a single ZIP archive of Qt source code from https://download.qt.io/official_releases/qt/ (e.g., [`qt-everywhere-src-5.12.11.zip`](https://download.qt.io/official_releases/qt/5.12/5.12.11/single/qt-everywhere-src-5.12.11.zip)), and expand it into a dedicated folder. The following instructions assume that this folder is `C:\dev\qt-source`.
+
+2. Open "x64 Native Tools Command Prompt for VS 2019", and input the following commands:
+```cmd
+cd C:\dev\qt-source
+mkdir build
+cd build
+..\configure -release -silent -opensource -confirm-license -opengl desktop -no-shared -static -static-runtime -mp -qt-zlib -qt-pcre -qt-libpng -no-libjpeg -nomake examples -nomake tests -nomake tools -no-dbus -no-libudev -no-icu -no-gtk -no-opengles3 -no-angle -no-sql-sqlite -no-sql-odbc -no-sqlite -no-libudev -no-vulkan -skip qt3d -skip qtactiveqt -skip qtandroidextras -skip qtcanvas3d -skip qtcharts -skip qtconnectivity -skip qtdatavis3d -skip qtdeclarative -skip qtdoc -skip qtgamepad -skip qtgraphicaleffects -skip qtimageformats -skip qtlocation -skip qtmacextras -skip qtmultimedia -skip qtnetworkauth -skip qtpurchasing -skip qtquickcontrols -skip qtquickcontrols2 -skip qtscript -skip qtscxml -skip qtsensors -skip qtserialbus -skip qtserialport -skip qtspeech -skip qtvirtualkeyboard -skip qtwayland -skip qtwebchannel -skip qtwebengine -skip qtwebsockets -skip qtwebview -skip qtx11extras -skip qtxmlpatterns -no-openssl -no-feature-sql -no-feature-sqlmodel -prefix C:\Qt_static
+nmake
+nmake install
+```
-Some prebuilt x64 versions of Qt can be downloaded from [here](https://github.com/sipsorcery/qt_win_binary/releases). Please be aware these downloads are NOT officially sanctioned by Bitcoin Core and are provided for developer convenience only. They should NOT be used for builds that will be used in a production environment or with real funds.
+One could speed up building with [`jom`](https://wiki.qt.io/Jom), a replacement for `nmake` which makes use of all CPU cores.
-To determine which Qt prebuilt version to download open the `.cirrus.yml` file and note the `QT_DOWNLOAD_URL`. When extracting the zip file the destination path must be set to `C:\`. This is due to the way that Qt includes, libraries and tools use internal paths.
+To build Bitcoin Core without Qt, unload or disable the `bitcoin-qt`, `libbitcoin_qt` and `test_bitcoin-qt` projects.
-To build Bitcoin Core without Qt unload or disable the `bitcoin-qt`, `libbitcoin_qt` and `test_bitcoin-qt` projects.
Building
---------------------
-The instructions below use `vcpkg` to install the dependencies.
-
-- Install [`vcpkg`](https://github.com/Microsoft/vcpkg).
-
-- Use Python to generate `*.vcxproj` from Makefile
+1. Use Python to generate `*.vcxproj` from Makefile:
```
PS >py -3 msvc-autogen.py
```
-- An optional step is to adjust the settings in the `build_msvc` directory and the `common.init.vcxproj` file. This project file contains settings that are common to all projects such as the runtime library version and target Windows SDK version. The Qt directories can also be set.
+2. An optional step is to adjust the settings in the `build_msvc` directory and the `common.init.vcxproj` file. This project file contains settings that are common to all projects such as the runtime library version and target Windows SDK version. The Qt directories can also be set. To specify a non-default path to a static Qt package directory, use the `QTBASEDIR` environment variable.
-- To build from the command line with the Visual Studio 2019 toolchain use:
+3. To build from the command-line with the Visual Studio 2019 toolchain use:
-```
-msbuild /m bitcoin.sln /p:Platform=x64 /p:Configuration=Release /t:build
+```cmd
+msbuild -property:Configuration=Release -maxCpuCount -verbosity:minimal bitcoin.sln
```
-- Alternatively, open the `build_msvc/bitcoin.sln` file in Visual Studio 2019.
+Alternatively, open the `build_msvc/bitcoin.sln` file in Visual Studio 2019.
Security
---------------------
diff --git a/build_msvc/common.qt.init.vcxproj b/build_msvc/common.qt.init.vcxproj
index df2fd2fb49..cc8063e545 100644
--- a/build_msvc/common.qt.init.vcxproj
+++ b/build_msvc/common.qt.init.vcxproj
@@ -2,7 +2,7 @@
<Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
<PropertyGroup Label="QtGlobals">
- <QtBaseDir>C:\Qt5.12.11_x64_static_vs2019_160900</QtBaseDir>
+ <QtBaseDir Condition="'$(QTBASEDIR)' == ''">C:\Qt_static</QtBaseDir>
<QtPluginsLibraryDir>$(QtBaseDir)\plugins</QtPluginsLibraryDir>
<QtLibraryDir>$(QtBaseDir)\lib</QtLibraryDir>
<QtIncludeDir>$(QtBaseDir)\include</QtIncludeDir>
diff --git a/ci/lint/04_install.sh b/ci/lint/04_install.sh
index 5587618f2d..991234a436 100755
--- a/ci/lint/04_install.sh
+++ b/ci/lint/04_install.sh
@@ -13,7 +13,8 @@ update-alternatives --install /usr/bin/clang-format-diff clang-format-diff $(whi
${CI_RETRY_EXE} pip3 install codespell==2.0.0
${CI_RETRY_EXE} pip3 install flake8==3.8.3
-${CI_RETRY_EXE} pip3 install mypy==0.781
+${CI_RETRY_EXE} pip3 install mypy==0.910
+${CI_RETRY_EXE} pip3 install pyzmq==22.3.0
${CI_RETRY_EXE} pip3 install vulture==2.3
SHELLCHECK_VERSION=v0.7.2
diff --git a/ci/test/00_setup_env_android.sh b/ci/test/00_setup_env_android.sh
index 4ef3ae1ceb..2f9d1f2a9f 100755
--- a/ci/test/00_setup_env_android.sh
+++ b/ci/test/00_setup_env_android.sh
@@ -22,4 +22,4 @@ export ANDROID_HOME="${DEPENDS_DIR}/SDKs/android"
export ANDROID_NDK_HOME="${ANDROID_HOME}/ndk/${ANDROID_NDK_VERSION}"
export DEP_OPTS="ANDROID_SDK=${ANDROID_HOME} ANDROID_NDK=${ANDROID_NDK_HOME} ANDROID_API_LEVEL=${ANDROID_API_LEVEL} ANDROID_TOOLCHAIN_BIN=${ANDROID_NDK_HOME}/toolchains/llvm/prebuilt/linux-x86_64/bin/"
-export BITCOIN_CONFIG="--disable-ccache"
+export BITCOIN_CONFIG="--disable-ccache --disable-tests --enable-gui-tests --disable-bench --disable-fuzz-binary --without-utils --without-libs --without-daemon"
diff --git a/ci/test/06_script_a.sh b/ci/test/06_script_a.sh
index a42cd6cee1..b1d83883d1 100755
--- a/ci/test/06_script_a.sh
+++ b/ci/test/06_script_a.sh
@@ -10,7 +10,7 @@ if [ -n "$ANDROID_TOOLS_URL" ]; then
DOCKER_EXEC make distclean || true
DOCKER_EXEC ./autogen.sh
DOCKER_EXEC ./configure $BITCOIN_CONFIG --prefix=$DEPENDS_DIR/aarch64-linux-android || ( (DOCKER_EXEC cat config.log) && false)
- DOCKER_EXEC "cd src/qt && make $MAKEJOBS && ANDROID_HOME=${ANDROID_HOME} ANDROID_NDK_HOME=${ANDROID_NDK_HOME} make apk"
+ DOCKER_EXEC "make $MAKEJOBS && cd src/qt && ANDROID_HOME=${ANDROID_HOME} ANDROID_NDK_HOME=${ANDROID_NDK_HOME} make apk"
exit 0
fi
diff --git a/configure.ac b/configure.ac
index 7c05ef3af0..9e9284015b 100644
--- a/configure.ac
+++ b/configure.ac
@@ -107,13 +107,12 @@ AC_PATH_TOOL(GCOV, gcov)
AC_PATH_TOOL(LLVM_COV, llvm-cov)
AC_PATH_PROG(LCOV, lcov)
dnl Python 3.6 is specified in .python-version and should be used if available, see doc/dependencies.md
-AC_PATH_PROGS([PYTHON], [python3.6 python3.7 python3.8 python3.9 python3 python])
+AC_PATH_PROGS([PYTHON], [python3.6 python3.7 python3.8 python3.9 python3.10 python3 python])
AC_PATH_PROG(GENHTML, genhtml)
AC_PATH_PROG([GIT], [git])
AC_PATH_PROG(CCACHE,ccache)
AC_PATH_PROG(XGETTEXT,xgettext)
AC_PATH_PROG(HEXDUMP,hexdump)
-AC_PATH_TOOL(CPPFILT, c++filt)
AC_PATH_TOOL(OBJCOPY, objcopy)
AC_PATH_PROG(DOXYGEN, doxygen)
AM_CONDITIONAL([HAVE_DOXYGEN], [test -n "$DOXYGEN"])
@@ -321,7 +320,7 @@ AC_ARG_ENABLE([gprof],
dnl Turn warnings into errors
AC_ARG_ENABLE([werror],
[AS_HELP_STRING([--enable-werror],
- [Treat certain compiler warnings as errors (default is no)])],
+ [Treat compiler warnings as errors (default is no)])],
[enable_werror=$enableval],
[enable_werror=no])
@@ -409,34 +408,13 @@ if test "x$enable_werror" = "xyes"; then
if test "x$CXXFLAG_WERROR" = "x"; then
AC_MSG_ERROR("enable-werror set but -Werror is not usable")
fi
- AX_CHECK_COMPILE_FLAG([-Werror=gnu],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=gnu"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=vla],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=vla"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=shadow-field],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=shadow-field"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=switch],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=switch"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=thread-safety],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=thread-safety"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=range-loop-analysis],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=range-loop-analysis"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=unused-variable],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=unused-variable"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=date-time],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=date-time"],,[[$CXXFLAG_WERROR]])
+ ERROR_CXXFLAGS=$CXXFLAG_WERROR
dnl -Wreturn-type is broken in GCC for MinGW-w64.
dnl https://sourceforge.net/p/mingw-w64/bugs/306/
- AX_CHECK_COMPILE_FLAG([-Werror=return-type], [ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=return-type"], [], [$CXXFLAG_WERROR],
+ AX_CHECK_COMPILE_FLAG([-Werror=return-type], [], [ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Wno-error=return-type"], [$CXXFLAG_WERROR],
[AC_LANG_SOURCE([[#include <cassert>
int f(){ assert(false); }]])])
-
- AX_CHECK_COMPILE_FLAG([-Werror=conditional-uninitialized],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=conditional-uninitialized"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=sign-compare],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=sign-compare"],,[[$CXXFLAG_WERROR]])
- dnl -Wsuggest-override is broken with GCC before 9.2
- dnl https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78010
- AX_CHECK_COMPILE_FLAG([-Werror=suggest-override],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=suggest-override"],,[[$CXXFLAG_WERROR]],
- [AC_LANG_SOURCE([[struct A { virtual void f(); }; struct B : A { void f() final; };]])])
- AX_CHECK_COMPILE_FLAG([-Werror=unreachable-code-loop-increment],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=unreachable-code-loop-increment"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=mismatched-tags], [ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=mismatched-tags"], [], [$CXXFLAG_WERROR])
- AX_CHECK_COMPILE_FLAG([-Werror=implicit-fallthrough], [ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=implicit-fallthrough"], [], [$CXXFLAG_WERROR])
-
- if test x$suppress_external_warnings != xno ; then
- AX_CHECK_COMPILE_FLAG([-Werror=documentation],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=documentation"],,[[$CXXFLAG_WERROR]])
- fi
fi
if test "x$CXXFLAGS_overridden" = "xno"; then
@@ -447,19 +425,18 @@ if test "x$CXXFLAGS_overridden" = "xno"; then
AX_CHECK_COMPILE_FLAG([-Wformat -Wformat-security],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wformat -Wformat-security"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wvla],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wvla"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wshadow-field],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wshadow-field"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Wswitch],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wswitch"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wthread-safety],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wthread-safety"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Wrange-loop-analysis],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wrange-loop-analysis"],,[[$CXXFLAG_WERROR]])
+ AX_CHECK_COMPILE_FLAG([-Wloop-analysis],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wloop-analysis"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wredundant-decls],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wredundant-decls"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Wunused-variable],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunused-variable"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wunused-member-function],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunused-member-function"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wdate-time],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wdate-time"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wconditional-uninitialized],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wconditional-uninitialized"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Wsign-compare],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wsign-compare"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wduplicated-branches],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wduplicated-branches"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wduplicated-cond],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wduplicated-cond"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wlogical-op],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wlogical-op"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Woverloaded-virtual],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Woverloaded-virtual"],,[[$CXXFLAG_WERROR]])
+ dnl -Wsuggest-override is broken with GCC before 9.2
+ dnl https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78010
AX_CHECK_COMPILE_FLAG([-Wsuggest-override],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wsuggest-override"],,[[$CXXFLAG_WERROR]],
[AC_LANG_SOURCE([[struct A { virtual void f(); }; struct B : A { void f() final; };]])])
AX_CHECK_COMPILE_FLAG([-Wunreachable-code-loop-increment],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunreachable-code-loop-increment"],,[[$CXXFLAG_WERROR]])
@@ -474,7 +451,6 @@ if test "x$CXXFLAGS_overridden" = "xno"; then
dnl set the -Wno-foo case if it works.
AX_CHECK_COMPILE_FLAG([-Wunused-parameter],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-unused-parameter"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wself-assign],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-self-assign"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Wunused-local-typedef],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-unused-local-typedef"],,[[$CXXFLAG_WERROR]])
if test x$suppress_external_warnings != xyes ; then
AX_CHECK_COMPILE_FLAG([-Wdeprecated-copy],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-deprecated-copy"],,[[$CXXFLAG_WERROR]])
fi
@@ -1661,8 +1637,8 @@ if test "x$use_ccache" != "xno"; then
fi
AC_MSG_RESULT($use_ccache)
if test "x$use_ccache" = "xyes"; then
- AX_CHECK_COMPILE_FLAG([-fdebug-prefix-map=A=B],[DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -fdebug-prefix-map=\$(abs_srcdir)=."],,[[$CXXFLAG_WERROR]])
- AX_CHECK_PREPROC_FLAG([-fmacro-prefix-map=A=B],[DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -fmacro-prefix-map=\$(abs_srcdir)=."],,[[$CXXFLAG_WERROR]])
+ AX_CHECK_COMPILE_FLAG([-fdebug-prefix-map=A=B],[DEBUG_CXXFLAGS="$DEBUG_CXXFLAGS -fdebug-prefix-map=\$(abs_top_srcdir)=."],,[[$CXXFLAG_WERROR]])
+ AX_CHECK_PREPROC_FLAG([-fmacro-prefix-map=A=B],[DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -fmacro-prefix-map=\$(abs_top_srcdir)=."],,[[$CXXFLAG_WERROR]])
fi
fi
diff --git a/contrib/devtools/pixie.py b/contrib/devtools/pixie.py
deleted file mode 100644
index 64660968ad..0000000000
--- a/contrib/devtools/pixie.py
+++ /dev/null
@@ -1,323 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2020 Wladimir J. van der Laan
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-'''
-Compact, self-contained ELF implementation for bitcoin-core security checks.
-'''
-import struct
-import types
-from typing import Dict, List, Optional, Union, Tuple
-
-# you can find all these values in elf.h
-EI_NIDENT = 16
-
-# Byte indices in e_ident
-EI_CLASS = 4 # ELFCLASSxx
-EI_DATA = 5 # ELFDATAxxxx
-
-ELFCLASS32 = 1 # 32-bit
-ELFCLASS64 = 2 # 64-bit
-
-ELFDATA2LSB = 1 # little endian
-ELFDATA2MSB = 2 # big endian
-
-# relevant values for e_machine
-EM_386 = 3
-EM_PPC64 = 21
-EM_ARM = 40
-EM_AARCH64 = 183
-EM_X86_64 = 62
-EM_RISCV = 243
-
-# relevant values for e_type
-ET_DYN = 3
-
-# relevant values for sh_type
-SHT_PROGBITS = 1
-SHT_STRTAB = 3
-SHT_DYNAMIC = 6
-SHT_DYNSYM = 11
-SHT_GNU_verneed = 0x6ffffffe
-SHT_GNU_versym = 0x6fffffff
-
-# relevant values for p_type
-PT_LOAD = 1
-PT_GNU_STACK = 0x6474e551
-PT_GNU_RELRO = 0x6474e552
-
-# relevant values for p_flags
-PF_X = (1 << 0)
-PF_W = (1 << 1)
-PF_R = (1 << 2)
-
-# relevant values for d_tag
-DT_NEEDED = 1
-DT_FLAGS = 30
-
-# relevant values of `d_un.d_val' in the DT_FLAGS entry
-DF_BIND_NOW = 0x00000008
-
-# relevant d_tags with string payload
-STRING_TAGS = {DT_NEEDED}
-
-# relevant values for ST_BIND subfield of st_info (symbol binding)
-STB_LOCAL = 0
-
-class ELFRecord(types.SimpleNamespace):
- '''Unified parsing for ELF records.'''
- def __init__(self, data: bytes, offset: int, eh: 'ELFHeader', total_size: Optional[int]) -> None:
- hdr_struct = self.STRUCT[eh.ei_class][0][eh.ei_data]
- if total_size is not None and hdr_struct.size > total_size:
- raise ValueError(f'{self.__class__.__name__} header size too small ({total_size} < {hdr_struct.size})')
- for field, value in zip(self.STRUCT[eh.ei_class][1], hdr_struct.unpack(data[offset:offset + hdr_struct.size])):
- setattr(self, field, value)
-
-def BiStruct(chars: str) -> Dict[int, struct.Struct]:
- '''Compile a struct parser for both endians.'''
- return {
- ELFDATA2LSB: struct.Struct('<' + chars),
- ELFDATA2MSB: struct.Struct('>' + chars),
- }
-
-class ELFHeader(ELFRecord):
- FIELDS = ['e_type', 'e_machine', 'e_version', 'e_entry', 'e_phoff', 'e_shoff', 'e_flags', 'e_ehsize', 'e_phentsize', 'e_phnum', 'e_shentsize', 'e_shnum', 'e_shstrndx']
- STRUCT = {
- ELFCLASS32: (BiStruct('HHIIIIIHHHHHH'), FIELDS),
- ELFCLASS64: (BiStruct('HHIQQQIHHHHHH'), FIELDS),
- }
-
- def __init__(self, data: bytes, offset: int) -> None:
- self.e_ident = data[offset:offset + EI_NIDENT]
- if self.e_ident[0:4] != b'\x7fELF':
- raise ValueError('invalid ELF magic')
- self.ei_class = self.e_ident[EI_CLASS]
- self.ei_data = self.e_ident[EI_DATA]
-
- super().__init__(data, offset + EI_NIDENT, self, None)
-
- def __repr__(self) -> str:
- return f'Header(e_ident={self.e_ident!r}, e_type={self.e_type}, e_machine={self.e_machine}, e_version={self.e_version}, e_entry={self.e_entry}, e_phoff={self.e_phoff}, e_shoff={self.e_shoff}, e_flags={self.e_flags}, e_ehsize={self.e_ehsize}, e_phentsize={self.e_phentsize}, e_phnum={self.e_phnum}, e_shentsize={self.e_shentsize}, e_shnum={self.e_shnum}, e_shstrndx={self.e_shstrndx})'
-
-class Section(ELFRecord):
- name: Optional[bytes] = None
- FIELDS = ['sh_name', 'sh_type', 'sh_flags', 'sh_addr', 'sh_offset', 'sh_size', 'sh_link', 'sh_info', 'sh_addralign', 'sh_entsize']
- STRUCT = {
- ELFCLASS32: (BiStruct('IIIIIIIIII'), FIELDS),
- ELFCLASS64: (BiStruct('IIQQQQIIQQ'), FIELDS),
- }
-
- def __init__(self, data: bytes, offset: int, eh: ELFHeader) -> None:
- super().__init__(data, offset, eh, eh.e_shentsize)
- self._data = data
-
- def __repr__(self) -> str:
- return f'Section(sh_name={self.sh_name}({self.name!r}), sh_type=0x{self.sh_type:x}, sh_flags={self.sh_flags}, sh_addr=0x{self.sh_addr:x}, sh_offset=0x{self.sh_offset:x}, sh_size={self.sh_size}, sh_link={self.sh_link}, sh_info={self.sh_info}, sh_addralign={self.sh_addralign}, sh_entsize={self.sh_entsize})'
-
- def contents(self) -> bytes:
- '''Return section contents.'''
- return self._data[self.sh_offset:self.sh_offset + self.sh_size]
-
-class ProgramHeader(ELFRecord):
- STRUCT = {
- # different ELF classes have the same fields, but in a different order to optimize space versus alignment
- ELFCLASS32: (BiStruct('IIIIIIII'), ['p_type', 'p_offset', 'p_vaddr', 'p_paddr', 'p_filesz', 'p_memsz', 'p_flags', 'p_align']),
- ELFCLASS64: (BiStruct('IIQQQQQQ'), ['p_type', 'p_flags', 'p_offset', 'p_vaddr', 'p_paddr', 'p_filesz', 'p_memsz', 'p_align']),
- }
-
- def __init__(self, data: bytes, offset: int, eh: ELFHeader) -> None:
- super().__init__(data, offset, eh, eh.e_phentsize)
-
- def __repr__(self) -> str:
- return f'ProgramHeader(p_type={self.p_type}, p_offset={self.p_offset}, p_vaddr={self.p_vaddr}, p_paddr={self.p_paddr}, p_filesz={self.p_filesz}, p_memsz={self.p_memsz}, p_flags={self.p_flags}, p_align={self.p_align})'
-
-class Symbol(ELFRecord):
- STRUCT = {
- # different ELF classes have the same fields, but in a different order to optimize space versus alignment
- ELFCLASS32: (BiStruct('IIIBBH'), ['st_name', 'st_value', 'st_size', 'st_info', 'st_other', 'st_shndx']),
- ELFCLASS64: (BiStruct('IBBHQQ'), ['st_name', 'st_info', 'st_other', 'st_shndx', 'st_value', 'st_size']),
- }
-
- def __init__(self, data: bytes, offset: int, eh: ELFHeader, symtab: Section, strings: bytes, version: Optional[bytes]) -> None:
- super().__init__(data, offset, eh, symtab.sh_entsize)
- self.name = _lookup_string(strings, self.st_name)
- self.version = version
-
- def __repr__(self) -> str:
- return f'Symbol(st_name={self.st_name}({self.name!r}), st_value={self.st_value}, st_size={self.st_size}, st_info={self.st_info}, st_other={self.st_other}, st_shndx={self.st_shndx}, version={self.version!r})'
-
- @property
- def is_import(self) -> bool:
- '''Returns whether the symbol is an imported symbol.'''
- return self.st_bind != STB_LOCAL and self.st_shndx == 0
-
- @property
- def is_export(self) -> bool:
- '''Returns whether the symbol is an exported symbol.'''
- return self.st_bind != STB_LOCAL and self.st_shndx != 0
-
- @property
- def st_bind(self) -> int:
- '''Returns STB_*.'''
- return self.st_info >> 4
-
-class Verneed(ELFRecord):
- DEF = (BiStruct('HHIII'), ['vn_version', 'vn_cnt', 'vn_file', 'vn_aux', 'vn_next'])
- STRUCT = { ELFCLASS32: DEF, ELFCLASS64: DEF }
-
- def __init__(self, data: bytes, offset: int, eh: ELFHeader) -> None:
- super().__init__(data, offset, eh, None)
-
- def __repr__(self) -> str:
- return f'Verneed(vn_version={self.vn_version}, vn_cnt={self.vn_cnt}, vn_file={self.vn_file}, vn_aux={self.vn_aux}, vn_next={self.vn_next})'
-
-class Vernaux(ELFRecord):
- DEF = (BiStruct('IHHII'), ['vna_hash', 'vna_flags', 'vna_other', 'vna_name', 'vna_next'])
- STRUCT = { ELFCLASS32: DEF, ELFCLASS64: DEF }
-
- def __init__(self, data: bytes, offset: int, eh: ELFHeader, strings: bytes) -> None:
- super().__init__(data, offset, eh, None)
- self.name = _lookup_string(strings, self.vna_name)
-
- def __repr__(self) -> str:
- return f'Veraux(vna_hash={self.vna_hash}, vna_flags={self.vna_flags}, vna_other={self.vna_other}, vna_name={self.vna_name}({self.name!r}), vna_next={self.vna_next})'
-
-class DynTag(ELFRecord):
- STRUCT = {
- ELFCLASS32: (BiStruct('II'), ['d_tag', 'd_val']),
- ELFCLASS64: (BiStruct('QQ'), ['d_tag', 'd_val']),
- }
-
- def __init__(self, data: bytes, offset: int, eh: ELFHeader, section: Section) -> None:
- super().__init__(data, offset, eh, section.sh_entsize)
-
- def __repr__(self) -> str:
- return f'DynTag(d_tag={self.d_tag}, d_val={self.d_val})'
-
-def _lookup_string(data: bytes, index: int) -> bytes:
- '''Look up string by offset in ELF string table.'''
- endx = data.find(b'\x00', index)
- assert endx != -1
- return data[index:endx]
-
-VERSYM_S = BiStruct('H') # .gnu_version section has a single 16-bit integer per symbol in the linked section
-def _parse_symbol_table(section: Section, strings: bytes, eh: ELFHeader, versym: bytes, verneed: Dict[int, bytes]) -> List[Symbol]:
- '''Parse symbol table, return a list of symbols.'''
- data = section.contents()
- symbols = []
- versym_iter = (verneed.get(v[0]) for v in VERSYM_S[eh.ei_data].iter_unpack(versym))
- for ofs, version in zip(range(0, len(data), section.sh_entsize), versym_iter):
- symbols.append(Symbol(data, ofs, eh, section, strings, version))
- return symbols
-
-def _parse_verneed(section: Section, strings: bytes, eh: ELFHeader) -> Dict[int, bytes]:
- '''Parse .gnu.version_r section, return a dictionary of {versym: 'GLIBC_...'}.'''
- data = section.contents()
- ofs = 0
- result = {}
- while True:
- verneed = Verneed(data, ofs, eh)
- aofs = ofs + verneed.vn_aux
- while True:
- vernaux = Vernaux(data, aofs, eh, strings)
- result[vernaux.vna_other] = vernaux.name
- if not vernaux.vna_next:
- break
- aofs += vernaux.vna_next
-
- if not verneed.vn_next:
- break
- ofs += verneed.vn_next
-
- return result
-
-def _parse_dyn_tags(section: Section, strings: bytes, eh: ELFHeader) -> List[Tuple[int, Union[bytes, int]]]:
- '''Parse dynamic tags. Return array of tuples.'''
- data = section.contents()
- ofs = 0
- result = []
- for ofs in range(0, len(data), section.sh_entsize):
- tag = DynTag(data, ofs, eh, section)
- val = _lookup_string(strings, tag.d_val) if tag.d_tag in STRING_TAGS else tag.d_val
- result.append((tag.d_tag, val))
-
- return result
-
-class ELFFile:
- sections: List[Section]
- program_headers: List[ProgramHeader]
- dyn_symbols: List[Symbol]
- dyn_tags: List[Tuple[int, Union[bytes, int]]]
-
- def __init__(self, data: bytes) -> None:
- self.data = data
- self.hdr = ELFHeader(self.data, 0)
- self._load_sections()
- self._load_program_headers()
- self._load_dyn_symbols()
- self._load_dyn_tags()
- self._section_to_segment_mapping()
-
- def _load_sections(self) -> None:
- self.sections = []
- for idx in range(self.hdr.e_shnum):
- offset = self.hdr.e_shoff + idx * self.hdr.e_shentsize
- self.sections.append(Section(self.data, offset, self.hdr))
-
- shstr = self.sections[self.hdr.e_shstrndx].contents()
- for section in self.sections:
- section.name = _lookup_string(shstr, section.sh_name)
-
- def _load_program_headers(self) -> None:
- self.program_headers = []
- for idx in range(self.hdr.e_phnum):
- offset = self.hdr.e_phoff + idx * self.hdr.e_phentsize
- self.program_headers.append(ProgramHeader(self.data, offset, self.hdr))
-
- def _load_dyn_symbols(self) -> None:
- # first, load 'verneed' section
- verneed = None
- for section in self.sections:
- if section.sh_type == SHT_GNU_verneed:
- strtab = self.sections[section.sh_link].contents() # associated string table
- assert verneed is None # only one section of this kind please
- verneed = _parse_verneed(section, strtab, self.hdr)
- assert verneed is not None
-
- # then, correlate GNU versym sections with dynamic symbol sections
- versym = {}
- for section in self.sections:
- if section.sh_type == SHT_GNU_versym:
- versym[section.sh_link] = section
-
- # finally, load dynsym sections
- self.dyn_symbols = []
- for idx, section in enumerate(self.sections):
- if section.sh_type == SHT_DYNSYM: # find dynamic symbol tables
- strtab_data = self.sections[section.sh_link].contents() # associated string table
- versym_data = versym[idx].contents() # associated symbol version table
- self.dyn_symbols += _parse_symbol_table(section, strtab_data, self.hdr, versym_data, verneed)
-
- def _load_dyn_tags(self) -> None:
- self.dyn_tags = []
- for idx, section in enumerate(self.sections):
- if section.sh_type == SHT_DYNAMIC: # find dynamic tag tables
- strtab = self.sections[section.sh_link].contents() # associated string table
- self.dyn_tags += _parse_dyn_tags(section, strtab, self.hdr)
-
- def _section_to_segment_mapping(self) -> None:
- for ph in self.program_headers:
- ph.sections = []
- for section in self.sections:
- if ph.p_vaddr <= section.sh_addr < (ph.p_vaddr + ph.p_memsz):
- ph.sections.append(section)
-
- def query_dyn_tags(self, tag_in: int) -> List[Union[int, bytes]]:
- '''Return the values of all dyn tags with the specified tag.'''
- return [val for (tag, val) in self.dyn_tags if tag == tag_in]
-
-
-def load(filename: str) -> ELFFile:
- with open(filename, 'rb') as f:
- data = f.read()
- return ELFFile(data)
diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py
index 0b59d8eada..ef421aebb1 100755
--- a/contrib/devtools/security-check.py
+++ b/contrib/devtools/security-check.py
@@ -8,192 +8,155 @@ Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
'''
import sys
-from typing import List, Optional
+from typing import List
-import lief
-import pixie
+import lief #type:ignore
-def check_ELF_PIE(executable) -> bool:
- '''
- Check for position independent executable (PIE), allowing for address space randomization.
- '''
- elf = pixie.load(executable)
- return elf.hdr.e_type == pixie.ET_DYN
-
-def check_ELF_NX(executable) -> bool:
- '''
- Check that no sections are writable and executable (including the stack)
- '''
- elf = pixie.load(executable)
- have_wx = False
- have_gnu_stack = False
- for ph in elf.program_headers:
- if ph.p_type == pixie.PT_GNU_STACK:
- have_gnu_stack = True
- if (ph.p_flags & pixie.PF_W) != 0 and (ph.p_flags & pixie.PF_X) != 0: # section is both writable and executable
- have_wx = True
- return have_gnu_stack and not have_wx
-
-def check_ELF_RELRO(executable) -> bool:
+def check_ELF_RELRO(binary) -> bool:
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
- elf = pixie.load(executable)
have_gnu_relro = False
- for ph in elf.program_headers:
+ for segment in binary.segments:
# Note: not checking p_flags == PF_R: here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program
# header are ignored, the PT_LOAD header determines the effective permissions.
# However, the dynamic linker need to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also https://marc.info/?l=binutils&m=1498883354122353
- if ph.p_type == pixie.PT_GNU_RELRO:
+ if segment.type == lief.ELF.SEGMENT_TYPES.GNU_RELRO:
have_gnu_relro = True
have_bindnow = False
- for flags in elf.query_dyn_tags(pixie.DT_FLAGS):
- assert isinstance(flags, int)
- if flags & pixie.DF_BIND_NOW:
+ try:
+ flags = binary.get(lief.ELF.DYNAMIC_TAGS.FLAGS)
+ if flags.value & lief.ELF.DYNAMIC_FLAGS.BIND_NOW:
have_bindnow = True
+ except:
+ have_bindnow = False
return have_gnu_relro and have_bindnow
-def check_ELF_Canary(executable) -> bool:
+def check_ELF_Canary(binary) -> bool:
'''
Check for use of stack canary
'''
- elf = pixie.load(executable)
- ok = False
- for symbol in elf.dyn_symbols:
- if symbol.name == b'__stack_chk_fail':
- ok = True
- return ok
+ return binary.has_symbol('__stack_chk_fail')
-def check_ELF_separate_code(executable):
+def check_ELF_separate_code(binary):
'''
Check that sections are appropriately separated in virtual memory,
based on their permissions. This checks for missing -Wl,-z,separate-code
and potentially other problems.
'''
- elf = pixie.load(executable)
- R = pixie.PF_R
- W = pixie.PF_W
- E = pixie.PF_X
+ R = lief.ELF.SEGMENT_FLAGS.R
+ W = lief.ELF.SEGMENT_FLAGS.W
+ E = lief.ELF.SEGMENT_FLAGS.X
EXPECTED_FLAGS = {
# Read + execute
- b'.init': R | E,
- b'.plt': R | E,
- b'.plt.got': R | E,
- b'.plt.sec': R | E,
- b'.text': R | E,
- b'.fini': R | E,
+ '.init': R | E,
+ '.plt': R | E,
+ '.plt.got': R | E,
+ '.plt.sec': R | E,
+ '.text': R | E,
+ '.fini': R | E,
# Read-only data
- b'.interp': R,
- b'.note.gnu.property': R,
- b'.note.gnu.build-id': R,
- b'.note.ABI-tag': R,
- b'.gnu.hash': R,
- b'.dynsym': R,
- b'.dynstr': R,
- b'.gnu.version': R,
- b'.gnu.version_r': R,
- b'.rela.dyn': R,
- b'.rela.plt': R,
- b'.rodata': R,
- b'.eh_frame_hdr': R,
- b'.eh_frame': R,
- b'.qtmetadata': R,
- b'.gcc_except_table': R,
- b'.stapsdt.base': R,
+ '.interp': R,
+ '.note.gnu.property': R,
+ '.note.gnu.build-id': R,
+ '.note.ABI-tag': R,
+ '.gnu.hash': R,
+ '.dynsym': R,
+ '.dynstr': R,
+ '.gnu.version': R,
+ '.gnu.version_r': R,
+ '.rela.dyn': R,
+ '.rela.plt': R,
+ '.rodata': R,
+ '.eh_frame_hdr': R,
+ '.eh_frame': R,
+ '.qtmetadata': R,
+ '.gcc_except_table': R,
+ '.stapsdt.base': R,
# Writable data
- b'.init_array': R | W,
- b'.fini_array': R | W,
- b'.dynamic': R | W,
- b'.got': R | W,
- b'.data': R | W,
- b'.bss': R | W,
+ '.init_array': R | W,
+ '.fini_array': R | W,
+ '.dynamic': R | W,
+ '.got': R | W,
+ '.data': R | W,
+ '.bss': R | W,
}
- if elf.hdr.e_machine == pixie.EM_PPC64:
+ if binary.header.machine_type == lief.ELF.ARCH.PPC64:
# .plt is RW on ppc64 even with separate-code
- EXPECTED_FLAGS[b'.plt'] = R | W
+ EXPECTED_FLAGS['.plt'] = R | W
# For all LOAD program headers get mapping to the list of sections,
# and for each section, remember the flags of the associated program header.
flags_per_section = {}
- for ph in elf.program_headers:
- if ph.p_type == pixie.PT_LOAD:
- for section in ph.sections:
+ for segment in binary.segments:
+ if segment.type == lief.ELF.SEGMENT_TYPES.LOAD:
+ for section in segment.sections:
assert(section.name not in flags_per_section)
- flags_per_section[section.name] = ph.p_flags
+ flags_per_section[section.name] = segment.flags
# Spot-check ELF LOAD program header flags per section
# If these sections exist, check them against the expected R/W/E flags
for (section, flags) in flags_per_section.items():
if section in EXPECTED_FLAGS:
- if EXPECTED_FLAGS[section] != flags:
+ if int(EXPECTED_FLAGS[section]) != int(flags):
return False
return True
-def check_PE_DYNAMIC_BASE(executable) -> bool:
+def check_PE_DYNAMIC_BASE(binary) -> bool:
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
- binary = lief.parse(executable)
return lief.PE.DLL_CHARACTERISTICS.DYNAMIC_BASE in binary.optional_header.dll_characteristics_lists
# Must support high-entropy 64-bit address space layout randomization
# in addition to DYNAMIC_BASE to have secure ASLR.
-def check_PE_HIGH_ENTROPY_VA(executable) -> bool:
+def check_PE_HIGH_ENTROPY_VA(binary) -> bool:
'''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
- binary = lief.parse(executable)
return lief.PE.DLL_CHARACTERISTICS.HIGH_ENTROPY_VA in binary.optional_header.dll_characteristics_lists
-def check_PE_RELOC_SECTION(executable) -> bool:
+def check_PE_RELOC_SECTION(binary) -> bool:
'''Check for a reloc section. This is required for functional ASLR.'''
- binary = lief.parse(executable)
return binary.has_relocations
-def check_MACHO_NOUNDEFS(executable) -> bool:
+def check_MACHO_NOUNDEFS(binary) -> bool:
'''
Check for no undefined references.
'''
- binary = lief.parse(executable)
return binary.header.has(lief.MachO.HEADER_FLAGS.NOUNDEFS)
-def check_MACHO_LAZY_BINDINGS(executable) -> bool:
+def check_MACHO_LAZY_BINDINGS(binary) -> bool:
'''
Check for no lazy bindings.
We don't use or check for MH_BINDATLOAD. See #18295.
'''
- binary = lief.parse(executable)
return binary.dyld_info.lazy_bind == (0,0)
-def check_MACHO_Canary(executable) -> bool:
+def check_MACHO_Canary(binary) -> bool:
'''
Check for use of stack canary
'''
- binary = lief.parse(executable)
return binary.has_symbol('___stack_chk_fail')
-def check_PIE(executable) -> bool:
+def check_PIE(binary) -> bool:
'''
Check for position independent executable (PIE),
allowing for address space randomization.
'''
- binary = lief.parse(executable)
return binary.is_pie
-def check_NX(executable) -> bool:
+def check_NX(binary) -> bool:
'''
Check for no stack execution
'''
- binary = lief.parse(executable)
return binary.has_nx
-def check_control_flow(executable) -> bool:
+def check_control_flow(binary) -> bool:
'''
Check for control flow instrumentation
'''
- binary = lief.parse(executable)
-
content = binary.get_content_from_virtual_address(binary.entrypoint, 4, lief.Binary.VA_TYPES.AUTO)
if content == [243, 15, 30, 250]: # endbr64
@@ -203,8 +166,8 @@ def check_control_flow(executable) -> bool:
CHECKS = {
'ELF': [
- ('PIE', check_ELF_PIE),
- ('NX', check_ELF_NX),
+ ('PIE', check_PIE),
+ ('NX', check_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary),
('separate_code', check_ELF_separate_code),
@@ -226,30 +189,20 @@ CHECKS = {
]
}
-def identify_executable(executable) -> Optional[str]:
- with open(filename, 'rb') as f:
- magic = f.read(4)
- if magic.startswith(b'MZ'):
- return 'PE'
- elif magic.startswith(b'\x7fELF'):
- return 'ELF'
- elif magic.startswith(b'\xcf\xfa'):
- return 'MACHO'
- return None
-
if __name__ == '__main__':
retval: int = 0
for filename in sys.argv[1:]:
try:
- etype = identify_executable(filename)
- if etype is None:
- print(f'{filename}: unknown format')
+ binary = lief.parse(filename)
+ etype = binary.format.name
+ if etype == lief.EXE_FORMATS.UNKNOWN:
+ print(f'{filename}: unknown executable format')
retval = 1
continue
failed: List[str] = []
for (name, func) in CHECKS[etype]:
- if not func(filename):
+ if not func(binary):
failed.append(name)
if failed:
print(f'{filename}: failed {" ".join(failed)}')
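For illustration only (not part of the patch above): the LIEF-based approach parses a binary once and queries properties on the parsed object, instead of re-reading the file for every check as the pixie-based code did. The minimal sketch below assumes a LIEF build that exposes `lief.parse()`, `Binary.format`, `Binary.is_pie`, `Binary.has_nx`, and `Binary.has_symbol()`, matching the calls used in the patched `security-check.py`.
```python
# Minimal sketch, not part of the patch: one parse per file, then query
# properties on the parsed object for each check.
import sys
import lief  # type: ignore

def quick_report(path: str) -> None:
    binary = lief.parse(path)  # returns None if the file cannot be parsed
    if binary is None:
        print(f'{path}: unknown executable format')
        return
    print(f'{path}: format={binary.format.name}')
    print(f'  PIE: {binary.is_pie}')
    print(f'  NX:  {binary.has_nx}')
    if binary.format == lief.EXE_FORMATS.ELF:
        # Same stack-canary heuristic as the patched check_ELF_Canary()
        print(f'  stack canary: {binary.has_symbol("__stack_chk_fail")}')

if __name__ == '__main__':
    for path in sys.argv[1:]:
        quick_report(path)
```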
diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py
index 98cab1b7fc..136a9b70c1 100755
--- a/contrib/devtools/symbol-check.py
+++ b/contrib/devtools/symbol-check.py
@@ -10,14 +10,14 @@ Example usage:
find ../path/to/binaries -type f -executable | xargs python3 contrib/devtools/symbol-check.py
'''
-import subprocess
import sys
-from typing import List, Optional
+from typing import List, Dict
-import lief
-import pixie
+import lief #type:ignore
-from utils import determine_wellknown_cmd
+# temporary constant, to be replaced with lief.ELF.ARCH.RISCV
+# https://github.com/lief-project/LIEF/pull/562
+LIEF_ELF_ARCH_RISCV = lief.ELF.ARCH(243)
# Debian 8 (Jessie) EOL: 2020. https://wiki.debian.org/DebianReleases#Production_Releases
#
@@ -43,12 +43,12 @@ from utils import determine_wellknown_cmd
MAX_VERSIONS = {
'GCC': (4,8,0),
'GLIBC': {
- pixie.EM_386: (2,17),
- pixie.EM_X86_64: (2,17),
- pixie.EM_ARM: (2,17),
- pixie.EM_AARCH64:(2,17),
- pixie.EM_PPC64: (2,17),
- pixie.EM_RISCV: (2,27),
+ lief.ELF.ARCH.i386: (2,17),
+ lief.ELF.ARCH.x86_64: (2,17),
+ lief.ELF.ARCH.ARM: (2,17),
+ lief.ELF.ARCH.AARCH64:(2,17),
+ lief.ELF.ARCH.PPC64: (2,17),
+ LIEF_ELF_ARCH_RISCV: (2,27),
},
'LIBATOMIC': (1,0),
'V': (0,5,0), # xkb (bitcoin-qt only)
@@ -58,10 +58,35 @@ MAX_VERSIONS = {
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
-'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr',
+'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__',
+'__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr',
'environ', '_environ', '__environ',
}
+# Expected linker-loader names can be found here:
+# https://sourceware.org/glibc/wiki/ABIList?action=recall&rev=16
+ELF_INTERPRETER_NAMES: Dict[lief.ELF.ARCH, Dict[lief.ENDIANNESS, str]] = {
+ lief.ELF.ARCH.i386: {
+ lief.ENDIANNESS.LITTLE: "/lib/ld-linux.so.2",
+ },
+ lief.ELF.ARCH.x86_64: {
+ lief.ENDIANNESS.LITTLE: "/lib64/ld-linux-x86-64.so.2",
+ },
+ lief.ELF.ARCH.ARM: {
+ lief.ENDIANNESS.LITTLE: "/lib/ld-linux-armhf.so.3",
+ },
+ lief.ELF.ARCH.AARCH64: {
+ lief.ENDIANNESS.LITTLE: "/lib/ld-linux-aarch64.so.1",
+ },
+ lief.ELF.ARCH.PPC64: {
+ lief.ENDIANNESS.BIG: "/lib64/ld64.so.1",
+ lief.ENDIANNESS.LITTLE: "/lib64/ld64.so.2",
+ },
+ LIEF_ELF_ARCH_RISCV: {
+ lief.ENDIANNESS.LITTLE: "/lib/ld-linux-riscv64-lp64d.so.1",
+ },
+}
+
# Allowed NEEDED libraries
ELF_ALLOWED_LIBRARIES = {
# bitcoind and bitcoin-qt
@@ -133,31 +158,8 @@ PE_ALLOWED_LIBRARIES = {
'WTSAPI32.dll',
}
-class CPPFilt(object):
- '''
- Demangle C++ symbol names.
-
- Use a pipe to the 'c++filt' command.
- '''
- def __init__(self):
- self.proc = subprocess.Popen(determine_wellknown_cmd('CPPFILT', 'c++filt'), stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
-
- def __call__(self, mangled):
- self.proc.stdin.write(mangled + '\n')
- self.proc.stdin.flush()
- return self.proc.stdout.readline().rstrip()
-
- def close(self):
- self.proc.stdin.close()
- self.proc.stdout.close()
- self.proc.wait()
-
def check_version(max_versions, version, arch) -> bool:
- if '_' in version:
- (lib, _, ver) = version.rpartition('_')
- else:
- lib = version
- ver = '0'
+ (lib, _, ver) = version.rpartition('_')
ver = tuple([int(x) for x in ver.split('.')])
if not lib in max_versions:
return False
@@ -166,48 +168,45 @@ def check_version(max_versions, version, arch) -> bool:
else:
return ver <= max_versions[lib][arch]
-def check_imported_symbols(filename) -> bool:
- elf = pixie.load(filename)
- cppfilt = CPPFilt()
+def check_imported_symbols(binary) -> bool:
ok: bool = True
- for symbol in elf.dyn_symbols:
- if not symbol.is_import:
+ for symbol in binary.imported_symbols:
+ if not symbol.imported:
continue
- sym = symbol.name.decode()
- version = symbol.version.decode() if symbol.version is not None else None
- if version and not check_version(MAX_VERSIONS, version, elf.hdr.e_machine):
- print('{}: symbol {} from unsupported version {}'.format(filename, cppfilt(sym), version))
- ok = False
+
+ version = symbol.symbol_version if symbol.has_version else None
+
+ if version:
+ aux_version = version.symbol_version_auxiliary.name if version.has_auxiliary_version else None
+ if aux_version and not check_version(MAX_VERSIONS, aux_version, binary.header.machine_type):
+ print(f'{filename}: symbol {symbol.name} from unsupported version {version}')
+ ok = False
return ok
-def check_exported_symbols(filename) -> bool:
- elf = pixie.load(filename)
- cppfilt = CPPFilt()
+def check_exported_symbols(binary) -> bool:
ok: bool = True
- for symbol in elf.dyn_symbols:
- if not symbol.is_export:
+
+ for symbol in binary.dynamic_symbols:
+ if not symbol.exported:
continue
- sym = symbol.name.decode()
- if elf.hdr.e_machine == pixie.EM_RISCV or sym in IGNORE_EXPORTS:
+ name = symbol.name
+ if binary.header.machine_type == LIEF_ELF_ARCH_RISCV or name in IGNORE_EXPORTS:
continue
- print('{}: export of symbol {} not allowed'.format(filename, cppfilt(sym)))
+ print(f'{binary.name}: export of symbol {name} not allowed!')
ok = False
return ok
-def check_ELF_libraries(filename) -> bool:
+def check_ELF_libraries(binary) -> bool:
ok: bool = True
- elf = pixie.load(filename)
- for library_name in elf.query_dyn_tags(pixie.DT_NEEDED):
- assert(isinstance(library_name, bytes))
- if library_name.decode() not in ELF_ALLOWED_LIBRARIES:
- print('{}: NEEDED library {} is not allowed'.format(filename, library_name.decode()))
+ for library in binary.libraries:
+ if library not in ELF_ALLOWED_LIBRARIES:
+ print(f'{filename}: {library} is not in ALLOWED_LIBRARIES!')
ok = False
return ok
-def check_MACHO_libraries(filename) -> bool:
+def check_MACHO_libraries(binary) -> bool:
ok: bool = True
- binary = lief.parse(filename)
for dylib in binary.libraries:
split = dylib.name.split('/')
if split[-1] not in MACHO_ALLOWED_LIBRARIES:
@@ -215,40 +214,42 @@ def check_MACHO_libraries(filename) -> bool:
ok = False
return ok
-def check_MACHO_min_os(filename) -> bool:
- binary = lief.parse(filename)
+def check_MACHO_min_os(binary) -> bool:
if binary.build_version.minos == [10,15,0]:
return True
return False
-def check_MACHO_sdk(filename) -> bool:
- binary = lief.parse(filename)
+def check_MACHO_sdk(binary) -> bool:
if binary.build_version.sdk == [10, 15, 6]:
return True
return False
-def check_PE_libraries(filename) -> bool:
+def check_PE_libraries(binary) -> bool:
ok: bool = True
- binary = lief.parse(filename)
for dylib in binary.libraries:
if dylib not in PE_ALLOWED_LIBRARIES:
print(f'{dylib} is not in ALLOWED_LIBRARIES!')
ok = False
return ok
-def check_PE_subsystem_version(filename) -> bool:
- binary = lief.parse(filename)
+def check_PE_subsystem_version(binary) -> bool:
major: int = binary.optional_header.major_subsystem_version
minor: int = binary.optional_header.minor_subsystem_version
if major == 6 and minor == 1:
return True
return False
+def check_ELF_interpreter(binary) -> bool:
+ expected_interpreter = ELF_INTERPRETER_NAMES[binary.header.machine_type][binary.abstract.header.endianness]
+
+ return binary.concrete.interpreter == expected_interpreter
+
CHECKS = {
'ELF': [
('IMPORTED_SYMBOLS', check_imported_symbols),
('EXPORTED_SYMBOLS', check_exported_symbols),
- ('LIBRARY_DEPENDENCIES', check_ELF_libraries)
+ ('LIBRARY_DEPENDENCIES', check_ELF_libraries),
+ ('INTERPRETER_NAME', check_ELF_interpreter),
],
'MACHO': [
('DYNAMIC_LIBRARIES', check_MACHO_libraries),
@@ -261,30 +262,20 @@ CHECKS = {
]
}
-def identify_executable(executable) -> Optional[str]:
- with open(filename, 'rb') as f:
- magic = f.read(4)
- if magic.startswith(b'MZ'):
- return 'PE'
- elif magic.startswith(b'\x7fELF'):
- return 'ELF'
- elif magic.startswith(b'\xcf\xfa'):
- return 'MACHO'
- return None
-
if __name__ == '__main__':
retval: int = 0
for filename in sys.argv[1:]:
try:
- etype = identify_executable(filename)
- if etype is None:
- print(f'{filename}: unknown format')
+ binary = lief.parse(filename)
+ etype = binary.format.name
+ if etype == lief.EXE_FORMATS.UNKNOWN:
+ print(f'{filename}: unknown executable format')
retval = 1
continue
failed: List[str] = []
for (name, func) in CHECKS[etype]:
- if not func(filename):
+ if not func(binary):
failed.append(name)
if failed:
print(f'{filename}: failed {" ".join(failed)}')
diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py
index 14058e2cc8..0af7cdf5e6 100755
--- a/contrib/devtools/test-security-check.py
+++ b/contrib/devtools/test-security-check.py
@@ -7,6 +7,7 @@ Test script for security-check.py
'''
import os
import subprocess
+from typing import List
import unittest
from utils import determine_wellknown_cmd
@@ -27,7 +28,16 @@ def clean_files(source, executable):
os.remove(executable)
def call_security_check(cc, source, executable, options):
- subprocess.run([*cc,source,'-o',executable] + options, check=True)
+ # This should behave the same as AC_TRY_LINK, so arrange well-known flags
+ # in the same order as autoconf would.
+ #
+ # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for
+ # reference.
+ env_flags: List[str] = []
+ for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']:
+ env_flags += filter(None, os.environ.get(var, '').split(' '))
+
+ subprocess.run([*cc,source,'-o',executable] + env_flags + options, check=True)
p = subprocess.run(['./contrib/devtools/security-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True)
return (p.returncode, p.stdout.rstrip())
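To make the flag ordering described in the comment above concrete, the sketch below (with made-up flag values) shows what the helper assembles: `CFLAGS`/`CPPFLAGS`/`LDFLAGS` are split on spaces, empty entries dropped, and the result placed between the source/output arguments and the per-test options, roughly mirroring autoconf's `ac_link`:

```python
# Hypothetical environment values, for illustration only.
env = {'CFLAGS': '-O2 -g', 'CPPFLAGS': '', 'LDFLAGS': '-Wl,-z,relro'}

env_flags = []
for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']:
    # filter(None, ...) drops the empty strings produced by unset/blank vars
    env_flags += filter(None, env.get(var, '').split(' '))

cmd = ['cc', 'test.c', '-o', 'test'] + env_flags + ['-lm']
print(' '.join(cmd))  # cc test.c -o test -O2 -g -Wl,-z,relro -lm
```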
diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py
index 651589c11b..5246375fe3 100755
--- a/contrib/devtools/test-symbol-check.py
+++ b/contrib/devtools/test-symbol-check.py
@@ -13,7 +13,16 @@ import unittest
from utils import determine_wellknown_cmd
def call_symbol_check(cc: List[str], source, executable, options):
- subprocess.run([*cc,source,'-o',executable] + options, check=True)
+ # This should behave the same as AC_TRY_LINK, so arrange well-known flags
+ # in the same order as autoconf would.
+ #
+ # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for
+ # reference.
+ env_flags: List[str] = []
+ for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']:
+ env_flags += filter(None, os.environ.get(var, '').split(' '))
+
+ subprocess.run([*cc,source,'-o',executable] + env_flags + options, check=True)
p = subprocess.run(['./contrib/devtools/symbol-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True)
os.remove(source)
os.remove(executable)
@@ -51,7 +60,7 @@ class TestSymbolChecks(unittest.TestCase):
''')
self.assertEqual(call_symbol_check(cc, source, executable, ['-lm']),
- (1, executable + ': symbol nextup from unsupported version GLIBC_2.24\n' +
+ (1, executable + ': symbol nextup from unsupported version GLIBC_2.24(3)\n' +
executable + ': failed IMPORTED_SYMBOLS'))
# -lutil is part of the libc6 package so a safe bet that it's installed
@@ -70,7 +79,7 @@ class TestSymbolChecks(unittest.TestCase):
''')
self.assertEqual(call_symbol_check(cc, source, executable, ['-lutil']),
- (1, executable + ': NEEDED library libutil.so.1 is not allowed\n' +
+ (1, executable + ': libutil.so.1 is not in ALLOWED_LIBRARIES!\n' +
executable + ': failed LIBRARY_DEPENDENCIES'))
# finally, check a simple conforming binary
diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh
index 93526f8c45..93476d5f30 100755
--- a/contrib/guix/libexec/build.sh
+++ b/contrib/guix/libexec/build.sh
@@ -169,8 +169,8 @@ case "$HOST" in
arm-linux-gnueabihf) echo /lib/ld-linux-armhf.so.3 ;;
aarch64-linux-gnu) echo /lib/ld-linux-aarch64.so.1 ;;
riscv64-linux-gnu) echo /lib/ld-linux-riscv64-lp64d.so.1 ;;
- powerpc64-linux-gnu) echo /lib/ld64.so.1;;
- powerpc64le-linux-gnu) echo /lib/ld64.so.2;;
+ powerpc64-linux-gnu) echo /lib64/ld64.so.1;;
+ powerpc64le-linux-gnu) echo /lib64/ld64.so.2;;
*) exit 1 ;;
esac
)
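The loader paths selected above are what the new `INTERPRETER_NAME` check in security-check.py compares against. As a rough, hypothetical reconstruction (the key names and table below are illustrative, not the script's actual contents), the check amounts to looking up the expected path by machine type and endianness and comparing it with the binary's `PT_INTERP` string:

```python
# Hypothetical sketch of an interpreter-name table; keys are illustrative.
ELF_INTERPRETER_NAMES = {
    'x86_64':  {'little': '/lib64/ld-linux-x86-64.so.2'},
    'aarch64': {'little': '/lib/ld-linux-aarch64.so.1'},
    'riscv64': {'little': '/lib/ld-linux-riscv64-lp64d.so.1'},
    'ppc64':   {'big':    '/lib64/ld64.so.1',
                'little': '/lib64/ld64.so.2'},
}

def interpreter_ok(machine: str, endianness: str, interpreter: str) -> bool:
    # Pass only when the binary's PT_INTERP matches the expected loader path.
    return ELF_INTERPRETER_NAMES[machine][endianness] == interpreter

assert interpreter_ok('ppc64', 'little', '/lib64/ld64.so.2')
```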
diff --git a/contrib/signet/getcoins.py b/contrib/signet/getcoins.py
index dc203f1254..3d0aa5d132 100755
--- a/contrib/signet/getcoins.py
+++ b/contrib/signet/getcoins.py
@@ -4,18 +4,77 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
import argparse
+import io
+import requests
import subprocess
import sys
-import requests
DEFAULT_GLOBAL_FAUCET = 'https://signetfaucet.com/claim'
+DEFAULT_GLOBAL_CAPTCHA = 'https://signetfaucet.com/captcha'
GLOBAL_FIRST_BLOCK_HASH = '00000086d6b2636cb2a392d45edc4ec544a10024d30141c9adf4bfd9de533b53'
+# braille unicode block
+BASE = 0x2800
+BIT_PER_PIXEL = [
+ [0x01, 0x08],
+ [0x02, 0x10],
+ [0x04, 0x20],
+ [0x40, 0x80],
+]
+BW = 2
+BH = 4
+
+# imagemagick or compatible fork (used for converting SVG)
+CONVERT = 'convert'
+
+class PPMImage:
+ '''
+ Load a PPM image (Pillow-ish API).
+ '''
+ def __init__(self, f):
+ if f.readline() != b'P6\n':
+ raise ValueError('Invalid ppm format: header')
+ line = f.readline()
+ (width, height) = (int(x) for x in line.rstrip().split(b' '))
+ if f.readline() != b'255\n':
+ raise ValueError('Invalid ppm format: color depth')
+ data = f.read(width * height * 3)
+ stride = width * 3
+ self.size = (width, height)
+ self._grid = [[tuple(data[stride * y + 3 * x:stride * y + 3 * (x + 1)]) for x in range(width)] for y in range(height)]
+
+ def getpixel(self, pos):
+ return self._grid[pos[1]][pos[0]]
+
+def print_image(img, threshold=128):
+ '''Print black-and-white image to terminal in braille unicode characters.'''
+ x_blocks = (img.size[0] + BW - 1) // BW
+ y_blocks = (img.size[1] + BH - 1) // BH
+
+ for yb in range(y_blocks):
+ line = []
+ for xb in range(x_blocks):
+ ch = BASE
+ for y in range(BH):
+ for x in range(BW):
+ try:
+ val = img.getpixel((xb * BW + x, yb * BH + y))
+ except IndexError:
+ pass
+ else:
+ if val[0] < threshold:
+ ch |= BIT_PER_PIXEL[y][x]
+ line.append(chr(ch))
+ print(''.join(line))
+
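To make the braille encoding above concrete, here is a small self-contained example (separate from the script) that turns one 2x4 block of pixel intensities into a single braille character: every pixel darker than the threshold sets the bit given by `BIT_PER_PIXEL[y][x]`, and the result is offset from U+2800.

```python
# Same dot layout as above: BIT_PER_PIXEL[y][x] is the bit for the dot at
# column x (0..1) and row y (0..3) of one braille cell.
BASE = 0x2800
BIT_PER_PIXEL = [
    [0x01, 0x08],
    [0x02, 0x10],
    [0x04, 0x20],
    [0x40, 0x80],
]

# One 2x4 block of intensities (0 = black, 255 = white), made up for the demo.
block = [
    [0,   255],
    [0,   255],
    [255,   0],
    [0,     0],
]

ch = BASE
for y in range(4):
    for x in range(2):
        if block[y][x] < 128:         # dark pixel -> raised dot
            ch |= BIT_PER_PIXEL[y][x]
print(hex(ch), chr(ch))               # 0x28e3 and the corresponding glyph
```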
parser = argparse.ArgumentParser(description='Script to get coins from a faucet.', epilog='You may need to start with double-dash (--) when providing bitcoin-cli arguments.')
parser.add_argument('-c', '--cmd', dest='cmd', default='bitcoin-cli', help='bitcoin-cli command to use')
parser.add_argument('-f', '--faucet', dest='faucet', default=DEFAULT_GLOBAL_FAUCET, help='URL of the faucet')
+parser.add_argument('-g', '--captcha', dest='captcha', default=DEFAULT_GLOBAL_CAPTCHA, help='URL of the faucet captcha, or empty if no captcha is needed')
parser.add_argument('-a', '--addr', dest='addr', default='', help='Bitcoin address to which the faucet should send')
parser.add_argument('-p', '--password', dest='password', default='', help='Faucet password, if any')
+parser.add_argument('-n', '--amount', dest='amount', default='0.001', help='Amount to request (0.001-0.1, default is 0.001)')
+parser.add_argument('-i', '--imagemagick', dest='imagemagick', default=CONVERT, help='Path to imagemagick convert utility')
parser.add_argument('bitcoin_cli_args', nargs='*', help='Arguments to pass on to bitcoin-cli (default: -signet)')
args = parser.parse_args()
@@ -43,14 +102,43 @@ if args.faucet.lower() == DEFAULT_GLOBAL_FAUCET:
if curr_signet_hash != GLOBAL_FIRST_BLOCK_HASH:
 print('The global faucet cannot be used with a custom Signet network. Please use the global signet or set up your custom faucet to use this functionality.\n')
exit(1)
+else:
+ # For custom faucets, don't request captcha by default.
+ if args.captcha == DEFAULT_GLOBAL_CAPTCHA:
+ args.captcha = ''
if args.addr == '':
# get address for receiving coins
args.addr = bitcoin_cli(['getnewaddress', 'faucet', 'bech32'])
-data = {'address': args.addr, 'password': args.password}
+data = {'address': args.addr, 'password': args.password, 'amount': args.amount}
+
+# Store cookies
+# for debugging: print(session.cookies.get_dict())
+session = requests.Session()
+
+if args.captcha != '': # Retrieve a captcha
+ try:
+ res = session.get(args.captcha)
+ except:
+ print('Unexpected error when contacting faucet:', sys.exc_info()[0])
+ exit(1)
+
+ # Convert SVG image to PPM, and load it
+ try:
+ rv = subprocess.run([args.imagemagick, '-', '-depth', '8', 'ppm:-'], input=res.content, check=True, capture_output=True)
+ except FileNotFoundError:
+ print('The binary', args.imagemagick, 'could not be found. Please make sure ImageMagick (or a compatible fork) is installed and that the correct path is specified.')
+ exit(1)
+ img = PPMImage(io.BytesIO(rv.stdout))
+
+ # Terminal interaction
+ print_image(img)
+ print('Enter captcha: ', end='')
+ data['captcha'] = input()
+
try:
- res = requests.post(args.faucet, data=data)
+ res = session.post(args.faucet, data=data)
except:
print('Unexpected error when contacting faucet:', sys.exc_info()[0])
exit(1)
diff --git a/contrib/tracing/README.md b/contrib/tracing/README.md
index 047354cda1..1f93474fa0 100644
--- a/contrib/tracing/README.md
+++ b/contrib/tracing/README.md
@@ -176,17 +176,12 @@ third acts as a duration threshold in milliseconds. When the `ConnectBlock()`
function takes longer than the threshold, information about the block is
printed. For more details, see the header comment in the script.
-By default, `bpftrace` limits strings to 64 bytes due to the limited stack size
-in the kernel VM. Block hashes as zero-terminated hex strings are 65 bytes which
-exceed the string limit. The string size limit can be set to 65 bytes with the
-environment variable `BPFTRACE_STRLEN`.
-
The following command can be used to benchmark, for example, `ConnectBlock()`
between height 20000 and 38000 on SigNet while logging all blocks that take
longer than 25ms to connect.
```
-$ BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt 20000 38000 25
+$ bpftrace contrib/tracing/connectblock_benchmark.bt 20000 38000 25
```
In a different terminal, starting Bitcoin Core in SigNet mode and with
diff --git a/contrib/tracing/connectblock_benchmark.bt b/contrib/tracing/connectblock_benchmark.bt
index d268eff7f8..6e7a98ef07 100755
--- a/contrib/tracing/connectblock_benchmark.bt
+++ b/contrib/tracing/connectblock_benchmark.bt
@@ -4,11 +4,8 @@
USAGE:
- BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt <start height> <end height> <logging threshold in ms>
+ bpftrace contrib/tracing/connectblock_benchmark.bt <start height> <end height> <logging threshold in ms>
- - The environment variable BPFTRACE_STRLEN needs to be set to 65 chars as
- strings are limited to 64 chars by default. Hex strings with Bitcoin block
- hashes are 64 hex chars + 1 null-termination char.
- <start height> sets the height at which the benchmark should start. Setting
the start height to 0 starts the benchmark immediately, even before the
first block is connected.
@@ -23,7 +20,7 @@
EXAMPLES:
- BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt 300000 680000 1000
+ bpftrace contrib/tracing/connectblock_benchmark.bt 300000 680000 1000
 When run together with 'bitcoind -reindex', this benchmarks the time it takes to
connect the blocks between height 300.000 and 680.000 (inclusive) and prints
@@ -31,7 +28,7 @@
histogram with block connection times when the benchmark is finished.
- BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt 0 0 500
+ bpftrace contrib/tracing/connectblock_benchmark.bt 0 0 500
 When running together with 'bitcoind', all newly connected blocks that
take longer than 500ms to connect are logged. A histogram with block
@@ -107,14 +104,23 @@ usdt:./src/bitcoind:validation:block_connected /arg1 >= $1 && (arg1 <= $2 || $2
*/
usdt:./src/bitcoind:validation:block_connected / (uint64) arg5 / 1000> $3 /
{
- $hash_str = str(arg0);
+ $hash = arg0;
$height = (int32) arg1;
$transactions = (uint64) arg2;
$inputs = (int32) arg3;
$sigops = (int64) arg4;
$duration = (int64) arg5;
- printf("Block %d (%s) %4d tx %5d ins %5d sigops took %4d ms\n", $height, $hash_str, $transactions, $inputs, $sigops, (uint64) $duration / 1000);
+
+ printf("Block %d (", $height);
+ /* Prints each byte of the block hash as hex in big-endian (the block-explorer format) */
+ $p = $hash + 31;
+ unroll(32) {
+ $b = *(uint8*)$p;
+ printf("%02x", $b);
+ $p -= 1;
+ }
+ printf(") %4d tx %5d ins %5d sigops took %4d ms\n", $transactions, $inputs, $sigops, (uint64) $duration / 1000);
}
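For reference, the `unroll(32)` loop above simply prints the 32 in-memory hash bytes from last to first, producing the big-endian hex string that block explorers show. The same conversion in a few lines of Python, using a made-up byte string:

```python
# 32 hash bytes as they sit in memory (little-endian); value is illustrative.
raw = bytes(range(32))

# Walk the bytes from last to first, as the bpftrace loop does.
display_hex = ''.join(f'{b:02x}' for b in reversed(raw))
print(display_hex)

# Equivalent one-liner:
assert display_hex == raw[::-1].hex()
```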
diff --git a/depends/packages/expat.mk b/depends/packages/expat.mk
index 902fe43be2..41c1114be0 100644
--- a/depends/packages/expat.mk
+++ b/depends/packages/expat.mk
@@ -23,5 +23,5 @@ define $(package)_stage_cmds
endef
define $(package)_postprocess_cmds
- rm lib/*.la
+ rm -rf share lib/*.la
endef
diff --git a/depends/packages/fontconfig.mk b/depends/packages/fontconfig.mk
index 0d5f94f380..22b5022f06 100644
--- a/depends/packages/fontconfig.mk
+++ b/depends/packages/fontconfig.mk
@@ -29,5 +29,5 @@ define $(package)_stage_cmds
endef
define $(package)_postprocess_cmds
- rm lib/*.la
+ rm -rf var lib/*.la
endef
diff --git a/depends/packages/freetype.mk b/depends/packages/freetype.mk
index a1584608e1..aebc8a5f3b 100644
--- a/depends/packages/freetype.mk
+++ b/depends/packages/freetype.mk
@@ -23,5 +23,5 @@ define $(package)_stage_cmds
endef
define $(package)_postprocess_cmds
- rm lib/*.la
+ rm -rf share/man lib/*.la
endef
diff --git a/depends/packages/libXau.mk b/depends/packages/libXau.mk
index 4c55c2df04..24e0e9d325 100644
--- a/depends/packages/libXau.mk
+++ b/depends/packages/libXau.mk
@@ -30,5 +30,5 @@ define $(package)_stage_cmds
endef
define $(package)_postprocess_cmds
- rm lib/*.la
+ rm -rf share lib/*.la
endef
diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk
index 9004b064d6..12e0494ad4 100644
--- a/depends/packages/qt.mk
+++ b/depends/packages/qt.mk
@@ -248,7 +248,6 @@ endef
define $(package)_config_cmds
export PKG_CONFIG_SYSROOT_DIR=/ && \
export PKG_CONFIG_LIBDIR=$(host_prefix)/lib/pkgconfig && \
- export PKG_CONFIG_PATH=$(host_prefix)/share/pkgconfig && \
cd qtbase && \
./configure -top-level $($(package)_config_opts)
endef
diff --git a/doc/descriptors.md b/doc/descriptors.md
index 3bbb626a42..57a0f99d70 100644
--- a/doc/descriptors.md
+++ b/doc/descriptors.md
@@ -139,6 +139,47 @@ Key order does not matter for `sortedmulti()`. `sortedmulti()` behaves in the sa
as `multi()` does but the keys are reordered in the resulting script such that they
are lexicographically ordered as described in BIP67.
+#### Basic multisig example
+
+For a good example of a basic M-of-N multisig between multiple participants using descriptor
+wallets and PSBTs, as well as a signing flow, see [this functional test](/test/functional/wallet_multisig_descriptor_psbt.py).
+
+Disclaimers: This example serves as a quick-start and is kept deliberately basic for readability. A downside of the approach
+outlined here is that each participant must maintain (and back up) two separate wallets: a signer and the corresponding multisig.
+It should also be noted that privacy best practices are not followed by default here - participants should take care to only use the signer to sign
+transactions related to the multisig. Lastly, it is not recommended to use anything other than a Bitcoin Core descriptor wallet to serve as your
+signer(s). Other wallets, whether hardware or software, likely impose additional checks and safeguards to prevent users from signing transactions that
+could lead to loss of funds, or that are deemed security hazards. Conforming to various 3rd-party checks and verifications is not in the scope of this example.
+
+The basic steps are:
+
+ 1. Every participant generates an xpub. The most straightforward way is to create a new descriptor wallet which we will refer to as
+ the participant's signer wallet. Avoid reusing this wallet for any purpose other than signing transactions from the
+ corresponding multisig we are about to create. Hint: extract the wallet's xpubs using `listdescriptors` and pick the one from the
+ `pkh` descriptor since it's least likely to be accidentally reused (legacy addresses)
+ 2. Create a watch-only descriptor wallet (blank, private keys disabled). Now the multisig is created by importing the two descriptors:
+ `wsh(sortedmulti(<M>,XPUB1/0/*,XPUB2/0/*,…,XPUBN/0/*))` and `wsh(sortedmulti(<M>,XPUB1/1/*,XPUB2/1/*,…,XPUBN/1/*))`
+ (one descriptor w/ `0` for receiving addresses and another w/ `1` for change). Every participant does this
+ 3. A receiving address is generated for the multisig. As a check to ensure step 2 was done correctly, every participant
+ should verify they get the same addresses
+ 4. Funds are sent to the resulting address
+ 5. A sending transaction from the multisig is created using `walletcreatefundedpsbt` (anyone can initiate this). It is simple to do
+ this in the GUI by going to the `Send` tab in the multisig wallet and creating an unsigned transaction (PSBT)
+ 6. At least `M` participants check the PSBT with their multisig using `decodepsbt` to verify the transaction is OK before signing it.
+ 7. (If OK) the participant signs the PSBT with their signer wallet using `walletprocesspsbt`. It is simple to do this in the GUI by
+ loading the PSBT from file and signing it
+ 8. The signed PSBTs are collected with `combinepsbt`, finalized w/ `finalizepsbt`, and then the resulting transaction is broadcast
+ to the network. Note that any wallet (e.g. one of the signers or the multisig) is capable of doing this.
+ 9. Check that balances are correct after the transaction has been included in a block
+
+You may prefer a daisy-chained signing flow where each participant signs the PSBT one after another until
+the PSBT has been signed `M` times and is "complete." For the most part, the steps above remain the same, except steps 6 and 7
+change slightly from signing the original PSBT in parallel to signing it in series. `combinepsbt` is not necessary with
+this signing flow and the last (`M`th) signer can just broadcast the PSBT after signing. Note that a parallel signing flow may be
+preferable in cases where there are more signers. This signing flow is also included in the test / Python example.
+[The test](/test/functional/wallet_multisig_descriptor_psbt.py) is meant to be documentation as much as it is a functional test, so
+it is kept as simple and readable as possible.
+
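As a minimal sketch of step 2 (not taken from the referenced functional test; the xpubs are placeholders), the receive and change descriptors for a 2-of-3 setup could be assembled like this before being passed to `importdescriptors` (real descriptors also need the checksum that `getdescriptorinfo` returns):

```python
# Illustrative only: build the two sortedmulti() descriptors for a 2-of-3
# multisig from placeholder xpubs.
xpubs = ['xpubA', 'xpubB', 'xpubC']  # placeholders, not real keys
M = 2

def multisig_descriptor(change: int) -> str:
    keys = ','.join(f'{xpub}/{change}/*' for xpub in xpubs)
    return f'wsh(sortedmulti({M},{keys}))'

receive_desc = multisig_descriptor(0)  # descriptor for receiving addresses
change_desc = multisig_descriptor(1)   # descriptor for change
print(receive_desc)  # wsh(sortedmulti(2,xpubA/0/*,xpubB/0/*,xpubC/0/*))
```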
### BIP32 derived keys and chains
Most modern wallet software and hardware uses keys that are derived using
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index 0a5a7066ab..a05ea93a46 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -12,6 +12,7 @@ Developer Notes
- [Generating Documentation](#generating-documentation)
- [Development tips and tricks](#development-tips-and-tricks)
- [Compiling for debugging](#compiling-for-debugging)
+ - [Show sources in debugging](#show-sources-in-debugging)
- [Compiling for gprof profiling](#compiling-for-gprof-profiling)
- [`debug.log`](#debuglog)
- [Signet, testnet, and regtest modes](#signet-testnet-and-regtest-modes)
@@ -253,6 +254,35 @@ Development tips and tricks
Run configure with `--enable-debug` to add additional compiler flags that
produce better debugging builds.
+### Show sources in debugging
+
+If you have ccache enabled, absolute paths are stripped from debug information
+with the `-fdebug-prefix-map` and `-fmacro-prefix-map` options (if supported by the
+compiler). This might break source file detection if you move the binaries
+after compilation, debug from a directory other than the project root, or use
+an IDE that only supports absolute paths for debugging.
+
+There are a few possible fixes:
+
+1. Configure source file mapping.
+
+For `gdb` create or append to `.gdbinit` file:
+```
+set substitute-path ./src /path/to/project/root/src
+```
+
+For `lldb` create or append to `.lldbinit` file:
+```
+settings set target.source-map ./src /path/to/project/root/src
+```
+
+2. Add a symlink to the `./src` directory:
+```
+ln -s /path/to/project/root/src src
+```
+
+3. Use `debugedit` to modify debug information in the binary.
+
### Compiling for gprof profiling
Run configure with the `--enable-gprof` option, then make.
diff --git a/doc/psbt.md b/doc/psbt.md
index c411b31d5d..0f31cb8eba 100644
--- a/doc/psbt.md
+++ b/doc/psbt.md
@@ -92,6 +92,9 @@ hardware implementations will typically implement multiple roles simultaneously.
#### Multisig with multiple Bitcoin Core instances
+For a quick start see [Basic M-of-N multisig example using descriptor wallets and PSBTs](./descriptors.md#basic-multisig-example).
+If you are using legacy wallets, feel free to continue with the example provided here.
+
Alice, Bob, and Carol want to create a 2-of-3 multisig address. They're all using
Bitcoin Core. We assume their wallets only contain the multisig funds. In case
they also have a personal wallet, this can be accomplished through the
diff --git a/doc/release-notes-23093.md b/doc/release-notes-23093.md
new file mode 100644
index 0000000000..68fbaec53c
--- /dev/null
+++ b/doc/release-notes-23093.md
@@ -0,0 +1,11 @@
+Notable changes
+===============
+
+Updated RPCs
+------------
+
+- `upgradewallet` will now automatically flush the keypool if upgrading
+from a non-HD wallet to an HD wallet, to immediately start using the
+newly-generated HD keys.
+- A new RPC `newkeypool` has been added, which will flush (entirely
+clear and refill) the keypool.
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 3bf3e47169..b460cd3eb2 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -61,6 +61,12 @@ P2P and network changes
They will become eligible for address gossip after sending an ADDR, ADDRV2,
or GETADDR message. (#21528)
+Fee estimation changes
+----------------------
+
+- Fee estimation now takes the feerate of replacement (RBF) transactions into
+ account. (#22539)
+
Rescan startup parameter removed
--------------------------------
diff --git a/doc/tracing.md b/doc/tracing.md
index 87fc9603fe..57104c43a0 100644
--- a/doc/tracing.md
+++ b/doc/tracing.md
@@ -101,19 +101,12 @@ Is called *after* a block is connected to the chain. Can, for example, be used
to benchmark block connections together with `-reindex`.
Arguments passed:
-1. Block Header Hash as `pointer to C-style String` (64 characters)
+1. Block Header Hash as `pointer to unsigned chars` (i.e. 32 bytes in little-endian)
2. Block Height as `int32`
3. Transactions in the Block as `uint64`
 4. Inputs spent in the Block as `int32`
 5. SigOps in the Block (excluding coinbase SigOps) as `uint64`
6. Time it took to connect the Block in microseconds (µs) as `uint64`
-7. Block Header Hash as `pointer to unsigned chars` (i.e. 32 bytes in little-endian)
-
-Note: The 7th argument can't be accessed by bpftrace and is purposefully chosen
-to be the block header hash as bytes. See [bpftrace argument limit] for more
-details.
-
-[bpftrace argument limit]: #bpftrace-argument-limit
## Adding tracepoints to Bitcoin Core
diff --git a/src/Makefile.am b/src/Makefile.am
index b366252ba3..9d15120b72 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -117,6 +117,7 @@ endif
BITCOIN_CORE_H = \
addrdb.h \
addrman.h \
+ addrman_impl.h \
attributes.h \
banman.h \
base58.h \
@@ -807,20 +808,8 @@ clean-local:
$(AM_V_GEN) $(WINDRES) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(CPPFLAGS) -DWINDRES_PREPROC -i $< -o $@
check-symbols: $(bin_PROGRAMS)
-if TARGET_DARWIN
- @echo "Checking macOS dynamic libraries..."
+ @echo "Running symbol and dynamic library checks..."
$(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
-endif
-
-if TARGET_WINDOWS
- @echo "Checking Windows dynamic libraries..."
- $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
-endif
-
-if TARGET_LINUX
- @echo "Checking glibc back compat..."
- $(AM_V_at) CPPFILT='$(CPPFILT)' $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
-endif
check-security: $(bin_PROGRAMS)
if HARDEN
diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include
index f4b0b3adbe..1e3d75a8d8 100644
--- a/src/Makefile.qt.include
+++ b/src/Makefile.qt.include
@@ -168,10 +168,10 @@ BITCOIN_QT_H = \
qt/walletview.h \
qt/winshutdownmonitor.h
-RES_FONTS = \
+QT_RES_FONTS = \
qt/res/fonts/RobotoMono-Bold.ttf
-RES_ICONS = \
+QT_RES_ICONS = \
qt/res/icons/add.png \
qt/res/icons/address-book.png \
qt/res/icons/bitcoin.ico \
@@ -287,9 +287,9 @@ if ENABLE_WALLET
BITCOIN_QT_CPP += $(BITCOIN_QT_WALLET_CPP)
endif # ENABLE_WALLET
-RES_ANIMATION = $(wildcard $(srcdir)/qt/res/animation/spinner-*.png)
+QT_RES_ANIMATION = $(wildcard $(srcdir)/qt/res/animation/spinner-*.png)
-BITCOIN_RC = qt/res/bitcoin-qt-res.rc
+BITCOIN_QT_RC = qt/res/bitcoin-qt-res.rc
BITCOIN_QT_INCLUDES = -DQT_NO_KEYWORDS -DQT_USE_QSTRINGBUILDER
@@ -299,7 +299,7 @@ qt_libbitcoinqt_a_CXXFLAGS = $(AM_CXXFLAGS) $(QT_PIE_FLAGS)
qt_libbitcoinqt_a_OBJCXXFLAGS = $(AM_OBJCXXFLAGS) $(QT_PIE_FLAGS)
qt_libbitcoinqt_a_SOURCES = $(BITCOIN_QT_CPP) $(BITCOIN_QT_H) $(QT_FORMS_UI) \
- $(QT_QRC) $(QT_QRC_LOCALE) $(QT_TS) $(RES_FONTS) $(RES_ICONS) $(RES_ANIMATION)
+ $(QT_QRC) $(QT_QRC_LOCALE) $(QT_TS) $(QT_RES_FONTS) $(QT_RES_ICONS) $(QT_RES_ANIMATION)
if TARGET_DARWIN
qt_libbitcoinqt_a_SOURCES += $(BITCOIN_MM)
endif
@@ -321,7 +321,7 @@ bitcoin_qt_cxxflags = $(AM_CXXFLAGS) $(QT_PIE_FLAGS)
bitcoin_qt_sources = qt/main.cpp
if TARGET_WINDOWS
- bitcoin_qt_sources += $(BITCOIN_RC)
+ bitcoin_qt_sources += $(BITCOIN_QT_RC)
endif
bitcoin_qt_ldadd = qt/libbitcoinqt.a $(LIBBITCOIN_SERVER)
if ENABLE_WALLET
@@ -371,7 +371,7 @@ $(QT_QRC_LOCALE_CPP): $(QT_QRC_LOCALE) $(QT_QM)
$(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(RCC) -name bitcoin_locale --format-version 1 $(@D)/temp_$(<F) > $@
@rm $(@D)/temp_$(<F)
-$(QT_QRC_CPP): $(QT_QRC) $(QT_FORMS_H) $(RES_FONTS) $(RES_ICONS) $(RES_ANIMATION)
+$(QT_QRC_CPP): $(QT_QRC) $(QT_FORMS_H) $(QT_RES_FONTS) $(QT_RES_ICONS) $(QT_RES_ANIMATION)
@test -f $(RCC)
$(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(RCC) -name bitcoin --format-version 1 $< > $@
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index be63214c23..d70793ffa9 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -119,8 +119,9 @@ BITCOIN_TESTS =\
test/sanity_tests.cpp \
test/scheduler_tests.cpp \
test/script_p2sh_tests.cpp \
- test/script_tests.cpp \
+ test/script_parse_tests.cpp \
test/script_standard_tests.cpp \
+ test/script_tests.cpp \
test/scriptnum_tests.cpp \
test/serfloat_tests.cpp \
test/serialize_tests.cpp \
diff --git a/src/addrdb.cpp b/src/addrdb.cpp
index 94c77a6d89..bdb1fc6b2b 100644
--- a/src/addrdb.cpp
+++ b/src/addrdb.cpp
@@ -58,7 +58,7 @@ bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data
if (fileout.IsNull()) {
fileout.fclose();
remove(pathTmp);
- return error("%s: Failed to open file %s", __func__, pathTmp.string());
+ return error("%s: Failed to open file %s", __func__, fs::PathToString(pathTmp));
}
// Serialize
@@ -70,7 +70,7 @@ bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data
if (!FileCommit(fileout.Get())) {
fileout.fclose();
remove(pathTmp);
- return error("%s: Failed to flush file %s", __func__, pathTmp.string());
+ return error("%s: Failed to flush file %s", __func__, fs::PathToString(pathTmp));
}
fileout.fclose();
@@ -122,8 +122,8 @@ void DeserializeFileDB(const fs::path& path, Data& data, int version)
} // namespace
CBanDB::CBanDB(fs::path ban_list_path)
- : m_banlist_dat(ban_list_path.string() + ".dat"),
- m_banlist_json(ban_list_path.string() + ".json")
+ : m_banlist_dat(ban_list_path + ".dat"),
+ m_banlist_json(ban_list_path + ".json")
{
}
@@ -143,7 +143,7 @@ bool CBanDB::Write(const banmap_t& banSet)
bool CBanDB::Read(banmap_t& banSet)
{
if (fs::exists(m_banlist_dat)) {
- LogPrintf("banlist.dat ignored because it can only be read by " PACKAGE_NAME " version 22.x. Remove %s to silence this warning.\n", m_banlist_dat);
+ LogPrintf("banlist.dat ignored because it can only be read by " PACKAGE_NAME " version 22.x. Remove %s to silence this warning.\n", fs::quoted(fs::PathToString(m_banlist_dat)));
}
// If the JSON banlist does not exist, then recreate it
if (!fs::exists(m_banlist_json)) {
@@ -155,7 +155,7 @@ bool CBanDB::Read(banmap_t& banSet)
if (!util::ReadSettings(m_banlist_json, settings, errors)) {
for (const auto& err : errors) {
- LogPrintf("Cannot load banlist %s: %s\n", m_banlist_json.string(), err);
+ LogPrintf("Cannot load banlist %s: %s\n", fs::PathToString(m_banlist_json), err);
}
return false;
}
@@ -163,28 +163,28 @@ bool CBanDB::Read(banmap_t& banSet)
try {
BanMapFromJson(settings[JSON_KEY], banSet);
} catch (const std::runtime_error& e) {
- LogPrintf("Cannot parse banlist %s: %s\n", m_banlist_json.string(), e.what());
+ LogPrintf("Cannot parse banlist %s: %s\n", fs::PathToString(m_banlist_json), e.what());
return false;
}
return true;
}
-bool DumpPeerAddresses(const ArgsManager& args, const CAddrMan& addr)
+bool DumpPeerAddresses(const ArgsManager& args, const AddrMan& addr)
{
const auto pathAddr = args.GetDataDirNet() / "peers.dat";
return SerializeFileDB("peers", pathAddr, addr, CLIENT_VERSION);
}
-void ReadFromStream(CAddrMan& addr, CDataStream& ssPeers)
+void ReadFromStream(AddrMan& addr, CDataStream& ssPeers)
{
DeserializeDB(ssPeers, addr, false);
}
-std::optional<bilingual_str> LoadAddrman(const std::vector<bool>& asmap, const ArgsManager& args, std::unique_ptr<CAddrMan>& addrman)
+std::optional<bilingual_str> LoadAddrman(const std::vector<bool>& asmap, const ArgsManager& args, std::unique_ptr<AddrMan>& addrman)
{
auto check_addrman = std::clamp<int32_t>(args.GetIntArg("-checkaddrman", DEFAULT_ADDRMAN_CONSISTENCY_CHECKS), 0, 1000000);
- addrman = std::make_unique<CAddrMan>(asmap, /* deterministic */ false, /* consistency_check_ratio */ check_addrman);
+ addrman = std::make_unique<AddrMan>(asmap, /* deterministic */ false, /* consistency_check_ratio */ check_addrman);
int64_t nStart = GetTimeMillis();
const auto path_addr{args.GetDataDirNet() / "peers.dat"};
@@ -193,13 +193,13 @@ std::optional<bilingual_str> LoadAddrman(const std::vector<bool>& asmap, const A
LogPrintf("Loaded %i addresses from peers.dat %dms\n", addrman->size(), GetTimeMillis() - nStart);
} catch (const DbNotFoundError&) {
// Addrman can be in an inconsistent state after failure, reset it
- addrman = std::make_unique<CAddrMan>(asmap, /* deterministic */ false, /* consistency_check_ratio */ check_addrman);
- LogPrintf("Creating peers.dat because the file was not found (%s)\n", path_addr);
+ addrman = std::make_unique<AddrMan>(asmap, /* deterministic */ false, /* consistency_check_ratio */ check_addrman);
+ LogPrintf("Creating peers.dat because the file was not found (%s)\n", fs::quoted(fs::PathToString(path_addr)));
DumpPeerAddresses(args, *addrman);
} catch (const std::exception& e) {
addrman = nullptr;
return strprintf(_("Invalid or corrupt peers.dat (%s). If you believe this is a bug, please report it to %s. As a workaround, you can move the file (%s) out of the way (rename, move, or delete) to have a new one created on the next start."),
- e.what(), PACKAGE_BUGREPORT, path_addr);
+ e.what(), PACKAGE_BUGREPORT, fs::quoted(fs::PathToString(path_addr)));
}
return std::nullopt;
}
@@ -215,7 +215,7 @@ std::vector<CAddress> ReadAnchors(const fs::path& anchors_db_path)
std::vector<CAddress> anchors;
try {
DeserializeFileDB(anchors_db_path, anchors, CLIENT_VERSION | ADDRV2_FORMAT);
- LogPrintf("Loaded %i addresses from %s\n", anchors.size(), anchors_db_path.filename());
+ LogPrintf("Loaded %i addresses from %s\n", anchors.size(), fs::quoted(fs::PathToString(anchors_db_path.filename())));
} catch (const std::exception&) {
anchors.clear();
}
diff --git a/src/addrdb.h b/src/addrdb.h
index 33cc1f9204..19be4b5bb4 100644
--- a/src/addrdb.h
+++ b/src/addrdb.h
@@ -14,14 +14,14 @@
#include <vector>
class ArgsManager;
-class CAddrMan;
+class AddrMan;
class CAddress;
class CDataStream;
struct bilingual_str;
-bool DumpPeerAddresses(const ArgsManager& args, const CAddrMan& addr);
+bool DumpPeerAddresses(const ArgsManager& args, const AddrMan& addr);
/** Only used by tests. */
-void ReadFromStream(CAddrMan& addr, CDataStream& ssPeers);
+void ReadFromStream(AddrMan& addr, CDataStream& ssPeers);
/** Access to the banlist database (banlist.json) */
class CBanDB
@@ -48,7 +48,7 @@ public:
};
/** Returns an error string on failure */
-std::optional<bilingual_str> LoadAddrman(const std::vector<bool>& asmap, const ArgsManager& args, std::unique_ptr<CAddrMan>& addrman);
+std::optional<bilingual_str> LoadAddrman(const std::vector<bool>& asmap, const ArgsManager& args, std::unique_ptr<AddrMan>& addrman);
/**
* Dump the anchor IP address database (anchors.dat)
diff --git a/src/addrman.cpp b/src/addrman.cpp
index 7c6b8fe64d..c364a7710b 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -4,25 +4,27 @@
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
#include <addrman.h>
+#include <addrman_impl.h>
-#include <clientversion.h>
#include <hash.h>
-#include <logging.h>
#include <netaddress.h>
+#include <protocol.h>
+#include <random.h>
#include <serialize.h>
#include <streams.h>
+#include <timedata.h>
+#include <tinyformat.h>
+#include <uint256.h>
#include <util/check.h>
#include <cmath>
#include <optional>
-#include <unordered_map>
-#include <unordered_set>
/** Over how many buckets entries with tried addresses from a single group (/16 for IPv4) are spread */
static constexpr uint32_t ADDRMAN_TRIED_BUCKETS_PER_GROUP{8};
/** Over how many buckets entries with new addresses originating from a single group are spread */
static constexpr uint32_t ADDRMAN_NEW_BUCKETS_PER_SOURCE_GROUP{64};
-/** Maximum number of times an address can be added to the new table */
+/** Maximum number of times an address can occur in the new table */
static constexpr int32_t ADDRMAN_NEW_BUCKETS_PER_ADDRESS{8};
/** How old addresses can maximally be */
static constexpr int64_t ADDRMAN_HORIZON_DAYS{30};
@@ -39,7 +41,7 @@ static constexpr size_t ADDRMAN_SET_TRIED_COLLISION_SIZE{10};
/** The maximum time we'll spend trying to resolve a tried table collision, in seconds */
static constexpr int64_t ADDRMAN_TEST_WINDOW{40*60}; // 40 minutes
-int CAddrInfo::GetTriedBucket(const uint256& nKey, const std::vector<bool> &asmap) const
+int AddrInfo::GetTriedBucket(const uint256& nKey, const std::vector<bool>& asmap) const
{
uint64_t hash1 = (CHashWriter(SER_GETHASH, 0) << nKey << GetKey()).GetCheapHash();
uint64_t hash2 = (CHashWriter(SER_GETHASH, 0) << nKey << GetGroup(asmap) << (hash1 % ADDRMAN_TRIED_BUCKETS_PER_GROUP)).GetCheapHash();
@@ -49,7 +51,7 @@ int CAddrInfo::GetTriedBucket(const uint256& nKey, const std::vector<bool> &asma
return tried_bucket;
}
-int CAddrInfo::GetNewBucket(const uint256& nKey, const CNetAddr& src, const std::vector<bool> &asmap) const
+int AddrInfo::GetNewBucket(const uint256& nKey, const CNetAddr& src, const std::vector<bool>& asmap) const
{
std::vector<unsigned char> vchSourceGroupKey = src.GetGroup(asmap);
uint64_t hash1 = (CHashWriter(SER_GETHASH, 0) << nKey << GetGroup(asmap) << vchSourceGroupKey).GetCheapHash();
@@ -60,13 +62,13 @@ int CAddrInfo::GetNewBucket(const uint256& nKey, const CNetAddr& src, const std:
return new_bucket;
}
-int CAddrInfo::GetBucketPosition(const uint256 &nKey, bool fNew, int nBucket) const
+int AddrInfo::GetBucketPosition(const uint256& nKey, bool fNew, int nBucket) const
{
uint64_t hash1 = (CHashWriter(SER_GETHASH, 0) << nKey << (fNew ? uint8_t{'N'} : uint8_t{'K'}) << nBucket << GetKey()).GetCheapHash();
return hash1 % ADDRMAN_BUCKET_SIZE;
}
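For intuition about the bucketing above, here is a simplified model (not the real implementation: addrman uses SipHash via `CHashWriter` keyed with the secret `nKey`, and the real group/key encodings). Two chained hashes first pin an address's network group to a handful of slots and then spread those slots over the whole tried table, with a third hash picking the position inside the bucket:

```python
import hashlib

# Simplified stand-ins for the real constants and the keyed hash.
TRIED_BUCKET_COUNT = 256
TRIED_BUCKETS_PER_GROUP = 8
BUCKET_SIZE = 64

def cheap_hash(*parts: bytes) -> int:
    # Stand-in for (CHashWriter << ...).GetCheapHash()
    digest = hashlib.sha256(b''.join(parts)).digest()
    return int.from_bytes(digest[:8], 'little')

def tried_bucket(nkey: bytes, addr_key: bytes, group: bytes) -> int:
    hash1 = cheap_hash(nkey, addr_key)
    # Limit this group to TRIED_BUCKETS_PER_GROUP key-dependent slots...
    hash2 = cheap_hash(nkey, group, str(hash1 % TRIED_BUCKETS_PER_GROUP).encode())
    # ...then map those slots onto the full table.
    return hash2 % TRIED_BUCKET_COUNT

def bucket_position(nkey: bytes, new: bool, bucket: int, addr_key: bytes) -> int:
    tag = b'N' if new else b'K'
    return cheap_hash(nkey, tag, str(bucket).encode(), addr_key) % BUCKET_SIZE

nkey = b'\x01' * 32
b = tried_bucket(nkey, b'203.0.113.5:8333', b'203.0.113.0/16')
print(b, bucket_position(nkey, False, b, b'203.0.113.5:8333'))
```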
-bool CAddrInfo::IsTerrible(int64_t nNow) const
+bool AddrInfo::IsTerrible(int64_t nNow) const
{
if (nLastTry && nLastTry >= nNow - 60) // never remove things tried in the last minute
return false;
@@ -86,7 +88,7 @@ bool CAddrInfo::IsTerrible(int64_t nNow) const
return false;
}
-double CAddrInfo::GetChance(int64_t nNow) const
+double AddrInfo::GetChance(int64_t nNow) const
{
double fChance = 1.0;
int64_t nSinceLastTry = std::max<int64_t>(nNow - nLastTry, 0);
@@ -101,7 +103,7 @@ double CAddrInfo::GetChance(int64_t nNow) const
return fChance;
}
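The chance computed above is consumed by `Select_` further down: selection repeatedly picks a random slot and accepts it with probability proportional to `GetChance()`, multiplying the acceptance factor by 1.2 after each rejection so the loop finishes quickly. A simplified model of that interplay (not the production code; entries are plain dicts here):

```python
import random

def get_chance(now: int, last_try: int, attempts: int) -> float:
    # Mirrors the shape of AddrInfo::GetChance: deprioritize addresses tried
    # in the last 10 minutes and reduce the chance per failed attempt.
    chance = 1.0
    if now - last_try < 60 * 10:
        chance *= 0.01
    chance *= pow(0.66, min(attempts, 8))
    return chance

def select(entries, now: int):
    factor = 1.0
    while True:
        info = random.choice(entries)
        threshold = factor * get_chance(now, info['last_try'], info['attempts']) * (1 << 30)
        if random.getrandbits(30) < threshold:
            return info
        factor *= 1.2  # make the next pick easier to accept

peers = [{'addr': '198.51.100.7', 'last_try': 0,   'attempts': 0},
         {'addr': '192.0.2.9',    'last_try': 990, 'attempts': 3}]
print(select(peers, now=1000))
```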
-CAddrMan::CAddrMan(std::vector<bool> asmap, bool deterministic, int32_t consistency_check_ratio)
+AddrManImpl::AddrManImpl(std::vector<bool>&& asmap, bool deterministic, int32_t consistency_check_ratio)
: insecure_rand{deterministic}
, nKey{deterministic ? uint256{1} : insecure_rand.rand256()}
, m_consistency_check_ratio{consistency_check_ratio}
@@ -119,8 +121,13 @@ CAddrMan::CAddrMan(std::vector<bool> asmap, bool deterministic, int32_t consiste
}
}
+AddrManImpl::~AddrManImpl()
+{
+ nKey.SetNull();
+}
+
template <typename Stream>
-void CAddrMan::Serialize(Stream& s_) const
+void AddrManImpl::Serialize(Stream& s_) const
{
LOCK(cs);
@@ -183,7 +190,7 @@ void CAddrMan::Serialize(Stream& s_) const
int nIds = 0;
for (const auto& entry : mapInfo) {
mapUnkIds[entry.first] = nIds;
- const CAddrInfo &info = entry.second;
+ const AddrInfo& info = entry.second;
if (info.nRefCount) {
assert(nIds != nNew); // this means nNew was wrong, oh ow
s << info;
@@ -192,7 +199,7 @@ void CAddrMan::Serialize(Stream& s_) const
}
nIds = 0;
for (const auto& entry : mapInfo) {
- const CAddrInfo &info = entry.second;
+ const AddrInfo& info = entry.second;
if (info.fInTried) {
assert(nIds != nTried); // this means nTried was wrong, oh ow
s << info;
@@ -223,7 +230,7 @@ void CAddrMan::Serialize(Stream& s_) const
}
template <typename Stream>
-void CAddrMan::Unserialize(Stream& s_)
+void AddrManImpl::Unserialize(Stream& s_)
{
LOCK(cs);
@@ -262,21 +269,21 @@ void CAddrMan::Unserialize(Stream& s_)
if (nNew > ADDRMAN_NEW_BUCKET_COUNT * ADDRMAN_BUCKET_SIZE || nNew < 0) {
throw std::ios_base::failure(
- strprintf("Corrupt CAddrMan serialization: nNew=%d, should be in [0, %d]",
+ strprintf("Corrupt AddrMan serialization: nNew=%d, should be in [0, %d]",
nNew,
ADDRMAN_NEW_BUCKET_COUNT * ADDRMAN_BUCKET_SIZE));
}
if (nTried > ADDRMAN_TRIED_BUCKET_COUNT * ADDRMAN_BUCKET_SIZE || nTried < 0) {
throw std::ios_base::failure(
- strprintf("Corrupt CAddrMan serialization: nTried=%d, should be in [0, %d]",
+ strprintf("Corrupt AddrMan serialization: nTried=%d, should be in [0, %d]",
nTried,
ADDRMAN_TRIED_BUCKET_COUNT * ADDRMAN_BUCKET_SIZE));
}
// Deserialize entries from the new table.
for (int n = 0; n < nNew; n++) {
- CAddrInfo &info = mapInfo[n];
+ AddrInfo& info = mapInfo[n];
s >> info;
mapAddr[info] = n;
info.nRandomPos = vRandom.size();
@@ -287,7 +294,7 @@ void CAddrMan::Unserialize(Stream& s_)
// Deserialize entries from the tried table.
int nLost = 0;
for (int n = 0; n < nTried; n++) {
- CAddrInfo info;
+ AddrInfo info;
s >> info;
int nKBucket = info.GetTriedBucket(nKey, m_asmap);
int nKBucketPos = info.GetBucketPosition(nKey, false, nKBucket);
@@ -344,7 +351,7 @@ void CAddrMan::Unserialize(Stream& s_)
for (auto bucket_entry : bucket_entries) {
int bucket{bucket_entry.first};
const int entry_index{bucket_entry.second};
- CAddrInfo& info = mapInfo[entry_index];
+ AddrInfo& info = mapInfo[entry_index];
// Don't store the entry in the new bucket if it's not a valid address for our addrman
if (!info.IsValid()) continue;
@@ -394,16 +401,7 @@ void CAddrMan::Unserialize(Stream& s_)
}
}
-// explicit instantiation
-template void CAddrMan::Serialize(CHashWriter& s) const;
-template void CAddrMan::Serialize(CAutoFile& s) const;
-template void CAddrMan::Serialize(CDataStream& s) const;
-template void CAddrMan::Unserialize(CAutoFile& s);
-template void CAddrMan::Unserialize(CHashVerifier<CAutoFile>& s);
-template void CAddrMan::Unserialize(CDataStream& s);
-template void CAddrMan::Unserialize(CHashVerifier<CDataStream>& s);
-
-CAddrInfo* CAddrMan::Find(const CNetAddr& addr, int* pnId)
+AddrInfo* AddrManImpl::Find(const CNetAddr& addr, int* pnId)
{
AssertLockHeld(cs);
@@ -418,12 +416,12 @@ CAddrInfo* CAddrMan::Find(const CNetAddr& addr, int* pnId)
return nullptr;
}
-CAddrInfo* CAddrMan::Create(const CAddress& addr, const CNetAddr& addrSource, int* pnId)
+AddrInfo* AddrManImpl::Create(const CAddress& addr, const CNetAddr& addrSource, int* pnId)
{
AssertLockHeld(cs);
int nId = nIdCount++;
- mapInfo[nId] = CAddrInfo(addr, addrSource);
+ mapInfo[nId] = AddrInfo(addr, addrSource);
mapAddr[addr] = nId;
mapInfo[nId].nRandomPos = vRandom.size();
vRandom.push_back(nId);
@@ -432,7 +430,7 @@ CAddrInfo* CAddrMan::Create(const CAddress& addr, const CNetAddr& addrSource, in
return &mapInfo[nId];
}
-void CAddrMan::SwapRandom(unsigned int nRndPos1, unsigned int nRndPos2) const
+void AddrManImpl::SwapRandom(unsigned int nRndPos1, unsigned int nRndPos2) const
{
AssertLockHeld(cs);
@@ -456,12 +454,12 @@ void CAddrMan::SwapRandom(unsigned int nRndPos1, unsigned int nRndPos2) const
vRandom[nRndPos2] = nId1;
}
-void CAddrMan::Delete(int nId)
+void AddrManImpl::Delete(int nId)
{
AssertLockHeld(cs);
assert(mapInfo.count(nId) != 0);
- CAddrInfo& info = mapInfo[nId];
+ AddrInfo& info = mapInfo[nId];
assert(!info.fInTried);
assert(info.nRefCount == 0);
@@ -472,14 +470,14 @@ void CAddrMan::Delete(int nId)
nNew--;
}
-void CAddrMan::ClearNew(int nUBucket, int nUBucketPos)
+void AddrManImpl::ClearNew(int nUBucket, int nUBucketPos)
{
AssertLockHeld(cs);
// if there is an entry in the specified bucket, delete it.
if (vvNew[nUBucket][nUBucketPos] != -1) {
int nIdDelete = vvNew[nUBucket][nUBucketPos];
- CAddrInfo& infoDelete = mapInfo[nIdDelete];
+ AddrInfo& infoDelete = mapInfo[nIdDelete];
assert(infoDelete.nRefCount > 0);
infoDelete.nRefCount--;
vvNew[nUBucket][nUBucketPos] = -1;
@@ -489,7 +487,7 @@ void CAddrMan::ClearNew(int nUBucket, int nUBucketPos)
}
}
-void CAddrMan::MakeTried(CAddrInfo& info, int nId)
+void AddrManImpl::MakeTried(AddrInfo& info, int nId)
{
AssertLockHeld(cs);
@@ -517,7 +515,7 @@ void CAddrMan::MakeTried(CAddrInfo& info, int nId)
// find an item to evict
int nIdEvict = vvTried[nKBucket][nKBucketPos];
assert(mapInfo.count(nIdEvict) == 1);
- CAddrInfo& infoOld = mapInfo[nIdEvict];
+ AddrInfo& infoOld = mapInfo[nIdEvict];
// Remove the to-be-evicted item from the tried set.
infoOld.fInTried = false;
@@ -542,7 +540,7 @@ void CAddrMan::MakeTried(CAddrInfo& info, int nId)
info.fInTried = true;
}
-void CAddrMan::Good_(const CService& addr, bool test_before_evict, int64_t nTime)
+void AddrManImpl::Good_(const CService& addr, bool test_before_evict, int64_t nTime)
{
AssertLockHeld(cs);
@@ -550,13 +548,13 @@ void CAddrMan::Good_(const CService& addr, bool test_before_evict, int64_t nTime
nLastGood = nTime;
- CAddrInfo* pinfo = Find(addr, &nId);
+ AddrInfo* pinfo = Find(addr, &nId);
// if not found, bail out
if (!pinfo)
return;
- CAddrInfo& info = *pinfo;
+ AddrInfo& info = *pinfo;
// check whether we are talking about the exact same CService (including same port)
if (info != addr)
@@ -598,7 +596,7 @@ void CAddrMan::Good_(const CService& addr, bool test_before_evict, int64_t nTime
}
}
-bool CAddrMan::Add_(const CAddress& addr, const CNetAddr& source, int64_t nTimePenalty)
+bool AddrManImpl::Add_(const CAddress& addr, const CNetAddr& source, int64_t nTimePenalty)
{
AssertLockHeld(cs);
@@ -607,7 +605,7 @@ bool CAddrMan::Add_(const CAddress& addr, const CNetAddr& source, int64_t nTimeP
bool fNew = false;
int nId;
- CAddrInfo* pinfo = Find(addr, &nId);
+ AddrInfo* pinfo = Find(addr, &nId);
// Do not set a penalty for a source's self-announcement
if (addr == source) {
@@ -654,7 +652,7 @@ bool CAddrMan::Add_(const CAddress& addr, const CNetAddr& source, int64_t nTimeP
if (vvNew[nUBucket][nUBucketPos] != nId) {
bool fInsert = vvNew[nUBucket][nUBucketPos] == -1;
if (!fInsert) {
- CAddrInfo& infoExisting = mapInfo[vvNew[nUBucket][nUBucketPos]];
+ AddrInfo& infoExisting = mapInfo[vvNew[nUBucket][nUBucketPos]];
if (infoExisting.IsTerrible() || (infoExisting.nRefCount > 1 && pinfo->nRefCount == 0)) {
// Overwrite the existing new table entry.
fInsert = true;
@@ -673,17 +671,17 @@ bool CAddrMan::Add_(const CAddress& addr, const CNetAddr& source, int64_t nTimeP
return fNew;
}
-void CAddrMan::Attempt_(const CService& addr, bool fCountFailure, int64_t nTime)
+void AddrManImpl::Attempt_(const CService& addr, bool fCountFailure, int64_t nTime)
{
AssertLockHeld(cs);
- CAddrInfo* pinfo = Find(addr);
+ AddrInfo* pinfo = Find(addr);
// if not found, bail out
if (!pinfo)
return;
- CAddrInfo& info = *pinfo;
+ AddrInfo& info = *pinfo;
// check whether we are talking about the exact same CService (including same port)
if (info != addr)
@@ -697,15 +695,13 @@ void CAddrMan::Attempt_(const CService& addr, bool fCountFailure, int64_t nTime)
}
}
-CAddrInfo CAddrMan::Select_(bool newOnly) const
+std::pair<CAddress, int64_t> AddrManImpl::Select_(bool newOnly) const
{
AssertLockHeld(cs);
- if (vRandom.empty())
- return CAddrInfo();
+ if (vRandom.empty()) return {};
- if (newOnly && nNew == 0)
- return CAddrInfo();
+ if (newOnly && nNew == 0) return {};
// Use a 50% chance for choosing between tried and new table entries.
if (!newOnly &&
@@ -722,9 +718,10 @@ CAddrInfo CAddrMan::Select_(bool newOnly) const
int nId = vvTried[nKBucket][nKBucketPos];
const auto it_found{mapInfo.find(nId)};
assert(it_found != mapInfo.end());
- const CAddrInfo& info{it_found->second};
- if (insecure_rand.randbits(30) < fChanceFactor * info.GetChance() * (1 << 30))
- return info;
+ const AddrInfo& info{it_found->second};
+ if (insecure_rand.randbits(30) < fChanceFactor * info.GetChance() * (1 << 30)) {
+ return {info, info.nLastTry};
+ }
fChanceFactor *= 1.2;
}
} else {
@@ -740,118 +737,16 @@ CAddrInfo CAddrMan::Select_(bool newOnly) const
int nId = vvNew[nUBucket][nUBucketPos];
const auto it_found{mapInfo.find(nId)};
assert(it_found != mapInfo.end());
- const CAddrInfo& info{it_found->second};
- if (insecure_rand.randbits(30) < fChanceFactor * info.GetChance() * (1 << 30))
- return info;
- fChanceFactor *= 1.2;
- }
- }
-}
-
-void CAddrMan::Check() const
-{
- AssertLockHeld(cs);
-
- // Run consistency checks 1 in m_consistency_check_ratio times if enabled
- if (m_consistency_check_ratio == 0) return;
- if (insecure_rand.randrange(m_consistency_check_ratio) >= 1) return;
-
- const int err{ForceCheckAddrman()};
- if (err) {
- LogPrintf("ADDRMAN CONSISTENCY CHECK FAILED!!! err=%i\n", err);
- assert(false);
- }
-}
-
-int CAddrMan::ForceCheckAddrman() const
-{
- AssertLockHeld(cs);
-
- LogPrint(BCLog::ADDRMAN, "Addrman checks started: new %i, tried %i, total %u\n", nNew, nTried, vRandom.size());
-
- std::unordered_set<int> setTried;
- std::unordered_map<int, int> mapNew;
-
- if (vRandom.size() != (size_t)(nTried + nNew))
- return -7;
-
- for (const auto& entry : mapInfo) {
- int n = entry.first;
- const CAddrInfo& info = entry.second;
- if (info.fInTried) {
- if (!info.nLastSuccess)
- return -1;
- if (info.nRefCount)
- return -2;
- setTried.insert(n);
- } else {
- if (info.nRefCount < 0 || info.nRefCount > ADDRMAN_NEW_BUCKETS_PER_ADDRESS)
- return -3;
- if (!info.nRefCount)
- return -4;
- mapNew[n] = info.nRefCount;
- }
- const auto it{mapAddr.find(info)};
- if (it == mapAddr.end() || it->second != n) {
- return -5;
- }
- if (info.nRandomPos < 0 || (size_t)info.nRandomPos >= vRandom.size() || vRandom[info.nRandomPos] != n)
- return -14;
- if (info.nLastTry < 0)
- return -6;
- if (info.nLastSuccess < 0)
- return -8;
- }
-
- if (setTried.size() != (size_t)nTried)
- return -9;
- if (mapNew.size() != (size_t)nNew)
- return -10;
-
- for (int n = 0; n < ADDRMAN_TRIED_BUCKET_COUNT; n++) {
- for (int i = 0; i < ADDRMAN_BUCKET_SIZE; i++) {
- if (vvTried[n][i] != -1) {
- if (!setTried.count(vvTried[n][i]))
- return -11;
- const auto it{mapInfo.find(vvTried[n][i])};
- if (it == mapInfo.end() || it->second.GetTriedBucket(nKey, m_asmap) != n) {
- return -17;
- }
- if (it->second.GetBucketPosition(nKey, false, n) != i) {
- return -18;
- }
- setTried.erase(vvTried[n][i]);
- }
- }
- }
-
- for (int n = 0; n < ADDRMAN_NEW_BUCKET_COUNT; n++) {
- for (int i = 0; i < ADDRMAN_BUCKET_SIZE; i++) {
- if (vvNew[n][i] != -1) {
- if (!mapNew.count(vvNew[n][i]))
- return -12;
- const auto it{mapInfo.find(vvNew[n][i])};
- if (it == mapInfo.end() || it->second.GetBucketPosition(nKey, true, n) != i) {
- return -19;
- }
- if (--mapNew[vvNew[n][i]] == 0)
- mapNew.erase(vvNew[n][i]);
+ const AddrInfo& info{it_found->second};
+ if (insecure_rand.randbits(30) < fChanceFactor * info.GetChance() * (1 << 30)) {
+ return {info, info.nLastTry};
}
+ fChanceFactor *= 1.2;
}
}
-
- if (setTried.size())
- return -13;
- if (mapNew.size())
- return -15;
- if (nKey.IsNull())
- return -16;
-
- LogPrint(BCLog::ADDRMAN, "Addrman checks completed successfully\n");
- return 0;
}
-void CAddrMan::GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size_t max_pct, std::optional<Network> network) const
+std::vector<CAddress> AddrManImpl::GetAddr_(size_t max_addresses, size_t max_pct, std::optional<Network> network) const
{
AssertLockHeld(cs);
@@ -865,8 +760,9 @@ void CAddrMan::GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size
// gather a list of random nodes, skipping those of low quality
const int64_t now{GetAdjustedTime()};
+ std::vector<CAddress> addresses;
for (unsigned int n = 0; n < vRandom.size(); n++) {
- if (vAddr.size() >= nNodes)
+ if (addresses.size() >= nNodes)
break;
int nRndPos = insecure_rand.randrange(vRandom.size() - n) + n;
@@ -874,7 +770,7 @@ void CAddrMan::GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size
const auto it{mapInfo.find(vRandom[n])};
assert(it != mapInfo.end());
- const CAddrInfo& ai{it->second};
+ const AddrInfo& ai{it->second};
// Filter by network (optional)
if (network != std::nullopt && ai.GetNetClass() != network) continue;
@@ -882,21 +778,23 @@ void CAddrMan::GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size
// Filter for quality
if (ai.IsTerrible(now)) continue;
- vAddr.push_back(ai);
+ addresses.push_back(ai);
}
+
+ return addresses;
}
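The number of addresses returned above is bounded both by a percentage of the table and by an absolute count. A rough sketch of that cap (simplified; as in the callers, zero means "no limit"):

```python
def addr_cap(table_size: int, max_addresses: int, max_pct: int) -> int:
    # 0 disables the respective limit, matching the callers' convention.
    n = table_size
    if max_pct:
        n = max_pct * n // 100
    if max_addresses:
        n = min(n, max_addresses)
    return n

# e.g. a 5000-entry table queried with typical GETADDR limits:
print(addr_cap(5000, max_addresses=1000, max_pct=23))  # 1150 -> capped to 1000
```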
-void CAddrMan::Connected_(const CService& addr, int64_t nTime)
+void AddrManImpl::Connected_(const CService& addr, int64_t nTime)
{
AssertLockHeld(cs);
- CAddrInfo* pinfo = Find(addr);
+ AddrInfo* pinfo = Find(addr);
// if not found, bail out
if (!pinfo)
return;
- CAddrInfo& info = *pinfo;
+ AddrInfo& info = *pinfo;
// check whether we are talking about the exact same CService (including same port)
if (info != addr)
@@ -908,17 +806,17 @@ void CAddrMan::Connected_(const CService& addr, int64_t nTime)
info.nTime = nTime;
}
-void CAddrMan::SetServices_(const CService& addr, ServiceFlags nServices)
+void AddrManImpl::SetServices_(const CService& addr, ServiceFlags nServices)
{
AssertLockHeld(cs);
- CAddrInfo* pinfo = Find(addr);
+ AddrInfo* pinfo = Find(addr);
// if not found, bail out
if (!pinfo)
return;
- CAddrInfo& info = *pinfo;
+ AddrInfo& info = *pinfo;
// check whether we are talking about the exact same CService (including same port)
if (info != addr)
@@ -928,7 +826,7 @@ void CAddrMan::SetServices_(const CService& addr, ServiceFlags nServices)
info.nServices = nServices;
}
-void CAddrMan::ResolveCollisions_()
+void AddrManImpl::ResolveCollisions_()
{
AssertLockHeld(cs);
@@ -941,7 +839,7 @@ void CAddrMan::ResolveCollisions_()
if (mapInfo.count(id_new) != 1) {
erase_collision = true;
} else {
- CAddrInfo& info_new = mapInfo[id_new];
+ AddrInfo& info_new = mapInfo[id_new];
// Which tried bucket to move the entry to.
int tried_bucket = info_new.GetTriedBucket(nKey, m_asmap);
@@ -952,7 +850,7 @@ void CAddrMan::ResolveCollisions_()
// Get the to-be-evicted address that is being tested
int id_old = vvTried[tried_bucket][tried_bucket_pos];
- CAddrInfo& info_old = mapInfo[id_old];
+ AddrInfo& info_old = mapInfo[id_old];
// Has successfully connected in last X hours
if (GetAdjustedTime() - info_old.nLastSuccess < ADDRMAN_REPLACEMENT_HOURS*(60*60)) {
@@ -989,11 +887,11 @@ void CAddrMan::ResolveCollisions_()
}
}
-CAddrInfo CAddrMan::SelectTriedCollision_()
+std::pair<CAddress, int64_t> AddrManImpl::SelectTriedCollision_()
{
AssertLockHeld(cs);
- if (m_tried_collisions.size() == 0) return CAddrInfo();
+ if (m_tried_collisions.size() == 0) return {};
std::set<int>::iterator it = m_tried_collisions.begin();
@@ -1004,16 +902,291 @@ CAddrInfo CAddrMan::SelectTriedCollision_()
// If id_new not found in mapInfo remove it from m_tried_collisions
if (mapInfo.count(id_new) != 1) {
m_tried_collisions.erase(it);
- return CAddrInfo();
+ return {};
}
- const CAddrInfo& newInfo = mapInfo[id_new];
+ const AddrInfo& newInfo = mapInfo[id_new];
// which tried bucket to move the entry to
int tried_bucket = newInfo.GetTriedBucket(nKey, m_asmap);
int tried_bucket_pos = newInfo.GetBucketPosition(nKey, false, tried_bucket);
- int id_old = vvTried[tried_bucket][tried_bucket_pos];
+ const AddrInfo& info_old = mapInfo[vvTried[tried_bucket][tried_bucket_pos]];
+ return {info_old, info_old.nLastTry};
+}
+
+void AddrManImpl::Check() const
+{
+ AssertLockHeld(cs);
+
+ // Run consistency checks 1 in m_consistency_check_ratio times if enabled
+ if (m_consistency_check_ratio == 0) return;
+ if (insecure_rand.randrange(m_consistency_check_ratio) >= 1) return;
+
+ const int err{ForceCheckAddrman()};
+ if (err) {
+ LogPrintf("ADDRMAN CONSISTENCY CHECK FAILED!!! err=%i\n", err);
+ assert(false);
+ }
+}
+
+int AddrManImpl::ForceCheckAddrman() const
+{
+ AssertLockHeld(cs);
+
+ LogPrint(BCLog::ADDRMAN, "Addrman checks started: new %i, tried %i, total %u\n", nNew, nTried, vRandom.size());
+
+ std::unordered_set<int> setTried;
+ std::unordered_map<int, int> mapNew;
+
+ if (vRandom.size() != (size_t)(nTried + nNew))
+ return -7;
+
+ for (const auto& entry : mapInfo) {
+ int n = entry.first;
+ const AddrInfo& info = entry.second;
+ if (info.fInTried) {
+ if (!info.nLastSuccess)
+ return -1;
+ if (info.nRefCount)
+ return -2;
+ setTried.insert(n);
+ } else {
+ if (info.nRefCount < 0 || info.nRefCount > ADDRMAN_NEW_BUCKETS_PER_ADDRESS)
+ return -3;
+ if (!info.nRefCount)
+ return -4;
+ mapNew[n] = info.nRefCount;
+ }
+ const auto it{mapAddr.find(info)};
+ if (it == mapAddr.end() || it->second != n) {
+ return -5;
+ }
+ if (info.nRandomPos < 0 || (size_t)info.nRandomPos >= vRandom.size() || vRandom[info.nRandomPos] != n)
+ return -14;
+ if (info.nLastTry < 0)
+ return -6;
+ if (info.nLastSuccess < 0)
+ return -8;
+ }
+
+ if (setTried.size() != (size_t)nTried)
+ return -9;
+ if (mapNew.size() != (size_t)nNew)
+ return -10;
+
+ for (int n = 0; n < ADDRMAN_TRIED_BUCKET_COUNT; n++) {
+ for (int i = 0; i < ADDRMAN_BUCKET_SIZE; i++) {
+ if (vvTried[n][i] != -1) {
+ if (!setTried.count(vvTried[n][i]))
+ return -11;
+ const auto it{mapInfo.find(vvTried[n][i])};
+ if (it == mapInfo.end() || it->second.GetTriedBucket(nKey, m_asmap) != n) {
+ return -17;
+ }
+ if (it->second.GetBucketPosition(nKey, false, n) != i) {
+ return -18;
+ }
+ setTried.erase(vvTried[n][i]);
+ }
+ }
+ }
+
+ for (int n = 0; n < ADDRMAN_NEW_BUCKET_COUNT; n++) {
+ for (int i = 0; i < ADDRMAN_BUCKET_SIZE; i++) {
+ if (vvNew[n][i] != -1) {
+ if (!mapNew.count(vvNew[n][i]))
+ return -12;
+ const auto it{mapInfo.find(vvNew[n][i])};
+ if (it == mapInfo.end() || it->second.GetBucketPosition(nKey, true, n) != i) {
+ return -19;
+ }
+ if (--mapNew[vvNew[n][i]] == 0)
+ mapNew.erase(vvNew[n][i]);
+ }
+ }
+ }
+
+ if (setTried.size())
+ return -13;
+ if (mapNew.size())
+ return -15;
+ if (nKey.IsNull())
+ return -16;
+
+ LogPrint(BCLog::ADDRMAN, "Addrman checks completed successfully\n");
+ return 0;
+}
+
+size_t AddrManImpl::size() const
+{
+ LOCK(cs); // TODO: Cache this in an atomic to avoid this overhead
+ return vRandom.size();
+}
+
+bool AddrManImpl::Add(const std::vector<CAddress>& vAddr, const CNetAddr& source, int64_t nTimePenalty)
+{
+ LOCK(cs);
+ int nAdd = 0;
+ Check();
+ for (std::vector<CAddress>::const_iterator it = vAddr.begin(); it != vAddr.end(); it++)
+ nAdd += Add_(*it, source, nTimePenalty) ? 1 : 0;
+ Check();
+ if (nAdd) {
+ LogPrint(BCLog::ADDRMAN, "Added %i addresses from %s: %i tried, %i new\n", nAdd, source.ToString(), nTried, nNew);
+ }
+ return nAdd > 0;
+}
+
+void AddrManImpl::Good(const CService& addr, int64_t nTime)
+{
+ LOCK(cs);
+ Check();
+ Good_(addr, /* test_before_evict */ true, nTime);
+ Check();
+}
- return mapInfo[id_old];
+void AddrManImpl::Attempt(const CService& addr, bool fCountFailure, int64_t nTime)
+{
+ LOCK(cs);
+ Check();
+ Attempt_(addr, fCountFailure, nTime);
+ Check();
+}
+
+void AddrManImpl::ResolveCollisions()
+{
+ LOCK(cs);
+ Check();
+ ResolveCollisions_();
+ Check();
+}
+
+std::pair<CAddress, int64_t> AddrManImpl::SelectTriedCollision()
+{
+ LOCK(cs);
+ Check();
+ const auto ret = SelectTriedCollision_();
+ Check();
+ return ret;
+}
+
+std::pair<CAddress, int64_t> AddrManImpl::Select(bool newOnly) const
+{
+ LOCK(cs);
+ Check();
+ const auto addrRet = Select_(newOnly);
+ Check();
+ return addrRet;
+}
+
+std::vector<CAddress> AddrManImpl::GetAddr(size_t max_addresses, size_t max_pct, std::optional<Network> network) const
+{
+ LOCK(cs);
+ Check();
+ const auto addresses = GetAddr_(max_addresses, max_pct, network);
+ Check();
+ return addresses;
+}
+
+void AddrManImpl::Connected(const CService& addr, int64_t nTime)
+{
+ LOCK(cs);
+ Check();
+ Connected_(addr, nTime);
+ Check();
+}
+
+void AddrManImpl::SetServices(const CService& addr, ServiceFlags nServices)
+{
+ LOCK(cs);
+ Check();
+ SetServices_(addr, nServices);
+ Check();
+}
+
+const std::vector<bool>& AddrManImpl::GetAsmap() const
+{
+ return m_asmap;
+}
+
+AddrMan::AddrMan(std::vector<bool> asmap, bool deterministic, int32_t consistency_check_ratio)
+ : m_impl(std::make_unique<AddrManImpl>(std::move(asmap), deterministic, consistency_check_ratio)) {}
+
+AddrMan::~AddrMan() = default;
+
+template <typename Stream>
+void AddrMan::Serialize(Stream& s_) const
+{
+ m_impl->Serialize<Stream>(s_);
+}
+
+template <typename Stream>
+void AddrMan::Unserialize(Stream& s_)
+{
+ m_impl->Unserialize<Stream>(s_);
+}
+
+// explicit instantiation
+template void AddrMan::Serialize(CHashWriter& s) const;
+template void AddrMan::Serialize(CAutoFile& s) const;
+template void AddrMan::Serialize(CDataStream& s) const;
+template void AddrMan::Unserialize(CAutoFile& s);
+template void AddrMan::Unserialize(CHashVerifier<CAutoFile>& s);
+template void AddrMan::Unserialize(CDataStream& s);
+template void AddrMan::Unserialize(CHashVerifier<CDataStream>& s);
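Because the Serialize/Unserialize template definitions now live in this .cpp file rather than in the header, only the stream types listed in the explicit instantiations above can be used from other translation units. A minimal sketch (not part of the patch, assuming the usual streams.h/version.h includes and an AddrMan instance named addrman) of a caller that links thanks to the CDataStream instantiation:

    CDataStream ssPeers(SER_DISK, CLIENT_VERSION);
    addrman.Serialize(ssPeers); // resolves to the explicit CDataStream instantiation above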
+
+size_t AddrMan::size() const
+{
+ return m_impl->size();
+}
+
+bool AddrMan::Add(const std::vector<CAddress>& vAddr, const CNetAddr& source, int64_t nTimePenalty)
+{
+ return m_impl->Add(vAddr, source, nTimePenalty);
+}
+
+void AddrMan::Good(const CService& addr, int64_t nTime)
+{
+ m_impl->Good(addr, nTime);
+}
+
+void AddrMan::Attempt(const CService& addr, bool fCountFailure, int64_t nTime)
+{
+ m_impl->Attempt(addr, fCountFailure, nTime);
+}
+
+void AddrMan::ResolveCollisions()
+{
+ m_impl->ResolveCollisions();
+}
+
+std::pair<CAddress, int64_t> AddrMan::SelectTriedCollision()
+{
+ return m_impl->SelectTriedCollision();
+}
+
+std::pair<CAddress, int64_t> AddrMan::Select(bool newOnly) const
+{
+ return m_impl->Select(newOnly);
+}
+
+std::vector<CAddress> AddrMan::GetAddr(size_t max_addresses, size_t max_pct, std::optional<Network> network) const
+{
+ return m_impl->GetAddr(max_addresses, max_pct, network);
+}
+
+void AddrMan::Connected(const CService& addr, int64_t nTime)
+{
+ m_impl->Connected(addr, nTime);
+}
+
+void AddrMan::SetServices(const CService& addr, ServiceFlags nServices)
+{
+ m_impl->SetServices(addr, nServices);
+}
+
+const std::vector<bool>& AddrMan::GetAsmap() const
+{
+ return m_impl->GetAsmap();
}
diff --git a/src/addrman.h b/src/addrman.h
index 7dd8528bef..174ab4f811 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -6,94 +6,22 @@
#ifndef BITCOIN_ADDRMAN_H
#define BITCOIN_ADDRMAN_H
-#include <fs.h>
-#include <logging.h>
#include <netaddress.h>
#include <protocol.h>
-#include <sync.h>
+#include <streams.h>
#include <timedata.h>
#include <cstdint>
+#include <memory>
#include <optional>
-#include <set>
-#include <unordered_map>
+#include <utility>
#include <vector>
+class AddrManImpl;
+
/** Default for -checkaddrman */
static constexpr int32_t DEFAULT_ADDRMAN_CONSISTENCY_CHECKS{0};
-/**
- * Extended statistics about a CAddress
- */
-class CAddrInfo : public CAddress
-{
-public:
- //! last try whatsoever by us (memory only)
- int64_t nLastTry{0};
-
- //! last counted attempt (memory only)
- int64_t nLastCountAttempt{0};
-
-private:
- //! where knowledge about this address first came from
- CNetAddr source;
-
- //! last successful connection by us
- int64_t nLastSuccess{0};
-
- //! connection attempts since last successful attempt
- int nAttempts{0};
-
- //! reference count in new sets (memory only)
- int nRefCount{0};
-
- //! in tried set? (memory only)
- bool fInTried{false};
-
- //! position in vRandom
- mutable int nRandomPos{-1};
-
- friend class CAddrMan;
- friend class CAddrManDeterministic;
-
-public:
-
- SERIALIZE_METHODS(CAddrInfo, obj)
- {
- READWRITEAS(CAddress, obj);
- READWRITE(obj.source, obj.nLastSuccess, obj.nAttempts);
- }
-
- CAddrInfo(const CAddress &addrIn, const CNetAddr &addrSource) : CAddress(addrIn), source(addrSource)
- {
- }
-
- CAddrInfo() : CAddress(), source()
- {
- }
-
- //! Calculate in which "tried" bucket this entry belongs
- int GetTriedBucket(const uint256 &nKey, const std::vector<bool> &asmap) const;
-
- //! Calculate in which "new" bucket this entry belongs, given a certain source
- int GetNewBucket(const uint256 &nKey, const CNetAddr& src, const std::vector<bool> &asmap) const;
-
- //! Calculate in which "new" bucket this entry belongs, using its default source
- int GetNewBucket(const uint256 &nKey, const std::vector<bool> &asmap) const
- {
- return GetNewBucket(nKey, source, asmap);
- }
-
- //! Calculate in which position of a bucket to store this entry.
- int GetBucketPosition(const uint256 &nKey, bool fNew, int nBucket) const;
-
- //! Determine whether the statistics about this entry are bad enough so that it can just be deleted
- bool IsTerrible(int64_t nNow = GetAdjustedTime()) const;
-
- //! Calculate the relative chance this entry should be given when selecting nodes to connect to
- double GetChance(int64_t nNow = GetAdjustedTime()) const;
-};
-
/** Stochastic address manager
*
* Design goals:
@@ -123,290 +51,64 @@ public:
* * Several indexes are kept for high performance. Setting m_consistency_check_ratio with the -checkaddrman
* configuration option will introduce (expensive) consistency checks for the entire data structure.
*/
+class AddrMan
+{
+ const std::unique_ptr<AddrManImpl> m_impl;
-/** Total number of buckets for tried addresses */
-static constexpr int32_t ADDRMAN_TRIED_BUCKET_COUNT_LOG2{8};
-static constexpr int ADDRMAN_TRIED_BUCKET_COUNT{1 << ADDRMAN_TRIED_BUCKET_COUNT_LOG2};
-
-/** Total number of buckets for new addresses */
-static constexpr int32_t ADDRMAN_NEW_BUCKET_COUNT_LOG2{10};
-static constexpr int ADDRMAN_NEW_BUCKET_COUNT{1 << ADDRMAN_NEW_BUCKET_COUNT_LOG2};
+public:
+ explicit AddrMan(std::vector<bool> asmap, bool deterministic, int32_t consistency_check_ratio);
-/** Maximum allowed number of entries in buckets for new and tried addresses */
-static constexpr int32_t ADDRMAN_BUCKET_SIZE_LOG2{6};
-static constexpr int ADDRMAN_BUCKET_SIZE{1 << ADDRMAN_BUCKET_SIZE_LOG2};
+ ~AddrMan();
-/**
- * Stochastical (IP) address manager
- */
-class CAddrMan
-{
-public:
template <typename Stream>
- void Serialize(Stream& s_) const EXCLUSIVE_LOCKS_REQUIRED(!cs);
+ void Serialize(Stream& s_) const;
template <typename Stream>
- void Unserialize(Stream& s_) EXCLUSIVE_LOCKS_REQUIRED(!cs);
-
- explicit CAddrMan(std::vector<bool> asmap, bool deterministic, int32_t consistency_check_ratio);
-
- ~CAddrMan()
- {
- nKey.SetNull();
- }
+ void Unserialize(Stream& s_);
//! Return the number of (unique) addresses in all tables.
- size_t size() const
- EXCLUSIVE_LOCKS_REQUIRED(!cs)
- {
- LOCK(cs); // TODO: Cache this in an atomic to avoid this overhead
- return vRandom.size();
- }
+ size_t size() const;
//! Add addresses to addrman's new table.
- bool Add(const std::vector<CAddress> &vAddr, const CNetAddr& source, int64_t nTimePenalty = 0)
- EXCLUSIVE_LOCKS_REQUIRED(!cs)
- {
- LOCK(cs);
- int nAdd = 0;
- Check();
- for (std::vector<CAddress>::const_iterator it = vAddr.begin(); it != vAddr.end(); it++)
- nAdd += Add_(*it, source, nTimePenalty) ? 1 : 0;
- Check();
- if (nAdd) {
- LogPrint(BCLog::ADDRMAN, "Added %i addresses from %s: %i tried, %i new\n", nAdd, source.ToString(), nTried, nNew);
- }
- return nAdd > 0;
- }
+ bool Add(const std::vector<CAddress>& vAddr, const CNetAddr& source, int64_t nTimePenalty = 0);
- //! Mark an entry as accessible.
- void Good(const CService &addr, int64_t nTime = GetAdjustedTime())
- EXCLUSIVE_LOCKS_REQUIRED(!cs)
- {
- LOCK(cs);
- Check();
- Good_(addr, /* test_before_evict */ true, nTime);
- Check();
- }
+ //! Mark an entry as accessible, possibly moving it from "new" to "tried".
+ void Good(const CService& addr, int64_t nTime = GetAdjustedTime());
//! Mark an entry as connection attempted to.
- void Attempt(const CService &addr, bool fCountFailure, int64_t nTime = GetAdjustedTime())
- EXCLUSIVE_LOCKS_REQUIRED(!cs)
- {
- LOCK(cs);
- Check();
- Attempt_(addr, fCountFailure, nTime);
- Check();
- }
+ void Attempt(const CService& addr, bool fCountFailure, int64_t nTime = GetAdjustedTime());
//! See if any to-be-evicted tried table entries have been tested and if so resolve the collisions.
- void ResolveCollisions()
- EXCLUSIVE_LOCKS_REQUIRED(!cs)
- {
- LOCK(cs);
- Check();
- ResolveCollisions_();
- Check();
- }
-
- //! Randomly select an address in tried that another address is attempting to evict.
- CAddrInfo SelectTriedCollision()
- EXCLUSIVE_LOCKS_REQUIRED(!cs)
- {
- LOCK(cs);
- Check();
- const CAddrInfo ret = SelectTriedCollision_();
- Check();
- return ret;
- }
+ void ResolveCollisions();
/**
- * Choose an address to connect to.
+ * Randomly select an address in the tried table that another address is
+ * attempting to evict.
+ *
+ * @return CAddress The record for the selected tried peer.
+ * int64_t The last time we attempted to connect to that peer.
*/
- CAddrInfo Select(bool newOnly = false) const
- EXCLUSIVE_LOCKS_REQUIRED(!cs)
- {
- LOCK(cs);
- Check();
- const CAddrInfo addrRet = Select_(newOnly);
- Check();
- return addrRet;
- }
+ std::pair<CAddress, int64_t> SelectTriedCollision();
/**
- * Return all or many randomly selected addresses, optionally by network.
+ * Choose an address to connect to.
*
- * @param[in] max_addresses Maximum number of addresses to return (0 = all).
- * @param[in] max_pct Maximum percentage of addresses to return (0 = all).
- * @param[in] network Select only addresses of this network (nullopt = all).
+ * @param[in] newOnly Whether to only select addresses from the new table.
+ * @return CAddress The record for the selected peer.
+ * int64_t The last time we attempted to connect to that peer.
*/
- std::vector<CAddress> GetAddr(size_t max_addresses, size_t max_pct, std::optional<Network> network) const
- EXCLUSIVE_LOCKS_REQUIRED(!cs)
- {
- LOCK(cs);
- Check();
- std::vector<CAddress> vAddr;
- GetAddr_(vAddr, max_addresses, max_pct, network);
- Check();
- return vAddr;
- }
-
- //! Outer function for Connected_()
- void Connected(const CService &addr, int64_t nTime = GetAdjustedTime())
- EXCLUSIVE_LOCKS_REQUIRED(!cs)
- {
- LOCK(cs);
- Check();
- Connected_(addr, nTime);
- Check();
- }
-
- void SetServices(const CService &addr, ServiceFlags nServices)
- EXCLUSIVE_LOCKS_REQUIRED(!cs)
- {
- LOCK(cs);
- Check();
- SetServices_(addr, nServices);
- Check();
- }
-
- const std::vector<bool>& GetAsmap() const { return m_asmap; }
-
-private:
- //! A mutex to protect the inner data structures.
- mutable Mutex cs;
-
- //! Source of random numbers for randomization in inner loops
- mutable FastRandomContext insecure_rand GUARDED_BY(cs);
-
- //! secret key to randomize bucket select with
- uint256 nKey;
-
- //! Serialization versions.
- enum Format : uint8_t {
- V0_HISTORICAL = 0, //!< historic format, before commit e6b343d88
- V1_DETERMINISTIC = 1, //!< for pre-asmap files
- V2_ASMAP = 2, //!< for files including asmap version
- V3_BIP155 = 3, //!< same as V2_ASMAP plus addresses are in BIP155 format
- };
-
- //! The maximum format this software knows it can unserialize. Also, we always serialize
- //! in this format.
- //! The format (first byte in the serialized stream) can be higher than this and
- //! still this software may be able to unserialize the file - if the second byte
- //! (see `lowest_compatible` in `Unserialize()`) is less or equal to this.
- static constexpr Format FILE_FORMAT = Format::V3_BIP155;
-
- //! The initial value of a field that is incremented every time an incompatible format
- //! change is made (such that old software versions would not be able to parse and
- //! understand the new file format). This is 32 because we overtook the "key size"
- //! field which was 32 historically.
- //! @note Don't increment this. Increment `lowest_compatible` in `Serialize()` instead.
- static constexpr uint8_t INCOMPATIBILITY_BASE = 32;
-
- //! last used nId
- int nIdCount GUARDED_BY(cs){0};
-
- //! table with information about all nIds
- std::unordered_map<int, CAddrInfo> mapInfo GUARDED_BY(cs);
-
- //! find an nId based on its network address
- std::unordered_map<CNetAddr, int, CNetAddrHash> mapAddr GUARDED_BY(cs);
-
- //! randomly-ordered vector of all nIds
- //! This is mutable because it is unobservable outside the class, so any
- //! changes to it (even in const methods) are also unobservable.
- mutable std::vector<int> vRandom GUARDED_BY(cs);
-
- // number of "tried" entries
- int nTried GUARDED_BY(cs){0};
-
- //! list of "tried" buckets
- int vvTried[ADDRMAN_TRIED_BUCKET_COUNT][ADDRMAN_BUCKET_SIZE] GUARDED_BY(cs);
-
- //! number of (unique) "new" entries
- int nNew GUARDED_BY(cs){0};
-
- //! list of "new" buckets
- int vvNew[ADDRMAN_NEW_BUCKET_COUNT][ADDRMAN_BUCKET_SIZE] GUARDED_BY(cs);
-
- //! last time Good was called (memory only). Initially set to 1 so that "never" is strictly worse.
- int64_t nLastGood GUARDED_BY(cs){1};
-
- //! Holds addrs inserted into tried table that collide with existing entries. Test-before-evict discipline used to resolve these collisions.
- std::set<int> m_tried_collisions;
-
- /** Perform consistency checks every m_consistency_check_ratio operations (if non-zero). */
- const int32_t m_consistency_check_ratio;
-
- // Compressed IP->ASN mapping, loaded from a file when a node starts.
- // Should be always empty if no file was provided.
- // This mapping is then used for bucketing nodes in Addrman.
- //
- // If asmap is provided, nodes will be bucketed by
- // AS they belong to, in order to make impossible for a node
- // to connect to several nodes hosted in a single AS.
- // This is done in response to Erebus attack, but also to generally
- // diversify the connections every node creates,
- // especially useful when a large fraction of nodes
- // operate under a couple of cloud providers.
- //
- // If a new asmap was provided, the existing records
- // would be re-bucketed accordingly.
- const std::vector<bool> m_asmap;
-
- //! Find an entry.
- CAddrInfo* Find(const CNetAddr& addr, int *pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs);
-
- //! Create a new entry and add it to the internal data structures mapInfo, mapAddr and vRandom.
- CAddrInfo* Create(const CAddress &addr, const CNetAddr &addrSource, int *pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs);
-
- //! Swap two elements in vRandom.
- void SwapRandom(unsigned int nRandomPos1, unsigned int nRandomPos2) const EXCLUSIVE_LOCKS_REQUIRED(cs);
-
- //! Move an entry from the "new" table(s) to the "tried" table
- void MakeTried(CAddrInfo& info, int nId) EXCLUSIVE_LOCKS_REQUIRED(cs);
-
- //! Delete an entry. It must not be in tried, and have refcount 0.
- void Delete(int nId) EXCLUSIVE_LOCKS_REQUIRED(cs);
-
- //! Clear a position in a "new" table. This is the only place where entries are actually deleted.
- void ClearNew(int nUBucket, int nUBucketPos) EXCLUSIVE_LOCKS_REQUIRED(cs);
-
- //! Mark an entry "good", possibly moving it from "new" to "tried".
- void Good_(const CService &addr, bool test_before_evict, int64_t time) EXCLUSIVE_LOCKS_REQUIRED(cs);
-
- //! Add an entry to the "new" table.
- bool Add_(const CAddress &addr, const CNetAddr& source, int64_t nTimePenalty) EXCLUSIVE_LOCKS_REQUIRED(cs);
-
- //! Mark an entry as attempted to connect.
- void Attempt_(const CService &addr, bool fCountFailure, int64_t nTime) EXCLUSIVE_LOCKS_REQUIRED(cs);
-
- //! Select an address to connect to, if newOnly is set to true, only the new table is selected from.
- CAddrInfo Select_(bool newOnly) const EXCLUSIVE_LOCKS_REQUIRED(cs);
-
- //! See if any to-be-evicted tried table entries have been tested and if so resolve the collisions.
- void ResolveCollisions_() EXCLUSIVE_LOCKS_REQUIRED(cs);
-
- //! Return a random to-be-evicted tried table address.
- CAddrInfo SelectTriedCollision_() EXCLUSIVE_LOCKS_REQUIRED(cs);
-
- //! Consistency check, taking into account m_consistency_check_ratio. Will std::abort if an inconsistency is detected.
- void Check() const EXCLUSIVE_LOCKS_REQUIRED(cs);
-
- //! Perform consistency check, regardless of m_consistency_check_ratio.
- //! @returns an error code or zero.
- int ForceCheckAddrman() const EXCLUSIVE_LOCKS_REQUIRED(cs);
+ std::pair<CAddress, int64_t> Select(bool newOnly = false) const;
/**
* Return all or many randomly selected addresses, optionally by network.
*
- * @param[out] vAddr Vector of randomly selected addresses from vRandom.
* @param[in] max_addresses Maximum number of addresses to return (0 = all).
* @param[in] max_pct Maximum percentage of addresses to return (0 = all).
* @param[in] network Select only addresses of this network (nullopt = all).
+ *
+ * @return A vector of randomly selected addresses from vRandom.
*/
- void GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size_t max_pct, std::optional<Network> network) const EXCLUSIVE_LOCKS_REQUIRED(cs);
+ std::vector<CAddress> GetAddr(size_t max_addresses, size_t max_pct, std::optional<Network> network) const;
/** We have successfully connected to this peer. Calling this function
* updates the CAddress's nTime, which is used in our IsTerrible()
@@ -419,13 +121,15 @@ private:
* @param[in] addr The address of the peer we were connected to
* @param[in] nTime The time that we were last connected to this peer
*/
- void Connected_(const CService& addr, int64_t nTime) EXCLUSIVE_LOCKS_REQUIRED(cs);
+ void Connected(const CService& addr, int64_t nTime = GetAdjustedTime());
//! Update an entry's service bits.
- void SetServices_(const CService &addr, ServiceFlags nServices) EXCLUSIVE_LOCKS_REQUIRED(cs);
+ void SetServices(const CService& addr, ServiceFlags nServices);
+
+ const std::vector<bool>& GetAsmap() const;
- friend class CAddrManTest;
- friend class CAddrManDeterministic;
+ friend class AddrManTest;
+ friend class AddrManDeterministic;
};
#endif // BITCOIN_ADDRMAN_H
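For reference, a minimal usage sketch (not part of the patch; ExampleUse is a hypothetical caller) of the public interface declared above, showing the new std::pair return of Select():

    #include <addrman.h>

    void ExampleUse(const CAddress& addr, const CNetAddr& source)
    {
        AddrMan addrman(/* asmap */ std::vector<bool>{}, /* deterministic */ false,
                        /* consistency_check_ratio */ DEFAULT_ADDRMAN_CONSISTENCY_CHECKS);
        addrman.Add({addr}, source);                 // inserted into the "new" table
        addrman.Good(addr);                          // marked reachable; may be promoted to "tried"
        const auto [selected, last_try] = addrman.Select();
        (void)selected; (void)last_try;              // the CAddress record and the last attempt time
    }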
diff --git a/src/addrman_impl.h b/src/addrman_impl.h
new file mode 100644
index 0000000000..1dc7f25f9c
--- /dev/null
+++ b/src/addrman_impl.h
@@ -0,0 +1,271 @@
+// Copyright (c) 2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_ADDRMAN_IMPL_H
+#define BITCOIN_ADDRMAN_IMPL_H
+
+#include <logging.h>
+#include <netaddress.h>
+#include <protocol.h>
+#include <serialize.h>
+#include <sync.h>
+#include <uint256.h>
+
+#include <cstdint>
+#include <optional>
+#include <set>
+#include <unordered_map>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+/** Total number of buckets for tried addresses */
+static constexpr int32_t ADDRMAN_TRIED_BUCKET_COUNT_LOG2{8};
+static constexpr int ADDRMAN_TRIED_BUCKET_COUNT{1 << ADDRMAN_TRIED_BUCKET_COUNT_LOG2};
+/** Total number of buckets for new addresses */
+static constexpr int32_t ADDRMAN_NEW_BUCKET_COUNT_LOG2{10};
+static constexpr int ADDRMAN_NEW_BUCKET_COUNT{1 << ADDRMAN_NEW_BUCKET_COUNT_LOG2};
+/** Maximum allowed number of entries in buckets for new and tried addresses */
+static constexpr int32_t ADDRMAN_BUCKET_SIZE_LOG2{6};
+static constexpr int ADDRMAN_BUCKET_SIZE{1 << ADDRMAN_BUCKET_SIZE_LOG2};
+
+/**
+ * Extended statistics about a CAddress
+ */
+class AddrInfo : public CAddress
+{
+public:
+ //! last try whatsoever by us (memory only)
+ int64_t nLastTry{0};
+
+ //! last counted attempt (memory only)
+ int64_t nLastCountAttempt{0};
+
+ //! where knowledge about this address first came from
+ CNetAddr source;
+
+ //! last successful connection by us
+ int64_t nLastSuccess{0};
+
+ //! connection attempts since last successful attempt
+ int nAttempts{0};
+
+ //! reference count in new sets (memory only)
+ int nRefCount{0};
+
+ //! in tried set? (memory only)
+ bool fInTried{false};
+
+ //! position in vRandom
+ mutable int nRandomPos{-1};
+
+ SERIALIZE_METHODS(AddrInfo, obj)
+ {
+ READWRITEAS(CAddress, obj);
+ READWRITE(obj.source, obj.nLastSuccess, obj.nAttempts);
+ }
+
+ AddrInfo(const CAddress &addrIn, const CNetAddr &addrSource) : CAddress(addrIn), source(addrSource)
+ {
+ }
+
+ AddrInfo() : CAddress(), source()
+ {
+ }
+
+ //! Calculate in which "tried" bucket this entry belongs
+ int GetTriedBucket(const uint256 &nKey, const std::vector<bool> &asmap) const;
+
+ //! Calculate in which "new" bucket this entry belongs, given a certain source
+ int GetNewBucket(const uint256 &nKey, const CNetAddr& src, const std::vector<bool> &asmap) const;
+
+ //! Calculate in which "new" bucket this entry belongs, using its default source
+ int GetNewBucket(const uint256 &nKey, const std::vector<bool> &asmap) const
+ {
+ return GetNewBucket(nKey, source, asmap);
+ }
+
+ //! Calculate in which position of a bucket to store this entry.
+ int GetBucketPosition(const uint256 &nKey, bool fNew, int nBucket) const;
+
+ //! Determine whether the statistics about this entry are bad enough so that it can just be deleted
+ bool IsTerrible(int64_t nNow = GetAdjustedTime()) const;
+
+ //! Calculate the relative chance this entry should be given when selecting nodes to connect to
+ double GetChance(int64_t nNow = GetAdjustedTime()) const;
+};
+
+class AddrManImpl
+{
+public:
+ AddrManImpl(std::vector<bool>&& asmap, bool deterministic, int32_t consistency_check_ratio);
+
+ ~AddrManImpl();
+
+ template <typename Stream>
+ void Serialize(Stream& s_) const EXCLUSIVE_LOCKS_REQUIRED(!cs);
+
+ template <typename Stream>
+ void Unserialize(Stream& s_) EXCLUSIVE_LOCKS_REQUIRED(!cs);
+
+ size_t size() const EXCLUSIVE_LOCKS_REQUIRED(!cs);
+
+ bool Add(const std::vector<CAddress>& vAddr, const CNetAddr& source, int64_t nTimePenalty)
+ EXCLUSIVE_LOCKS_REQUIRED(!cs);
+
+ void Good(const CService& addr, int64_t nTime)
+ EXCLUSIVE_LOCKS_REQUIRED(!cs);
+
+ void Attempt(const CService& addr, bool fCountFailure, int64_t nTime)
+ EXCLUSIVE_LOCKS_REQUIRED(!cs);
+
+ void ResolveCollisions() EXCLUSIVE_LOCKS_REQUIRED(!cs);
+
+ std::pair<CAddress, int64_t> SelectTriedCollision() EXCLUSIVE_LOCKS_REQUIRED(!cs);
+
+ std::pair<CAddress, int64_t> Select(bool newOnly) const
+ EXCLUSIVE_LOCKS_REQUIRED(!cs);
+
+ std::vector<CAddress> GetAddr(size_t max_addresses, size_t max_pct, std::optional<Network> network) const
+ EXCLUSIVE_LOCKS_REQUIRED(!cs);
+
+ void Connected(const CService& addr, int64_t nTime)
+ EXCLUSIVE_LOCKS_REQUIRED(!cs);
+
+ void SetServices(const CService& addr, ServiceFlags nServices)
+ EXCLUSIVE_LOCKS_REQUIRED(!cs);
+
+ const std::vector<bool>& GetAsmap() const;
+
+ friend class AddrManTest;
+ friend class AddrManDeterministic;
+
+private:
+ //! A mutex to protect the inner data structures.
+ mutable Mutex cs;
+
+ //! Source of random numbers for randomization in inner loops
+ mutable FastRandomContext insecure_rand GUARDED_BY(cs);
+
+ //! secret key to randomize bucket select with
+ uint256 nKey;
+
+ //! Serialization versions.
+ enum Format : uint8_t {
+ V0_HISTORICAL = 0, //!< historic format, before commit e6b343d88
+ V1_DETERMINISTIC = 1, //!< for pre-asmap files
+ V2_ASMAP = 2, //!< for files including asmap version
+ V3_BIP155 = 3, //!< same as V2_ASMAP plus addresses are in BIP155 format
+ };
+
+ //! The maximum format this software knows it can unserialize. Also, we always serialize
+ //! in this format.
+ //! The format (first byte in the serialized stream) can be higher than this and
+ //! still this software may be able to unserialize the file - if the second byte
+ //! (see `lowest_compatible` in `Unserialize()`) is less than or equal to this.
+ static constexpr Format FILE_FORMAT = Format::V3_BIP155;
+
+ //! The initial value of a field that is incremented every time an incompatible format
+ //! change is made (such that old software versions would not be able to parse and
+ //! understand the new file format). This is 32 because we overtook the "key size"
+ //! field which was 32 historically.
+ //! @note Don't increment this. Increment `lowest_compatible` in `Serialize()` instead.
+ static constexpr uint8_t INCOMPATIBILITY_BASE = 32;
+
+ //! last used nId
+ int nIdCount GUARDED_BY(cs){0};
+
+ //! table with information about all nIds
+ std::unordered_map<int, AddrInfo> mapInfo GUARDED_BY(cs);
+
+ //! find an nId based on its network address
+ std::unordered_map<CNetAddr, int, CNetAddrHash> mapAddr GUARDED_BY(cs);
+
+ //! randomly-ordered vector of all nIds
+ //! This is mutable because it is unobservable outside the class, so any
+ //! changes to it (even in const methods) are also unobservable.
+ mutable std::vector<int> vRandom GUARDED_BY(cs);
+
+ // number of "tried" entries
+ int nTried GUARDED_BY(cs){0};
+
+ //! list of "tried" buckets
+ int vvTried[ADDRMAN_TRIED_BUCKET_COUNT][ADDRMAN_BUCKET_SIZE] GUARDED_BY(cs);
+
+ //! number of (unique) "new" entries
+ int nNew GUARDED_BY(cs){0};
+
+ //! list of "new" buckets
+ int vvNew[ADDRMAN_NEW_BUCKET_COUNT][ADDRMAN_BUCKET_SIZE] GUARDED_BY(cs);
+
+ //! last time Good was called (memory only). Initially set to 1 so that "never" is strictly worse.
+ int64_t nLastGood GUARDED_BY(cs){1};
+
+ //! Holds addrs inserted into tried table that collide with existing entries. Test-before-evict discipline used to resolve these collisions.
+ std::set<int> m_tried_collisions;
+
+ /** Perform consistency checks every m_consistency_check_ratio operations (if non-zero). */
+ const int32_t m_consistency_check_ratio;
+
+ // Compressed IP->ASN mapping, loaded from a file when a node starts.
+ // Should always be empty if no file was provided.
+ // This mapping is then used for bucketing nodes in Addrman.
+ //
+ // If asmap is provided, nodes will be bucketed by
+ // the AS they belong to, in order to make it impossible for a node
+ // to connect to several nodes hosted in a single AS.
+ // This is done in response to the Erebus attack, but also to generally

+ // diversify the connections every node creates,
+ // especially useful when a large fraction of nodes
+ // operate under a couple of cloud providers.
+ //
+ // If a new asmap was provided, the existing records
+ // would be re-bucketed accordingly.
+ const std::vector<bool> m_asmap;
+
+ //! Find an entry.
+ AddrInfo* Find(const CNetAddr& addr, int* pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ //! Create a new entry and add it to the internal data structures mapInfo, mapAddr and vRandom.
+ AddrInfo* Create(const CAddress& addr, const CNetAddr& addrSource, int* pnId = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ //! Swap two elements in vRandom.
+ void SwapRandom(unsigned int nRandomPos1, unsigned int nRandomPos2) const EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ //! Delete an entry. It must not be in tried, and have refcount 0.
+ void Delete(int nId) EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ //! Clear a position in a "new" table. This is the only place where entries are actually deleted.
+ void ClearNew(int nUBucket, int nUBucketPos) EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ //! Move an entry from the "new" table(s) to the "tried" table
+ void MakeTried(AddrInfo& info, int nId) EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ void Good_(const CService& addr, bool test_before_evict, int64_t time) EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ bool Add_(const CAddress& addr, const CNetAddr& source, int64_t nTimePenalty) EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ void Attempt_(const CService& addr, bool fCountFailure, int64_t nTime) EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ std::pair<CAddress, int64_t> Select_(bool newOnly) const EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ std::vector<CAddress> GetAddr_(size_t max_addresses, size_t max_pct, std::optional<Network> network) const EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ void Connected_(const CService& addr, int64_t nTime) EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ void SetServices_(const CService& addr, ServiceFlags nServices) EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ void ResolveCollisions_() EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ std::pair<CAddress, int64_t> SelectTriedCollision_() EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ //! Consistency check, taking into account m_consistency_check_ratio. Will std::abort if an inconsistency is detected.
+ void Check() const EXCLUSIVE_LOCKS_REQUIRED(cs);
+
+ //! Perform consistency check, regardless of m_consistency_check_ratio.
+ //! @returns an error code or zero.
+ int ForceCheckAddrman() const EXCLUSIVE_LOCKS_REQUIRED(cs);
+};
+
+#endif // BITCOIN_ADDRMAN_IMPL_H
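For reference, the relocated constants above work out to 1 << 8 = 256 tried buckets and 1 << 10 = 1024 new buckets of 1 << 6 = 64 slots each, i.e. at most 256 * 64 = 16384 tried positions and 1024 * 64 = 65536 new-table positions (a single new entry may be referenced from several new buckets, which is what nRefCount tracks).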
diff --git a/src/bench/addrman.cpp b/src/bench/addrman.cpp
index bebf86a09d..d6834a239b 100644
--- a/src/bench/addrman.cpp
+++ b/src/bench/addrman.cpp
@@ -53,14 +53,14 @@ static void CreateAddresses()
}
}
-static void AddAddressesToAddrMan(CAddrMan& addrman)
+static void AddAddressesToAddrMan(AddrMan& addrman)
{
for (size_t source_i = 0; source_i < NUM_SOURCES; ++source_i) {
addrman.Add(g_addresses[source_i], g_sources[source_i]);
}
}
-static void FillAddrMan(CAddrMan& addrman)
+static void FillAddrMan(AddrMan& addrman)
{
CreateAddresses();
@@ -74,26 +74,26 @@ static void AddrManAdd(benchmark::Bench& bench)
CreateAddresses();
bench.run([&] {
- CAddrMan addrman{/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0};
+ AddrMan addrman{/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0};
AddAddressesToAddrMan(addrman);
});
}
static void AddrManSelect(benchmark::Bench& bench)
{
- CAddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
+ AddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
FillAddrMan(addrman);
bench.run([&] {
const auto& address = addrman.Select();
- assert(address.GetPort() > 0);
+ assert(address.first.GetPort() > 0);
});
}
static void AddrManGetAddr(benchmark::Bench& bench)
{
- CAddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
+ AddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
FillAddrMan(addrman);
@@ -105,7 +105,7 @@ static void AddrManGetAddr(benchmark::Bench& bench)
static void AddrManAddThenGood(benchmark::Bench& bench)
{
- auto markSomeAsGood = [](CAddrMan& addrman) {
+ auto markSomeAsGood = [](AddrMan& addrman) {
for (size_t source_i = 0; source_i < NUM_SOURCES; ++source_i) {
for (size_t addr_i = 0; addr_i < NUM_ADDRESSES_PER_SOURCE; ++addr_i) {
addrman.Good(g_addresses[source_i][addr_i]);
@@ -117,12 +117,12 @@ static void AddrManAddThenGood(benchmark::Bench& bench)
bench.run([&] {
// To make the benchmark independent of the number of evaluations, we always prepare a new addrman.
- // This is necessary because CAddrMan::Good() method modifies the object, affecting the timing of subsequent calls
+ // This is necessary because AddrMan::Good() method modifies the object, affecting the timing of subsequent calls
// to the same method and we want to do the same amount of work in every loop iteration.
//
// This has some overhead (exactly the result of AddrManAdd benchmark), but that overhead is constant so improvements in
- // CAddrMan::Good() will still be noticeable.
- CAddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
+ // AddrMan::Good() will still be noticeable.
+ AddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
AddAddressesToAddrMan(addrman);
markSomeAsGood(addrman);
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index 3c22ee0f67..43e986a765 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -786,7 +786,7 @@ static UniValue CallRPC(BaseRequestHandler* rh, const std::string& strMethod, co
if (failedToGetAuthCookie) {
throw std::runtime_error(strprintf(
"Could not locate RPC credentials. No authentication cookie could be found, and RPC password is not set. See -rpcpassword and -stdinrpcpass. Configuration file: (%s)",
- GetConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME)).string()));
+ fs::PathToString(GetConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME)))));
} else {
throw std::runtime_error("Authorization failed: Incorrect rpcuser or rpcpassword");
}
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index fc3bc6aa71..eb97cfc6f6 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -235,6 +235,16 @@ static void MutateTxRBFOptIn(CMutableTransaction& tx, const std::string& strInId
}
}
+template <typename T>
+static T TrimAndParse(const std::string& int_str, const std::string& err)
+{
+ const auto parsed{ToIntegral<T>(TrimString(int_str))};
+ if (!parsed.has_value()) {
+ throw std::runtime_error(err + " '" + int_str + "'");
+ }
+ return parsed.value();
+}
+
static void MutateTxAddInput(CMutableTransaction& tx, const std::string& strInput)
{
std::vector<std::string> vStrInputParts;
@@ -261,8 +271,9 @@ static void MutateTxAddInput(CMutableTransaction& tx, const std::string& strInpu
// extract the optional sequence number
uint32_t nSequenceIn = CTxIn::SEQUENCE_FINAL;
- if (vStrInputParts.size() > 2)
- nSequenceIn = std::stoul(vStrInputParts[2]);
+ if (vStrInputParts.size() > 2) {
+ nSequenceIn = TrimAndParse<uint32_t>(vStrInputParts.at(2), "invalid TX sequence id");
+ }
// append to transaction input list
CTxIn txin(txid, vout, CScript(), nSequenceIn);
@@ -352,10 +363,10 @@ static void MutateTxAddOutMultiSig(CMutableTransaction& tx, const std::string& s
CAmount value = ExtractAndValidateValue(vStrInputParts[0]);
// Extract REQUIRED
- uint32_t required = stoul(vStrInputParts[1]);
+ const uint32_t required{TrimAndParse<uint32_t>(vStrInputParts.at(1), "invalid multisig required number")};
// Extract NUMKEYS
- uint32_t numkeys = stoul(vStrInputParts[2]);
+ const uint32_t numkeys{TrimAndParse<uint32_t>(vStrInputParts.at(2), "invalid multisig total number")};
// Validate there are the correct number of pubkeys
if (vStrInputParts.size() < numkeys + 3)
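A quick behavioural sketch (not part of the patch) of the TrimAndParse helper added above, based on the TrimString/ToIntegral utilities it calls:

    TrimAndParse<uint32_t>(" 4294967293 ", "invalid TX sequence id"); // 4294967293 (whitespace trimmed)
    TrimAndParse<uint32_t>("-1", "invalid TX sequence id");           // throws std::runtime_error (out of range for uint32_t)
    TrimAndParse<uint32_t>("0x10", "invalid TX sequence id");         // throws (no hex or locale-dependent forms)

Unlike the std::stoul calls it replaces, the helper rejects trailing garbage, negative values and hex prefixes, and reports the offending string in the exception message.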
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index b155745794..2e823c1211 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -118,15 +118,15 @@ public:
// This is fine at runtime as we'll fall back to using them as an addrfetch if they don't support the
// service bits we want, but we should get them updated to support all service bits wanted by any
// release ASAP to avoid it where possible.
- vSeeds.emplace_back("seed.bitcoin.sipa.be"); // Pieter Wuille, only supports x1, x5, x9, and xd
- vSeeds.emplace_back("dnsseed.bluematt.me"); // Matt Corallo, only supports x9
- vSeeds.emplace_back("dnsseed.bitcoin.dashjr.org"); // Luke Dashjr
- vSeeds.emplace_back("seed.bitcoinstats.com"); // Christian Decker, supports x1 - xf
- vSeeds.emplace_back("seed.bitcoin.jonasschnelli.ch"); // Jonas Schnelli, only supports x1, x5, x9, and xd
- vSeeds.emplace_back("seed.btc.petertodd.org"); // Peter Todd, only supports x1, x5, x9, and xd
- vSeeds.emplace_back("seed.bitcoin.sprovoost.nl"); // Sjors Provoost
- vSeeds.emplace_back("dnsseed.emzy.de"); // Stephan Oeste
- vSeeds.emplace_back("seed.bitcoin.wiz.biz"); // Jason Maurice
+ vSeeds.emplace_back("seed.bitcoin.sipa.be."); // Pieter Wuille, only supports x1, x5, x9, and xd
+ vSeeds.emplace_back("dnsseed.bluematt.me."); // Matt Corallo, only supports x9
+ vSeeds.emplace_back("dnsseed.bitcoin.dashjr.org."); // Luke Dashjr
+ vSeeds.emplace_back("seed.bitcoinstats.com."); // Christian Decker, supports x1 - xf
+ vSeeds.emplace_back("seed.bitcoin.jonasschnelli.ch."); // Jonas Schnelli, only supports x1, x5, x9, and xd
+ vSeeds.emplace_back("seed.btc.petertodd.org."); // Peter Todd, only supports x1, x5, x9, and xd
+ vSeeds.emplace_back("seed.bitcoin.sprovoost.nl."); // Sjors Provoost
+ vSeeds.emplace_back("dnsseed.emzy.de."); // Stephan Oeste
+ vSeeds.emplace_back("seed.bitcoin.wiz.biz."); // Jason Maurice
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,0);
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,5);
@@ -230,10 +230,10 @@ public:
vFixedSeeds.clear();
vSeeds.clear();
// nodes with support for servicebits filtering should be at the top
- vSeeds.emplace_back("testnet-seed.bitcoin.jonasschnelli.ch");
- vSeeds.emplace_back("seed.tbtc.petertodd.org");
- vSeeds.emplace_back("seed.testnet.bitcoin.sprovoost.nl");
- vSeeds.emplace_back("testnet-seed.bluematt.me"); // Just a static list of stable node(s), only supports x9
+ vSeeds.emplace_back("testnet-seed.bitcoin.jonasschnelli.ch.");
+ vSeeds.emplace_back("seed.tbtc.petertodd.org.");
+ vSeeds.emplace_back("seed.testnet.bitcoin.sprovoost.nl.");
+ vSeeds.emplace_back("testnet-seed.bluematt.me."); // Just a static list of stable node(s), only supports x9
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,111);
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,196);
@@ -280,7 +280,7 @@ public:
if (!args.IsArgSet("-signetchallenge")) {
bin = ParseHex("512103ad5e0edad18cb1f0fc0d28a3d4f1f3e445640337489abb10404f2d1e086be430210359ef5021964fe22d6f8e05b2463c9540ce96883fe3b278760f048f5189f2e6c452ae");
- vSeeds.emplace_back("seed.signet.bitcoin.sprovoost.nl");
+ vSeeds.emplace_back("seed.signet.bitcoin.sprovoost.nl.");
// Hardcoded nodes can be removed once there are more DNS seeds
vSeeds.emplace_back("178.128.221.177");
diff --git a/src/core_read.cpp b/src/core_read.cpp
index 320811b9e9..2149b428d2 100644
--- a/src/core_read.cpp
+++ b/src/core_read.cpp
@@ -26,20 +26,20 @@ opcodetype ParseOpCode(const std::string& s)
{
static std::map<std::string, opcodetype> mapOpNames;
- if (mapOpNames.empty())
- {
- for (unsigned int op = 0; op <= MAX_OPCODE; op++)
- {
+ if (mapOpNames.empty()) {
+ for (unsigned int op = 0; op <= MAX_OPCODE; op++) {
// Allow OP_RESERVED to get into mapOpNames
- if (op < OP_NOP && op != OP_RESERVED)
+ if (op < OP_NOP && op != OP_RESERVED) {
continue;
+ }
std::string strName = GetOpName(static_cast<opcodetype>(op));
- if (strName == "OP_UNKNOWN")
+ if (strName == "OP_UNKNOWN") {
continue;
+ }
mapOpNames[strName] = static_cast<opcodetype>(op);
// Convenience: OP_ADD and just ADD are both recognized:
- if (strName.compare(0, 3, "OP_") == 0) { // strName starts with "OP_"
+ if (strName.compare(0, 3, "OP_") == 0) { // strName starts with "OP_"
mapOpNames[strName.substr(3)] = static_cast<opcodetype>(op);
}
}
@@ -59,44 +59,35 @@ CScript ParseScript(const std::string& s)
std::vector<std::string> words;
boost::algorithm::split(words, s, boost::algorithm::is_any_of(" \t\n"), boost::algorithm::token_compress_on);
- for (std::vector<std::string>::const_iterator w = words.begin(); w != words.end(); ++w)
- {
- if (w->empty())
- {
+ for (const std::string& w : words) {
+ if (w.empty()) {
// Empty string, ignore. (boost::split given '' will return one word)
- }
- else if (std::all_of(w->begin(), w->end(), ::IsDigit) ||
- (w->front() == '-' && w->size() > 1 && std::all_of(w->begin()+1, w->end(), ::IsDigit)))
+ } else if (std::all_of(w.begin(), w.end(), ::IsDigit) ||
+ (w.front() == '-' && w.size() > 1 && std::all_of(w.begin() + 1, w.end(), ::IsDigit)))
{
// Number
- int64_t n = LocaleIndependentAtoi<int64_t>(*w);
+ const auto num{ToIntegral<int64_t>(w)};
- //limit the range of numbers ParseScript accepts in decimal
- //since numbers outside -0xFFFFFFFF...0xFFFFFFFF are illegal in scripts
- if (n > int64_t{0xffffffff} || n < -1 * int64_t{0xffffffff}) {
+ // limit the range of numbers ParseScript accepts in decimal
+ // since numbers outside -0xFFFFFFFF...0xFFFFFFFF are illegal in scripts
+ if (!num.has_value() || num > int64_t{0xffffffff} || num < -1 * int64_t{0xffffffff}) {
throw std::runtime_error("script parse error: decimal numeric value only allowed in the "
"range -0xFFFFFFFF...0xFFFFFFFF");
}
- result << n;
- }
- else if (w->substr(0,2) == "0x" && w->size() > 2 && IsHex(std::string(w->begin()+2, w->end())))
- {
+ result << num.value();
+ } else if (w.substr(0, 2) == "0x" && w.size() > 2 && IsHex(std::string(w.begin() + 2, w.end()))) {
// Raw hex data, inserted NOT pushed onto stack:
- std::vector<unsigned char> raw = ParseHex(std::string(w->begin()+2, w->end()));
+ std::vector<unsigned char> raw = ParseHex(std::string(w.begin() + 2, w.end()));
result.insert(result.end(), raw.begin(), raw.end());
- }
- else if (w->size() >= 2 && w->front() == '\'' && w->back() == '\'')
- {
+ } else if (w.size() >= 2 && w.front() == '\'' && w.back() == '\'') {
// Single-quoted string, pushed as data. NOTE: this is poor-man's
// parsing, spaces/tabs/newlines in single-quoted strings won't work.
- std::vector<unsigned char> value(w->begin()+1, w->end()-1);
+ std::vector<unsigned char> value(w.begin() + 1, w.end() - 1);
result << value;
- }
- else
- {
+ } else {
// opcode, e.g. OP_ADD or ADD:
- result << ParseOpCode(*w);
+ result << ParseOpCode(w);
}
}
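With the stricter parsing above, a decimal token must both convert via ToIntegral<int64_t> and fall inside the script-legal range; a few illustrative calls (a sketch, not part of the patch):

    ParseScript("4294967295");           // ok: equals 0xFFFFFFFF, the upper bound
    ParseScript("-4294967295");          // ok: the lower bound
    ParseScript("4294967296");           // throws: outside -0xFFFFFFFF...0xFFFFFFFF
    ParseScript("99999999999999999999"); // throws: overflow is now caught by ToIntegral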
diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp
index 3a1086bf4c..2fdc54464a 100644
--- a/src/dbwrapper.cpp
+++ b/src/dbwrapper.cpp
@@ -115,7 +115,7 @@ static leveldb::Options GetOptions(size_t nCacheSize)
}
CDBWrapper::CDBWrapper(const fs::path& path, size_t nCacheSize, bool fMemory, bool fWipe, bool obfuscate)
- : m_name{path.stem().string()}
+ : m_name{fs::PathToString(path.stem())}
{
penv = nullptr;
readoptions.verify_checksums = true;
@@ -129,21 +129,21 @@ CDBWrapper::CDBWrapper(const fs::path& path, size_t nCacheSize, bool fMemory, bo
options.env = penv;
} else {
if (fWipe) {
- LogPrintf("Wiping LevelDB in %s\n", path.string());
- leveldb::Status result = leveldb::DestroyDB(path.string(), options);
+ LogPrintf("Wiping LevelDB in %s\n", fs::PathToString(path));
+ leveldb::Status result = leveldb::DestroyDB(fs::PathToString(path), options);
dbwrapper_private::HandleError(result);
}
TryCreateDirectories(path);
- LogPrintf("Opening LevelDB in %s\n", path.string());
+ LogPrintf("Opening LevelDB in %s\n", fs::PathToString(path));
}
- leveldb::Status status = leveldb::DB::Open(options, path.string(), &pdb);
+ leveldb::Status status = leveldb::DB::Open(options, fs::PathToString(path), &pdb);
dbwrapper_private::HandleError(status);
LogPrintf("Opened LevelDB successfully\n");
if (gArgs.GetBoolArg("-forcecompactdb", false)) {
- LogPrintf("Starting database compaction of %s\n", path.string());
+ LogPrintf("Starting database compaction of %s\n", fs::PathToString(path));
pdb->CompactRange(nullptr, nullptr);
- LogPrintf("Finished database compaction of %s\n", path.string());
+ LogPrintf("Finished database compaction of %s\n", fs::PathToString(path));
}
// The base-case obfuscation key, which is a noop.
@@ -160,10 +160,10 @@ CDBWrapper::CDBWrapper(const fs::path& path, size_t nCacheSize, bool fMemory, bo
Write(OBFUSCATE_KEY_KEY, new_key);
obfuscate_key = new_key;
- LogPrintf("Wrote new obfuscate key for %s: %s\n", path.string(), HexStr(obfuscate_key));
+ LogPrintf("Wrote new obfuscate key for %s: %s\n", fs::PathToString(path), HexStr(obfuscate_key));
}
- LogPrintf("Using obfuscation key for %s: %s\n", path.string(), HexStr(obfuscate_key));
+ LogPrintf("Using obfuscation key for %s: %s\n", fs::PathToString(path), HexStr(obfuscate_key));
}
CDBWrapper::~CDBWrapper()
@@ -197,13 +197,15 @@ bool CDBWrapper::WriteBatch(CDBBatch& batch, bool fSync)
return true;
}
-size_t CDBWrapper::DynamicMemoryUsage() const {
+size_t CDBWrapper::DynamicMemoryUsage() const
+{
std::string memory;
- if (!pdb->GetProperty("leveldb.approximate-memory-usage", &memory)) {
+ std::optional<size_t> parsed;
+ if (!pdb->GetProperty("leveldb.approximate-memory-usage", &memory) || !(parsed = ToIntegral<size_t>(memory))) {
LogPrint(BCLog::LEVELDB, "Failed to get approximate-memory-usage property\n");
return 0;
}
- return stoul(memory);
+ return parsed.value();
}
// Prefixed with null character to avoid collisions with other keys
diff --git a/src/flatfile.cpp b/src/flatfile.cpp
index 151f1a38f1..929808c7fa 100644
--- a/src/flatfile.cpp
+++ b/src/flatfile.cpp
@@ -41,11 +41,11 @@ FILE* FlatFileSeq::Open(const FlatFilePos& pos, bool read_only)
if (!file && !read_only)
file = fsbridge::fopen(path, "wb+");
if (!file) {
- LogPrintf("Unable to open file %s\n", path.string());
+ LogPrintf("Unable to open file %s\n", fs::PathToString(path));
return nullptr;
}
if (pos.nPos && fseek(file, pos.nPos, SEEK_SET)) {
- LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string());
+ LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, fs::PathToString(path));
fclose(file);
return nullptr;
}
diff --git a/src/fs.cpp b/src/fs.cpp
index b9b3c46d8d..8cae7f32c6 100644
--- a/src/fs.cpp
+++ b/src/fs.cpp
@@ -24,7 +24,7 @@ namespace fsbridge {
FILE *fopen(const fs::path& p, const char *mode)
{
#ifndef WIN32
- return ::fopen(p.string().c_str(), mode);
+ return ::fopen(p.c_str(), mode);
#else
std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>,wchar_t> utf8_cvt;
return ::_wfopen(p.wstring().c_str(), utf8_cvt.from_bytes(mode).c_str());
@@ -46,7 +46,7 @@ static std::string GetErrorReason()
FileLock::FileLock(const fs::path& file)
{
- fd = open(file.string().c_str(), O_RDWR);
+ fd = open(file.c_str(), O_RDWR);
if (fd == -1) {
reason = GetErrorReason();
}
@@ -249,9 +249,9 @@ void ofstream::close()
#else // __GLIBCXX__
#if BOOST_VERSION >= 107700
-static_assert(sizeof(*BOOST_FILESYSTEM_C_STR(fs::path())) == sizeof(wchar_t),
+static_assert(sizeof(*BOOST_FILESYSTEM_C_STR(boost::filesystem::path())) == sizeof(wchar_t),
#else
-static_assert(sizeof(*fs::path().BOOST_FILESYSTEM_C_STR) == sizeof(wchar_t),
+static_assert(sizeof(*boost::filesystem::path().BOOST_FILESYSTEM_C_STR) == sizeof(wchar_t),
#endif // BOOST_VERSION >= 107700
"Warning: This build is using boost::filesystem ofstream and ifstream "
"implementations which will fail to open paths containing multibyte "
diff --git a/src/fs.h b/src/fs.h
index d77b90be66..4a0bf39e95 100644
--- a/src/fs.h
+++ b/src/fs.h
@@ -13,9 +13,132 @@
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
+#include <tinyformat.h>
/** Filesystem operations and types */
-namespace fs = boost::filesystem;
+namespace fs {
+
+using namespace boost::filesystem;
+
+/**
+ * Path class wrapper to prepare application code for transition from
+ * boost::filesystem library to std::filesystem implementation. The main
+ * purpose of the class is to define fs::path::u8string() and fs::u8path()
+ * functions not present in boost. It also blocks calls to the
+ * fs::path(std::string) implicit constructor and the fs::path::string()
+ * method, which worked well in the boost::filesystem implementation, but have
+ * unsafe and unpredictable behavior on Windows in the std::filesystem
+ * implementation (see implementation note in \ref PathToString for details).
+ */
+class path : public boost::filesystem::path
+{
+public:
+ using boost::filesystem::path::path;
+
+ // Allow path objects arguments for compatibility.
+ path(boost::filesystem::path path) : boost::filesystem::path::path(std::move(path)) {}
+ path& operator=(boost::filesystem::path path) { boost::filesystem::path::operator=(std::move(path)); return *this; }
+ path& operator/=(boost::filesystem::path path) { boost::filesystem::path::operator/=(std::move(path)); return *this; }
+
+ // Allow literal string arguments, which are safe as long as the literals are ASCII.
+ path(const char* c) : boost::filesystem::path(c) {}
+ path& operator=(const char* c) { boost::filesystem::path::operator=(c); return *this; }
+ path& operator/=(const char* c) { boost::filesystem::path::operator/=(c); return *this; }
+ path& append(const char* c) { boost::filesystem::path::append(c); return *this; }
+
+ // Disallow std::string arguments to avoid locale-dependent decoding on windows.
+ path(std::string) = delete;
+ path& operator=(std::string) = delete;
+ path& operator/=(std::string) = delete;
+ path& append(std::string) = delete;
+
+ // Disallow std::string conversion method to avoid locale-dependent encoding on windows.
+ std::string string() const = delete;
+
+ // Define UTF-8 string conversion method not present in boost::filesystem but present in std::filesystem.
+ std::string u8string() const { return boost::filesystem::path::string(); }
+};
+
+// Define UTF-8 string conversion function not present in boost::filesystem but present in std::filesystem.
+static inline path u8path(const std::string& string)
+{
+ return boost::filesystem::path(string);
+}
+
+// Disallow implicit std::string conversion for system_complete to avoid
+// locale-dependent encoding on windows.
+static inline path system_complete(const path& p)
+{
+ return boost::filesystem::system_complete(p);
+}
+
+// Disallow implicit std::string conversion for exists to avoid
+// locale-dependent encoding on windows.
+static inline bool exists(const path& p)
+{
+ return boost::filesystem::exists(p);
+}
+
+// Allow explicit quoted stream I/O.
+static inline auto quoted(const std::string& s)
+{
+ return boost::io::quoted(s, '&');
+}
+
+// Allow safe path append operations.
+static inline path operator+(path p1, path p2)
+{
+ p1 += std::move(p2);
+ return p1;
+}
+
+/**
+ * Convert path object to byte string. On POSIX, paths natively are byte
+ * strings so this is trivial. On Windows, paths natively are Unicode, so an
+ * encoding step is necessary.
+ *
+ * The inverse of \ref PathToString is \ref PathFromString. The strings
+ * returned and parsed by these functions can be used to call POSIX APIs, and
+ * for roundtrip conversion, logging, and debugging. But they are not
+ * guaranteed to be valid UTF-8, and are generally meant to be used internally,
+ * not externally. When communicating with external programs and libraries that
+ * require UTF-8, fs::path::u8string() and fs::u8path() methods can be used.
+ * For other applications, if support for non UTF-8 paths is required, or if
+ * higher-level JSON or XML or URI or C-style escapes are preferred, it may
+ * also be appropriate to use different path encoding functions.
+ *
+ * Implementation note: On Windows, the std::filesystem::path(string)
+ * constructor and std::filesystem::path::string() method are not safe to use
+ * here, because these methods encode the path using C++'s narrow multibyte
+ * encoding, which on Windows corresponds to the current "code page", which is
+ * unpredictable and typically not able to represent all valid paths. So
+ * std::filesystem::path::u8string() and std::filesystem::u8path() functions
+ * are used instead on Windows. On POSIX, u8string/u8path functions are not
+ * safe to use because paths are not always valid UTF-8, so plain string
+ * methods which do not transform the path there are used.
+ */
+static inline std::string PathToString(const path& path)
+{
+#ifdef WIN32
+ return path.u8string();
+#else
+ static_assert(std::is_same<path::string_type, std::string>::value, "PathToString not implemented on this platform");
+ return path.boost::filesystem::path::string();
+#endif
+}
+
+/**
+ * Convert byte string to path object. Inverse of \ref PathToString.
+ */
+static inline path PathFromString(const std::string& string)
+{
+#ifdef WIN32
+ return u8path(string);
+#else
+ return boost::filesystem::path(string);
+#endif
+}
+} // namespace fs
/** Bridge operations to C stdio */
namespace fsbridge {
@@ -103,4 +226,11 @@ namespace fsbridge {
#endif // WIN32 && __GLIBCXX__
};
+// Disallow path operator<< formatting in tinyformat to avoid locale-dependent
+// encoding on windows.
+namespace tinyformat {
+template<> inline void formatValue(std::ostream&, const char*, const char*, int, const boost::filesystem::path&) = delete;
+template<> inline void formatValue(std::ostream&, const char*, const char*, int, const fs::path&) = delete;
+} // namespace tinyformat
+
#endif // BITCOIN_FS_H
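A minimal sketch (not part of the patch; the path below is purely illustrative) of the intended conversions, using only the wrappers declared above:

    const fs::path p = fs::PathFromString("/var/lib/bitcoind/peers.dat"); // byte string -> path
    const std::string s = fs::PathToString(p);                            // path -> byte string (logging, POSIX calls)
    const std::string u = p.u8string();                                   // UTF-8, for external libraries that need it

Note that p.string() no longer compiles (it is deleted above), and passing a path directly to strprintf/LogPrintf is blocked by the deleted tinyformat::formatValue overloads, so call sites must convert explicitly, which is the mechanical change visible throughout the *.cpp hunks in this change.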
diff --git a/src/hash.cpp b/src/hash.cpp
index 92c923fbd2..0e5bd975e4 100644
--- a/src/hash.cpp
+++ b/src/hash.cpp
@@ -75,10 +75,7 @@ unsigned int MurmurHash3(unsigned int nHashSeed, Span<const unsigned char> vData
void BIP32Hash(const ChainCode &chainCode, unsigned int nChild, unsigned char header, const unsigned char data[32], unsigned char output[64])
{
unsigned char num[4];
- num[0] = (nChild >> 24) & 0xFF;
- num[1] = (nChild >> 16) & 0xFF;
- num[2] = (nChild >> 8) & 0xFF;
- num[3] = (nChild >> 0) & 0xFF;
+ WriteBE32(num, nChild);
CHMAC_SHA512(chainCode.begin(), chainCode.size()).Write(&header, 1).Write(data, 32).Write(num, 4).Finalize(output);
}
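The WriteBE32 helper (crypto/common.h) writes its 32-bit argument in big-endian byte order, which is exactly what the four removed shift-and-mask lines did and what BIP32's ser32(i) serialization requires; for example (illustration only), nChild = 0x8000002A produces num[] = {0x80, 0x00, 0x00, 0x2A}.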
diff --git a/src/i2p.cpp b/src/i2p.cpp
index 5e7e42fb77..35ac8731f2 100644
--- a/src/i2p.cpp
+++ b/src/i2p.cpp
@@ -328,7 +328,7 @@ void Session::GenerateAndSavePrivateKey(const Sock& sock)
if (!WriteBinaryFile(m_private_key_file,
std::string(m_private_key.begin(), m_private_key.end()))) {
throw std::runtime_error(
- strprintf("Cannot save I2P private key to %s", m_private_key_file));
+ strprintf("Cannot save I2P private key to %s", fs::quoted(fs::PathToString(m_private_key_file))));
}
}
diff --git a/src/init.cpp b/src/init.cpp
index 4495ded64d..164b7bb55d 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -113,7 +113,7 @@ static const char* BITCOIN_PID_FILENAME = "bitcoind.pid";
static fs::path GetPidFile(const ArgsManager& args)
{
- return AbsPathForConfigVal(fs::path(args.GetArg("-pid", BITCOIN_PID_FILENAME)));
+ return AbsPathForConfigVal(fs::PathFromString(args.GetArg("-pid", BITCOIN_PID_FILENAME)));
}
[[nodiscard]] static bool CreatePidFile(const ArgsManager& args)
@@ -127,7 +127,7 @@ static fs::path GetPidFile(const ArgsManager& args)
#endif
return true;
} else {
- return InitError(strprintf(_("Unable to create the PID file '%s': %s"), GetPidFile(args).string(), std::strerror(errno)));
+ return InitError(strprintf(_("Unable to create the PID file '%s': %s"), fs::PathToString(GetPidFile(args)), std::strerror(errno)));
}
}
@@ -1062,10 +1062,10 @@ static bool LockDataDirectory(bool probeOnly)
// Make sure only a single Bitcoin process is using the data directory.
fs::path datadir = gArgs.GetDataDirNet();
if (!DirIsWritable(datadir)) {
- return InitError(strprintf(_("Cannot write to data directory '%s'; check permissions."), datadir.string()));
+ return InitError(strprintf(_("Cannot write to data directory '%s'; check permissions."), fs::PathToString(datadir)));
}
if (!LockDirectory(datadir, ".lock", probeOnly)) {
- return InitError(strprintf(_("Cannot obtain a lock on data directory %s. %s is probably already running."), datadir.string(), PACKAGE_NAME));
+ return InitError(strprintf(_("Cannot obtain a lock on data directory %s. %s is probably already running."), fs::PathToString(datadir), PACKAGE_NAME));
}
return true;
}
@@ -1126,12 +1126,12 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
LogPrintf("Using at most %i automatic connections (%i file descriptors available)\n", nMaxConnections, nFD);
// Warn about relative -datadir path.
- if (args.IsArgSet("-datadir") && !fs::path(args.GetArg("-datadir", "")).is_absolute()) {
+ if (args.IsArgSet("-datadir") && !fs::PathFromString(args.GetArg("-datadir", "")).is_absolute()) {
LogPrintf("Warning: relative datadir option '%s' specified, which will be interpreted relative to the " /* Continued */
"current working directory '%s'. This is fragile, because if bitcoin is started in the future "
"from a different location, it will be unable to locate the current data files. There could "
"also be data loss if bitcoin is started while in a temporary directory.\n",
- args.GetArg("-datadir", ""), fs::current_path().string());
+ args.GetArg("-datadir", ""), fs::PathToString(fs::current_path()));
}
InitSignatureCache();
@@ -1215,20 +1215,20 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// Read asmap file if configured
std::vector<bool> asmap;
if (args.IsArgSet("-asmap")) {
- fs::path asmap_path = fs::path(args.GetArg("-asmap", ""));
+ fs::path asmap_path = fs::PathFromString(args.GetArg("-asmap", ""));
if (asmap_path.empty()) {
- asmap_path = DEFAULT_ASMAP_FILENAME;
+ asmap_path = fs::PathFromString(DEFAULT_ASMAP_FILENAME);
}
if (!asmap_path.is_absolute()) {
asmap_path = gArgs.GetDataDirNet() / asmap_path;
}
if (!fs::exists(asmap_path)) {
- InitError(strprintf(_("Could not find asmap file %s"), asmap_path));
+ InitError(strprintf(_("Could not find asmap file %s"), fs::quoted(fs::PathToString(asmap_path))));
return false;
}
asmap = DecodeAsmap(asmap_path);
if (asmap.size() == 0) {
- InitError(strprintf(_("Could not parse asmap file %s"), asmap_path));
+ InitError(strprintf(_("Could not parse asmap file %s"), fs::quoted(fs::PathToString(asmap_path))));
return false;
}
const uint256 asmap_version = SerializeHash(asmap);
@@ -1653,11 +1653,11 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// ********************************************************* Step 11: import blocks
if (!CheckDiskSpace(gArgs.GetDataDirNet())) {
- InitError(strprintf(_("Error: Disk space is low for %s"), gArgs.GetDataDirNet()));
+ InitError(strprintf(_("Error: Disk space is low for %s"), fs::quoted(fs::PathToString(gArgs.GetDataDirNet()))));
return false;
}
if (!CheckDiskSpace(gArgs.GetBlocksDirPath())) {
- InitError(strprintf(_("Error: Disk space is low for %s"), gArgs.GetBlocksDirPath()));
+ InitError(strprintf(_("Error: Disk space is low for %s"), fs::quoted(fs::PathToString(gArgs.GetBlocksDirPath()))));
return false;
}
@@ -1685,7 +1685,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
std::vector<fs::path> vImportFiles;
for (const std::string& strFile : args.GetArgs("-loadblock")) {
- vImportFiles.push_back(strFile);
+ vImportFiles.push_back(fs::PathFromString(strFile));
}
chainman.m_load_block = std::thread(&util::TraceThread, "loadblk", [=, &chainman, &args] {
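The -asmap hunk above makes the string-to-path conversion explicit while keeping the resolution rules: an empty argument falls back to the default filename and a relative path is anchored at the network data directory. A small sketch of that logic, where the default filename and the data directory value are assumptions made for illustration:

```cpp
#include <filesystem>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

// Resolve an -asmap argument the way the hunk above does: empty means the
// default filename, and relative paths are anchored at the data directory.
fs::path ResolveAsmapPath(const std::string& arg, const fs::path& datadir)
{
    fs::path asmap_path = fs::path(arg);
    if (asmap_path.empty()) asmap_path = fs::path("ip_asn.map"); // assumed default name
    if (!asmap_path.is_absolute()) asmap_path = datadir / asmap_path;
    return asmap_path;
}

int main()
{
    const fs::path datadir{"/home/user/.bitcoin"};                  // illustrative
    std::cout << ResolveAsmapPath("", datadir) << '\n';             // datadir/ip_asn.map
    std::cout << ResolveAsmapPath("/etc/asmap.dat", datadir) << '\n'; // absolute, kept as-is
}
```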
diff --git a/src/init/common.cpp b/src/init/common.cpp
index 5c1f469081..8f9e0ebc87 100644
--- a/src/init/common.cpp
+++ b/src/init/common.cpp
@@ -81,7 +81,7 @@ void AddLoggingArgs(ArgsManager& argsman)
void SetLoggingOptions(const ArgsManager& args)
{
LogInstance().m_print_to_file = !args.IsArgNegated("-debuglogfile");
- LogInstance().m_file_path = AbsPathForConfigVal(args.GetArg("-debuglogfile", DEFAULT_DEBUGLOGFILE));
+ LogInstance().m_file_path = AbsPathForConfigVal(fs::PathFromString(args.GetArg("-debuglogfile", DEFAULT_DEBUGLOGFILE)));
LogInstance().m_print_to_console = args.GetBoolArg("-printtoconsole", !args.GetBoolArg("-daemon", false));
LogInstance().m_log_timestamps = args.GetBoolArg("-logtimestamps", DEFAULT_LOGTIMESTAMPS);
LogInstance().m_log_time_micros = args.GetBoolArg("-logtimemicros", DEFAULT_LOGTIMEMICROS);
@@ -128,24 +128,24 @@ bool StartLogging(const ArgsManager& args)
}
if (!LogInstance().StartLogging()) {
return InitError(strprintf(Untranslated("Could not open debug log file %s"),
- LogInstance().m_file_path.string()));
+ fs::PathToString(LogInstance().m_file_path)));
}
if (!LogInstance().m_log_timestamps)
LogPrintf("Startup time: %s\n", FormatISO8601DateTime(GetTime()));
- LogPrintf("Default data directory %s\n", GetDefaultDataDir().string());
- LogPrintf("Using data directory %s\n", gArgs.GetDataDirNet().string());
+ LogPrintf("Default data directory %s\n", fs::PathToString(GetDefaultDataDir()));
+ LogPrintf("Using data directory %s\n", fs::PathToString(gArgs.GetDataDirNet()));
// Only log conf file usage message if conf file actually exists.
fs::path config_file_path = GetConfigFile(args.GetArg("-conf", BITCOIN_CONF_FILENAME));
if (fs::exists(config_file_path)) {
- LogPrintf("Config file: %s\n", config_file_path.string());
+ LogPrintf("Config file: %s\n", fs::PathToString(config_file_path));
} else if (args.IsArgSet("-conf")) {
// Warn if no conf file exists at path provided by user
- InitWarning(strprintf(_("The specified config file %s does not exist"), config_file_path.string()));
+ InitWarning(strprintf(_("The specified config file %s does not exist"), fs::PathToString(config_file_path)));
} else {
// Not categorizing as "Warning" because it's the default behavior
- LogPrintf("Config file: %s (not found, skipping)\n", config_file_path.string());
+ LogPrintf("Config file: %s (not found, skipping)\n", fs::PathToString(config_file_path));
}
// Log the config arguments to debug.log
diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h
index 9a97cad1f8..d4ceb517dd 100644
--- a/src/interfaces/chain.h
+++ b/src/interfaces/chain.h
@@ -289,7 +289,7 @@ public:
virtual void requestMempoolTransactions(Notifications& notifications) = 0;
//! Check if Taproot has activated
- virtual bool isTaprootActive() const = 0;
+ virtual bool isTaprootActive() = 0;
};
//! Interface to let node manage chain clients (wallets, or maybe tools for
diff --git a/src/ipc/process.cpp b/src/ipc/process.cpp
index 43ed1f1bae..9036b80c45 100644
--- a/src/ipc/process.cpp
+++ b/src/ipc/process.cpp
@@ -30,8 +30,8 @@ public:
return mp::SpawnProcess(pid, [&](int fd) {
fs::path path = argv0_path;
path.remove_filename();
- path.append(new_exe_name);
- return std::vector<std::string>{path.string(), "-ipcfd", strprintf("%i", fd)};
+ path /= fs::PathFromString(new_exe_name);
+ return std::vector<std::string>{fs::PathToString(path), "-ipcfd", strprintf("%i", fd)};
});
}
int waitSpawned(int pid) override { return mp::WaitProcess(pid); }
diff --git a/src/key.cpp b/src/key.cpp
index 40df248e02..39155e4311 100644
--- a/src/key.cpp
+++ b/src/key.cpp
@@ -343,8 +343,7 @@ CExtPubKey CExtKey::Neuter() const {
void CExtKey::Encode(unsigned char code[BIP32_EXTKEY_SIZE]) const {
code[0] = nDepth;
memcpy(code+1, vchFingerprint, 4);
- code[5] = (nChild >> 24) & 0xFF; code[6] = (nChild >> 16) & 0xFF;
- code[7] = (nChild >> 8) & 0xFF; code[8] = (nChild >> 0) & 0xFF;
+ WriteBE32(code+5, nChild);
memcpy(code+9, chaincode.begin(), 32);
code[41] = 0;
assert(key.size() == 32);
@@ -354,7 +353,7 @@ void CExtKey::Encode(unsigned char code[BIP32_EXTKEY_SIZE]) const {
void CExtKey::Decode(const unsigned char code[BIP32_EXTKEY_SIZE]) {
nDepth = code[0];
memcpy(vchFingerprint, code+1, 4);
- nChild = (code[5] << 24) | (code[6] << 16) | (code[7] << 8) | code[8];
+ nChild = ReadBE32(code+5);
memcpy(chaincode.begin(), code+9, 32);
key.Set(code+42, code+BIP32_EXTKEY_SIZE, true);
if ((nDepth == 0 && (nChild != 0 || ReadLE32(vchFingerprint) != 0)) || code[41] != 0) key = CKey();
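The key.cpp hunks replace hand-written shift-and-mask code for the BIP32 child index with big-endian helpers. The sketch below shows what such helpers do and checks that they round-trip, equivalent to the removed code; the real implementations live elsewhere in the tree (crypto/common.h) and may differ in detail.

```cpp
#include <cassert>
#include <cstdint>

// Minimal big-endian 32-bit serialization helpers, matching the effect of the
// shift/mask code removed in the hunk above.
void WriteBE32(unsigned char* ptr, uint32_t x)
{
    ptr[0] = (x >> 24) & 0xFF;
    ptr[1] = (x >> 16) & 0xFF;
    ptr[2] = (x >> 8) & 0xFF;
    ptr[3] = x & 0xFF;
}

uint32_t ReadBE32(const unsigned char* ptr)
{
    return (uint32_t{ptr[0]} << 24) | (uint32_t{ptr[1]} << 16) |
           (uint32_t{ptr[2]} << 8) | uint32_t{ptr[3]};
}

int main()
{
    unsigned char buf[4];
    const uint32_t nChild = 0x80000001; // hardened child index 1
    WriteBE32(buf, nChild);
    assert(buf[0] == 0x80 && buf[3] == 0x01); // most significant byte first
    assert(ReadBE32(buf) == nChild);          // round trip
}
```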
diff --git a/src/logging.cpp b/src/logging.cpp
index a352e106e5..1efce21bdb 100644
--- a/src/logging.cpp
+++ b/src/logging.cpp
@@ -161,6 +161,7 @@ const CLogCategoryDesc LogCategories[] =
{BCLog::IPC, "ipc"},
{BCLog::LOCK, "lock"},
{BCLog::UTIL, "util"},
+ {BCLog::BLOCKSTORE, "blockstorage"},
{BCLog::ALL, "1"},
{BCLog::ALL, "all"},
};
diff --git a/src/logging.h b/src/logging.h
index 02e64a7c48..f46104364c 100644
--- a/src/logging.h
+++ b/src/logging.h
@@ -61,6 +61,7 @@ namespace BCLog {
IPC = (1 << 23),
LOCK = (1 << 24),
UTIL = (1 << 25),
+ BLOCKSTORE = (1 << 26),
ALL = ~(uint32_t)0,
};
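Each log category occupies one bit of a 32-bit mask, so registering BLOCKSTORE as `1 << 26` lets it be toggled independently of the others. The following sketch shows how such a mask gates log output; the names and the WillLogCategory function are illustrative, not the real Logger interface.

```cpp
#include <cstdint>
#include <iostream>

// Illustrative category flags mirroring the enum in the hunk above.
enum LogFlags : uint32_t {
    NONE = 0,
    UTIL = 1u << 25,
    BLOCKSTORE = 1u << 26,
    ALL = ~uint32_t{0},
};

// A category is enabled when its bit is set in the active mask.
bool WillLogCategory(uint32_t active_mask, LogFlags category)
{
    return (active_mask & category) != 0;
}

int main()
{
    uint32_t mask = NONE;
    mask |= BLOCKSTORE; // e.g. the effect of -debug=blockstorage
    std::cout << WillLogCategory(mask, BLOCKSTORE) << ' '  // 1
              << WillLogCategory(mask, UTIL) << '\n';      // 0
}
```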
diff --git a/src/net.cpp b/src/net.cpp
index fca53a6f09..7271ff22b2 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -2010,17 +2010,18 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
if (nTries > 100)
break;
- CAddrInfo addr;
+ CAddress addr;
+ int64_t addr_last_try{0};
if (fFeeler) {
// First, try to get a tried table collision address. This returns
// an empty (invalid) address if there are no collisions to try.
- addr = addrman.SelectTriedCollision();
+ std::tie(addr, addr_last_try) = addrman.SelectTriedCollision();
if (!addr.IsValid()) {
// No tried table collisions. Select a new table address
// for our feeler.
- addr = addrman.Select(true);
+ std::tie(addr, addr_last_try) = addrman.Select(true);
} else if (AlreadyConnectedToAddress(addr)) {
// If test-before-evict logic would have us connect to a
// peer that we're already connected to, just mark that
@@ -2029,11 +2030,11 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// a currently-connected peer.
addrman.Good(addr);
// Select a new table address for our feeler instead.
- addr = addrman.Select(true);
+ std::tie(addr, addr_last_try) = addrman.Select(true);
}
} else {
// Not a feeler
- addr = addrman.Select();
+ std::tie(addr, addr_last_try) = addrman.Select();
}
// Require outbound connections, other than feelers, to be to distinct network groups
@@ -2050,7 +2051,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
continue;
// only consider very recently tried nodes after 30 failed attempts
- if (nANow - addr.nLastTry < 600 && nTries < 30)
+ if (nANow - addr_last_try < 600 && nTries < 30)
continue;
// for non-feelers, require all the services we'll want,
@@ -2443,7 +2444,7 @@ void CConnman::SetNetworkActive(bool active)
}
}
-CConnman::CConnman(uint64_t nSeed0In, uint64_t nSeed1In, CAddrMan& addrman_in, bool network_active)
+CConnman::CConnman(uint64_t nSeed0In, uint64_t nSeed1In, AddrMan& addrman_in, bool network_active)
: addrman(addrman_in), nSeed0(nSeed0In), nSeed1(nSeed1In)
{
SetTryNewOutboundPeer(false);
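In the net.cpp hunks, Select() and SelectTriedCollision() now hand back a std::pair of the chosen address and its last-try timestamp rather than a CAddrInfo, and the call sites unpack it with std::tie. A generic sketch of that pattern follows; the Address struct and the Select() body are placeholders, not the real CAddress or AddrMan.

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <tuple>
#include <utility>

struct Address { std::string host; }; // placeholder for CAddress

// Placeholder for AddrMan::Select(): returns the picked address together with
// the timestamp of the last connection attempt to it.
std::pair<Address, int64_t> Select()
{
    return {Address{"250.1.1.1:8333"}, 1700000000};
}

int main()
{
    Address addr;
    int64_t addr_last_try{0};
    std::tie(addr, addr_last_try) = Select(); // unpack into existing locals
    std::cout << addr.host << " last tried at " << addr_last_try << '\n';
}
```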
diff --git a/src/net.h b/src/net.h
index a97ed9946d..e2071414b4 100644
--- a/src/net.h
+++ b/src/net.h
@@ -797,7 +797,7 @@ public:
m_onion_binds = connOptions.onion_binds;
}
- CConnman(uint64_t seed0, uint64_t seed1, CAddrMan& addrman, bool network_active = true);
+ CConnman(uint64_t seed0, uint64_t seed1, AddrMan& addrman, bool network_active = true);
~CConnman();
bool Start(CScheduler& scheduler, const Options& options);
@@ -1049,7 +1049,7 @@ private:
std::vector<ListenSocket> vhListenSocket;
std::atomic<bool> fNetworkActive{true};
bool fAddressesInitialized{false};
- CAddrMan& addrman;
+ AddrMan& addrman;
std::deque<std::string> m_addr_fetches GUARDED_BY(m_addr_fetches_mutex);
RecursiveMutex m_addr_fetches_mutex;
std::vector<std::string> vAddedNodes GUARDED_BY(cs_vAddedNodes);
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index aaad34be66..66b99aa2bb 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -292,7 +292,7 @@ using PeerRef = std::shared_ptr<Peer>;
class PeerManagerImpl final : public PeerManager
{
public:
- PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman,
+ PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, AddrMan& addrman,
BanMan* banman, ChainstateManager& chainman,
CTxMemPool& pool, bool ignore_incoming_txs);
@@ -410,7 +410,7 @@ private:
const CChainParams& m_chainparams;
CConnman& m_connman;
- CAddrMan& m_addrman;
+ AddrMan& m_addrman;
/** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */
BanMan* const m_banman;
ChainstateManager& m_chainman;
@@ -1426,14 +1426,14 @@ bool PeerManagerImpl::BlockRequestAllowed(const CBlockIndex* pindex)
(GetBlockProofEquivalentTime(*pindexBestHeader, *pindex, *pindexBestHeader, m_chainparams.GetConsensus()) < STALE_RELAY_AGE_LIMIT);
}
-std::unique_ptr<PeerManager> PeerManager::make(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman,
+std::unique_ptr<PeerManager> PeerManager::make(const CChainParams& chainparams, CConnman& connman, AddrMan& addrman,
BanMan* banman, ChainstateManager& chainman,
CTxMemPool& pool, bool ignore_incoming_txs)
{
return std::make_unique<PeerManagerImpl>(chainparams, connman, addrman, banman, chainman, pool, ignore_incoming_txs);
}
-PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman,
+PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& connman, AddrMan& addrman,
BanMan* banman, ChainstateManager& chainman,
CTxMemPool& pool, bool ignore_incoming_txs)
: m_chainparams(chainparams),
@@ -2664,7 +2664,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
// table is also potentially detrimental because new-table entries
// are subject to eviction in the event of addrman collisions. We
// mitigate the information-leak by never calling
- // CAddrMan::Connected() on block-relay-only peers; see
+ // AddrMan::Connected() on block-relay-only peers; see
// FinalizeNode().
//
// This moves an address from New to Tried table in Addrman,
diff --git a/src/net_processing.h b/src/net_processing.h
index 9d8d788583..27bc40687a 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -9,7 +9,7 @@
#include <net.h>
#include <validationinterface.h>
-class CAddrMan;
+class AddrMan;
class CChainParams;
class CTxMemPool;
class ChainstateManager;
@@ -37,7 +37,7 @@ struct CNodeStateStats {
class PeerManager : public CValidationInterface, public NetEventsInterface
{
public:
- static std::unique_ptr<PeerManager> make(const CChainParams& chainparams, CConnman& connman, CAddrMan& addrman,
+ static std::unique_ptr<PeerManager> make(const CChainParams& chainparams, CConnman& connman, AddrMan& addrman,
BanMan* banman, ChainstateManager& chainman,
CTxMemPool& pool, bool ignore_incoming_txs);
virtual ~PeerManager() { }
diff --git a/src/netaddress.cpp b/src/netaddress.cpp
index b2f4945e3b..f9fff5a6d5 100644
--- a/src/netaddress.cpp
+++ b/src/netaddress.cpp
@@ -165,7 +165,7 @@ void CNetAddr::SetLegacyIPv6(Span<const uint8_t> ipv6)
}
/**
- * Create an "internal" address that represents a name or FQDN. CAddrMan uses
+ * Create an "internal" address that represents a name or FQDN. AddrMan uses
* these fake addresses to keep track of which DNS seeds were used.
* @returns Whether or not the operation was successful.
* @see NET_INTERNAL, INTERNAL_IN_IPV6_PREFIX, CNetAddr::IsInternal(), CNetAddr::IsRFC4193()
diff --git a/src/netaddress.h b/src/netaddress.h
index cfb2edcd34..66c8c48f08 100644
--- a/src/netaddress.h
+++ b/src/netaddress.h
@@ -62,7 +62,7 @@ enum Network {
NET_CJDNS,
/// A set of addresses that represent the hash of a string or FQDN. We use
- /// them in CAddrMan to keep track of which DNS seeds were used.
+ /// them in AddrMan to keep track of which DNS seeds were used.
NET_INTERNAL,
/// Dummy value to indicate the number of NET_* constants.
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index 0b7df9bd9a..53bc2b5069 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -68,13 +68,14 @@ void CleanupBlockRevFiles()
LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
fs::path blocksdir = gArgs.GetBlocksDirPath();
for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) {
+ const std::string path = fs::PathToString(it->path().filename());
if (fs::is_regular_file(*it) &&
- it->path().filename().string().length() == 12 &&
- it->path().filename().string().substr(8,4) == ".dat")
+ path.length() == 12 &&
+ path.substr(8,4) == ".dat")
{
- if (it->path().filename().string().substr(0, 3) == "blk") {
- mapBlockFiles[it->path().filename().string().substr(3, 5)] = it->path();
- } else if (it->path().filename().string().substr(0, 3) == "rev") {
+ if (path.substr(0, 3) == "blk") {
+ mapBlockFiles[path.substr(3, 5)] = it->path();
+ } else if (path.substr(0, 3) == "rev") {
remove(it->path());
}
}
@@ -204,7 +205,7 @@ void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
FlatFilePos pos(*it, 0);
fs::remove(BlockFileSeq().FileName(pos));
fs::remove(UndoFileSeq().FileName(pos));
- LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
+ LogPrint(BCLog::BLOCKSTORE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
}
}
@@ -261,7 +262,7 @@ bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight,
if ((int)nFile != nLastBlockFile) {
if (!fKnown) {
- LogPrint(BCLog::VALIDATION, "Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
+ LogPrint(BCLog::BLOCKSTORE, "Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
}
FlushBlockFile(!fKnown, finalize_undo);
nLastBlockFile = nFile;
@@ -527,14 +528,14 @@ void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImportFile
for (const fs::path& path : vImportFiles) {
FILE* file = fsbridge::fopen(path, "rb");
if (file) {
- LogPrintf("Importing blocks file %s...\n", path.string());
+ LogPrintf("Importing blocks file %s...\n", fs::PathToString(path));
chainman.ActiveChainstate().LoadExternalBlockFile(file);
if (ShutdownRequested()) {
LogPrintf("Shutdown requested. Exit %s\n", __func__);
return;
}
} else {
- LogPrintf("Warning: Could not open blocks file %s\n", path.string());
+ LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
}
}
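The CleanupBlockRevFiles hunk converts the directory entry's filename to a string once and reuses it for all the substring checks. The sketch below applies the same classification to a few example names to show which entries the loop acts on; the return strings are illustrative only.

```cpp
#include <iostream>
#include <string>

// Classify a blocks-directory entry the way the loop above does:
// 12-character names ending in ".dat" and starting with "blk" or "rev".
std::string Classify(const std::string& name)
{
    if (name.length() == 12 && name.substr(8, 4) == ".dat") {
        if (name.substr(0, 3) == "blk") return "block file " + name.substr(3, 5);
        if (name.substr(0, 3) == "rev") return "undo file " + name.substr(3, 5);
    }
    return "ignored";
}

int main()
{
    for (const std::string name : {"blk00042.dat", "rev00042.dat", "peers.dat", "blk1.dat"}) {
        std::cout << name << ": " << Classify(name) << '\n';
    }
}
```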
diff --git a/src/node/context.h b/src/node/context.h
index 135f9ea1c6..26873345b4 100644
--- a/src/node/context.h
+++ b/src/node/context.h
@@ -12,7 +12,7 @@
class ArgsManager;
class BanMan;
-class CAddrMan;
+class AddrMan;
class CBlockPolicyEstimator;
class CConnman;
class CScheduler;
@@ -39,7 +39,7 @@ class WalletClient;
struct NodeContext {
//! Init interface for initializing current process and connecting to other processes.
interfaces::Init* init{nullptr};
- std::unique_ptr<CAddrMan> addrman;
+ std::unique_ptr<AddrMan> addrman;
std::unique_ptr<CConnman> connman;
std::unique_ptr<CTxMemPool> mempool;
std::unique_ptr<CBlockPolicyEstimator> fee_estimator;
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index 5b6d8416a7..73f4036057 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -698,7 +698,7 @@ public:
notifications.transactionAddedToMempool(entry.GetSharedTx(), 0 /* mempool_sequence */);
}
}
- bool isTaprootActive() const override
+ bool isTaprootActive() override
{
LOCK(::cs_main);
const CBlockIndex* tip = Assert(m_node.chainman)->ActiveChain().Tip();
diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp
index 2ae5798ebe..850b4f0439 100644
--- a/src/policy/fees.cpp
+++ b/src/policy/fees.cpp
@@ -527,7 +527,7 @@ CBlockPolicyEstimator::CBlockPolicyEstimator()
fs::path est_filepath = gArgs.GetDataDirNet() / FEE_ESTIMATES_FILENAME;
CAutoFile est_file(fsbridge::fopen(est_filepath, "rb"), SER_DISK, CLIENT_VERSION);
if (est_file.IsNull() || !Read(est_file)) {
- LogPrintf("Failed to read fee estimates from %s. Continue anyway.\n", est_filepath.string());
+ LogPrintf("Failed to read fee estimates from %s. Continue anyway.\n", fs::PathToString(est_filepath));
}
}
@@ -887,7 +887,7 @@ void CBlockPolicyEstimator::Flush() {
fs::path est_filepath = gArgs.GetDataDirNet() / FEE_ESTIMATES_FILENAME;
CAutoFile est_file(fsbridge::fopen(est_filepath, "wb"), SER_DISK, CLIENT_VERSION);
if (est_file.IsNull() || !Write(est_file)) {
- LogPrintf("Failed to write fee estimates to %s. Continue anyway.\n", est_filepath.string());
+ LogPrintf("Failed to write fee estimates to %s. Continue anyway.\n", fs::PathToString(est_filepath));
}
}
diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp
index 9e433584e7..fced397e51 100644
--- a/src/policy/policy.cpp
+++ b/src/policy/policy.cpp
@@ -22,7 +22,7 @@ CAmount GetDustThreshold(const CTxOut& txout, const CFeeRate& dustRelayFeeIn)
// so dust is a spendable txout less than
// 182*dustRelayFee/1000 (in satoshis).
// 546 satoshis at the default rate of 3000 sat/kvB.
- // A typical spendable segwit txout is 31 bytes big, and will
+ // A typical spendable segwit P2WPKH txout is 31 bytes big, and will
// need a CTxIn of at least 67 bytes to spend:
// so dust is a spendable txout less than
// 98*dustRelayFee/1000 (in satoshis).
@@ -34,6 +34,11 @@ CAmount GetDustThreshold(const CTxOut& txout, const CFeeRate& dustRelayFeeIn)
int witnessversion = 0;
std::vector<unsigned char> witnessprogram;
+ // Note this computation is for spending a Segwit v0 P2WPKH output (a 33 bytes
+ // public key + an ECDSA signature). For Segwit v1 Taproot outputs the minimum
+ // satisfaction is lower (a single BIP340 signature) but this computation was
+ // kept to not further reduce the dust level.
+ // See discussion in https://github.com/bitcoin/bitcoin/pull/22779 for details.
if (txout.scriptPubKey.IsWitnessProgram(witnessversion, witnessprogram)) {
// sum the sizes of the parts of a transaction input
// with 75% segwit discount applied to the script size.
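The dust comments above imply simple arithmetic: at the default relay rate of 3000 sat/kvB, a legacy output plus the input needed to spend it come to roughly 182 bytes, while a P2WPKH output plus its witness-discounted input come to roughly 98 vbytes, giving thresholds of 546 and 294 satoshis. A tiny check of that arithmetic, using the sizes stated in the comments:

```cpp
#include <cassert>
#include <cstdint>

int main()
{
    const int64_t dust_relay_fee = 3000; // sat per 1000 vbytes (default rate)
    // Legacy: ~34-byte txout plus ~148-byte input to spend it.
    const int64_t legacy_size = 34 + 148;  // 182
    // Segwit v0 P2WPKH: ~31-byte txout plus ~67 vbytes of input after the
    // 75% witness discount.
    const int64_t p2wpkh_size = 31 + 67;   // 98
    assert(legacy_size * dust_relay_fee / 1000 == 546);
    assert(p2wpkh_size * dust_relay_fee / 1000 == 294);
}
```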
diff --git a/src/pubkey.cpp b/src/pubkey.cpp
index d14a20b870..956ff2b34a 100644
--- a/src/pubkey.cpp
+++ b/src/pubkey.cpp
@@ -337,8 +337,7 @@ bool CPubKey::Derive(CPubKey& pubkeyChild, ChainCode &ccChild, unsigned int nChi
void CExtPubKey::Encode(unsigned char code[BIP32_EXTKEY_SIZE]) const {
code[0] = nDepth;
memcpy(code+1, vchFingerprint, 4);
- code[5] = (nChild >> 24) & 0xFF; code[6] = (nChild >> 16) & 0xFF;
- code[7] = (nChild >> 8) & 0xFF; code[8] = (nChild >> 0) & 0xFF;
+ WriteBE32(code+5, nChild);
memcpy(code+9, chaincode.begin(), 32);
assert(pubkey.size() == CPubKey::COMPRESSED_SIZE);
memcpy(code+41, pubkey.begin(), CPubKey::COMPRESSED_SIZE);
@@ -347,7 +346,7 @@ void CExtPubKey::Encode(unsigned char code[BIP32_EXTKEY_SIZE]) const {
void CExtPubKey::Decode(const unsigned char code[BIP32_EXTKEY_SIZE]) {
nDepth = code[0];
memcpy(vchFingerprint, code+1, 4);
- nChild = (code[5] << 24) | (code[6] << 16) | (code[7] << 8) | code[8];
+ nChild = ReadBE32(code+5);
memcpy(chaincode.begin(), code+9, 32);
pubkey.Set(code+41, code+BIP32_EXTKEY_SIZE);
if ((nDepth == 0 && (nChild != 0 || ReadLE32(vchFingerprint) != 0)) || !pubkey.IsFullyValid()) pubkey = CPubKey();
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index 00c9fd3059..7de56a648a 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -46,7 +46,6 @@
#include <QApplication>
#include <QDebug>
-#include <QFontDatabase>
#include <QLatin1String>
#include <QLibraryInfo>
#include <QLocale>
@@ -492,7 +491,7 @@ int GuiMain(int argc, char* argv[])
#endif
BitcoinApplication app;
- QFontDatabase::addApplicationFont(":/fonts/monospace");
+ GUIUtil::LoadFont(QStringLiteral(":/fonts/monospace"));
/// 2. Parse command-line options. We do this after qt in order to show an error if there are problems parsing these
// Command-line options take precedence:
diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp
index 7b1384b485..4262866f32 100644
--- a/src/qt/guiutil.cpp
+++ b/src/qt/guiutil.cpp
@@ -272,6 +272,12 @@ bool hasEntryData(const QAbstractItemView *view, int column, int role)
return !selection.at(0).data(role).toString().isEmpty();
}
+void LoadFont(const QString& file_name)
+{
+ const int id = QFontDatabase::addApplicationFont(file_name);
+ assert(id != -1);
+}
+
QString getDefaultDataDirectory()
{
return boostPathToQString(GetDefaultDataDir());
@@ -647,12 +653,12 @@ void setClipboard(const QString& str)
fs::path qstringToBoostPath(const QString &path)
{
- return fs::path(path.toStdString());
+ return fs::u8path(path.toStdString());
}
QString boostPathToQString(const fs::path &path)
{
- return QString::fromStdString(path.string());
+ return QString::fromStdString(path.u8string());
}
QString NetworkToQString(Network net)
diff --git a/src/qt/guiutil.h b/src/qt/guiutil.h
index 6287f95192..211f3f506d 100644
--- a/src/qt/guiutil.h
+++ b/src/qt/guiutil.h
@@ -114,6 +114,11 @@ namespace GUIUtil
void setClipboard(const QString& str);
/**
+ * Loads the font from the file specified by file_name, aborts if it fails.
+ */
+ void LoadFont(const QString& file_name);
+
+ /**
* Determine default data directory for operating system.
*/
QString getDefaultDataDirectory();
diff --git a/src/qt/intro.cpp b/src/qt/intro.cpp
index 4c78fba752..2ca4b6a21e 100644
--- a/src/qt/intro.cpp
+++ b/src/qt/intro.cpp
@@ -263,7 +263,7 @@ bool Intro::showIfNeeded(bool& did_show_intro, int64_t& prune_MiB)
* (to be consistent with bitcoind behavior)
*/
if(dataDir != GUIUtil::getDefaultDataDirectory()) {
- gArgs.SoftSetArg("-datadir", GUIUtil::qstringToBoostPath(dataDir).string()); // use OS locale for path setting
+ gArgs.SoftSetArg("-datadir", fs::PathToString(GUIUtil::qstringToBoostPath(dataDir))); // use OS locale for path setting
}
return true;
}
diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp
index 3c0dc5aa40..0c3332ab76 100644
--- a/src/qt/rpcconsole.cpp
+++ b/src/qt/rpcconsole.cpp
@@ -247,10 +247,11 @@ bool RPCConsole::RPCParseCommandLine(interfaces::Node* node, std::string &strRes
UniValue subelement;
if (lastResult.isArray())
{
- for(char argch: curarg)
- if (!IsDigit(argch))
- throw std::runtime_error("Invalid result query");
- subelement = lastResult[LocaleIndependentAtoi<int>(curarg)];
+ const auto parsed{ToIntegral<size_t>(curarg)};
+ if (!parsed) {
+ throw std::runtime_error("Invalid result query");
+ }
+ subelement = lastResult[parsed.value()];
}
else if (lastResult.isObject())
subelement = find_value(lastResult, curarg);
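The console hunk replaces a hand-rolled digit check followed by an unchecked conversion with a single checked parse that yields an empty optional on failure. The sketch below builds the same pattern on std::from_chars; Bitcoin Core's ToIntegral has its own implementation, so this is only an approximation of the behaviour.

```cpp
#include <charconv>
#include <cstddef>
#include <iostream>
#include <optional>
#include <string>

// Parse a whole string as an unsigned index; leftover characters, a sign, or
// overflow all make the result empty.
std::optional<size_t> ParseIndex(const std::string& s)
{
    size_t value{};
    const auto [ptr, ec] = std::from_chars(s.data(), s.data() + s.size(), value);
    if (ec != std::errc{} || ptr != s.data() + s.size()) return std::nullopt;
    return value;
}

int main()
{
    for (const std::string query : {"7", "07", "7a", "-1", ""}) {
        const auto parsed = ParseIndex(query);
        std::cout << '"' << query << "\" -> "
                  << (parsed ? std::to_string(*parsed) : "invalid") << '\n';
    }
}
```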
diff --git a/src/qt/walletcontroller.cpp b/src/qt/walletcontroller.cpp
index 4c74bcd480..a0ad59f12a 100644
--- a/src/qt/walletcontroller.cpp
+++ b/src/qt/walletcontroller.cpp
@@ -184,12 +184,13 @@ WalletControllerActivity::WalletControllerActivity(WalletController* wallet_cont
connect(this, &WalletControllerActivity::finished, this, &QObject::deleteLater);
}
-void WalletControllerActivity::showProgressDialog(const QString& label_text)
+void WalletControllerActivity::showProgressDialog(const QString& title_text, const QString& label_text)
{
auto progress_dialog = new QProgressDialog(m_parent_widget);
progress_dialog->setAttribute(Qt::WA_DeleteOnClose);
connect(this, &WalletControllerActivity::finished, progress_dialog, &QWidget::close);
+ progress_dialog->setWindowTitle(title_text);
progress_dialog->setLabelText(label_text);
progress_dialog->setRange(0, 0);
progress_dialog->setCancelButton(nullptr);
@@ -231,7 +232,12 @@ void CreateWalletActivity::askPassphrase()
void CreateWalletActivity::createWallet()
{
- showProgressDialog(tr("Creating Wallet <b>%1</b>…").arg(m_create_wallet_dialog->walletName().toHtmlEscaped()));
+ showProgressDialog(
+ //: Title of window indicating the progress of creation of a new wallet.
+ tr("Create Wallet"),
+ /*: Descriptive text of the create wallet progress window which indicates
+ to the user which wallet is currently being created. */
+ tr("Creating Wallet <b>%1</b>…").arg(m_create_wallet_dialog->walletName().toHtmlEscaped()));
std::string name = m_create_wallet_dialog->walletName().toStdString();
uint64_t flags = 0;
@@ -322,7 +328,12 @@ void OpenWalletActivity::open(const std::string& path)
{
QString name = path.empty() ? QString("["+tr("default wallet")+"]") : QString::fromStdString(path);
- showProgressDialog(tr("Opening Wallet <b>%1</b>…").arg(name.toHtmlEscaped()));
+ showProgressDialog(
+ //: Title of window indicating the progress of opening of a wallet.
+ tr("Open Wallet"),
+ /*: Descriptive text of the open wallet progress window which indicates
+ to the user which wallet is currently being opened. */
+ tr("Opening Wallet <b>%1</b>…").arg(name.toHtmlEscaped()));
QTimer::singleShot(0, worker(), [this, path] {
std::unique_ptr<interfaces::Wallet> wallet = node().walletClient().loadWallet(path, m_error_message, m_warning_message);
@@ -340,7 +351,12 @@ LoadWalletsActivity::LoadWalletsActivity(WalletController* wallet_controller, QW
void LoadWalletsActivity::load()
{
- showProgressDialog(tr("Loading wallets…"));
+ showProgressDialog(
+ //: Title of progress window which is displayed when wallets are being loaded.
+ tr("Load Wallets"),
+ /*: Descriptive text of the load wallets progress window which indicates to
+ the user that wallets are currently being loaded.*/
+ tr("Loading wallets…"));
QTimer::singleShot(0, worker(), [this] {
for (auto& wallet : node().walletClient().getWallets()) {
diff --git a/src/qt/walletcontroller.h b/src/qt/walletcontroller.h
index f97a7a1e84..bbd990228f 100644
--- a/src/qt/walletcontroller.h
+++ b/src/qt/walletcontroller.h
@@ -96,7 +96,7 @@ protected:
interfaces::Node& node() const { return m_wallet_controller->m_node; }
QObject* worker() const { return m_wallet_controller->m_activity_worker; }
- void showProgressDialog(const QString& label_text);
+ void showProgressDialog(const QString& title_text, const QString& label_text);
WalletController* const m_wallet_controller;
QWidget* const m_parent_widget;
diff --git a/src/rest.cpp b/src/rest.cpp
index e21fd8dad5..3746fd752a 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -189,9 +189,10 @@ static bool rest_headers(const std::any& context,
if (path.size() != 2)
return RESTERR(req, HTTP_BAD_REQUEST, "No header count specified. Use /rest/headers/<count>/<hash>.<ext>.");
- long count = strtol(path[0].c_str(), nullptr, 10);
- if (count < 1 || count > 2000)
+ const auto parsed_count{ToIntegral<size_t>(path[0])};
+ if (!parsed_count.has_value() || *parsed_count < 1 || *parsed_count > 2000) {
return RESTERR(req, HTTP_BAD_REQUEST, "Header count out of range: " + path[0]);
+ }
std::string hashStr = path[1];
uint256 hash;
@@ -199,8 +200,8 @@ static bool rest_headers(const std::any& context,
return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr);
const CBlockIndex* tip = nullptr;
- std::vector<const CBlockIndex *> headers;
- headers.reserve(count);
+ std::vector<const CBlockIndex*> headers;
+ headers.reserve(*parsed_count);
{
ChainstateManager* maybe_chainman = GetChainman(context, req);
if (!maybe_chainman) return false;
@@ -211,8 +212,9 @@ static bool rest_headers(const std::any& context,
const CBlockIndex* pindex = chainman.m_blockman.LookupBlockIndex(hash);
while (pindex != nullptr && active_chain.Contains(pindex)) {
headers.push_back(pindex);
- if (headers.size() == (unsigned long)count)
+ if (headers.size() == *parsed_count) {
break;
+ }
pindex = active_chain.Next(pindex);
}
}
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 92e608a030..dadd82e03f 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -2554,15 +2554,15 @@ static RPCHelpMan dumptxoutset()
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
- const fs::path path = fsbridge::AbsPathJoin(gArgs.GetDataDirNet(), request.params[0].get_str());
+ const fs::path path = fsbridge::AbsPathJoin(gArgs.GetDataDirNet(), fs::u8path(request.params[0].get_str()));
// Write to a temporary path and then move into `path` on completion
// to avoid confusion due to an interruption.
- const fs::path temppath = fsbridge::AbsPathJoin(gArgs.GetDataDirNet(), request.params[0].get_str() + ".incomplete");
+ const fs::path temppath = fsbridge::AbsPathJoin(gArgs.GetDataDirNet(), fs::u8path(request.params[0].get_str() + ".incomplete"));
if (fs::exists(path)) {
throw JSONRPCError(
RPC_INVALID_PARAMETER,
- path.string() + " already exists. If you are sure this is what you want, "
+ path.u8string() + " already exists. If you are sure this is what you want, "
"move it out of the way first");
}
@@ -2572,7 +2572,7 @@ static RPCHelpMan dumptxoutset()
UniValue result = CreateUTXOSnapshot(node, node.chainman->ActiveChainstate(), afile);
fs::rename(temppath, path);
- result.pushKV("path", path.string());
+ result.pushKV("path", path.u8string());
return result;
},
};
diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp
index a7866474e1..3245e04cdf 100644
--- a/src/rpc/request.cpp
+++ b/src/rpc/request.cpp
@@ -70,7 +70,7 @@ static fs::path GetAuthCookieFile(bool temp=false)
if (temp) {
arg += ".tmp";
}
- return AbsPathForConfigVal(fs::path(arg));
+ return AbsPathForConfigVal(fs::PathFromString(arg));
}
bool GenerateAuthCookie(std::string *cookie_out)
@@ -87,7 +87,7 @@ bool GenerateAuthCookie(std::string *cookie_out)
fs::path filepath_tmp = GetAuthCookieFile(true);
file.open(filepath_tmp);
if (!file.is_open()) {
- LogPrintf("Unable to open cookie authentication file %s for writing\n", filepath_tmp.string());
+ LogPrintf("Unable to open cookie authentication file %s for writing\n", fs::PathToString(filepath_tmp));
return false;
}
file << cookie;
@@ -95,10 +95,10 @@ bool GenerateAuthCookie(std::string *cookie_out)
fs::path filepath = GetAuthCookieFile(false);
if (!RenameOver(filepath_tmp, filepath)) {
- LogPrintf("Unable to rename cookie authentication file %s to %s\n", filepath_tmp.string(), filepath.string());
+ LogPrintf("Unable to rename cookie authentication file %s to %s\n", fs::PathToString(filepath_tmp), fs::PathToString(filepath));
return false;
}
- LogPrintf("Generated RPC authentication cookie %s\n", filepath.string());
+ LogPrintf("Generated RPC authentication cookie %s\n", fs::PathToString(filepath));
if (cookie_out)
*cookie_out = cookie;
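GenerateAuthCookie writes the cookie to a temporary file and then renames it into place, so readers never observe a half-written cookie. A sketch of that write-then-rename pattern with std::filesystem, with minimal error handling and illustrative file names:

```cpp
#include <filesystem>
#include <fstream>
#include <iostream>
#include <string>

namespace fs = std::filesystem;

// Write `contents` to `target` by way of a temporary file, then rename it
// into place (an atomic replace on POSIX filesystems).
bool WriteAtomically(const fs::path& target, const std::string& contents)
{
    const fs::path tmp = target.string() + ".tmp";
    std::ofstream file(tmp);
    if (!file.is_open()) return false;
    file << contents;
    file.close();
    std::error_code ec;
    fs::rename(tmp, target, ec);
    return !ec;
}

int main()
{
    const fs::path cookie = fs::temp_directory_path() / ".cookie"; // illustrative location
    std::cout << (WriteAtomically(cookie, "__cookie__:deadbeef\n") ? "ok" : "failed") << '\n';
}
```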
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index 0d02ec5c47..9bcfba3507 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -239,7 +239,7 @@ static RPCHelpMan getrpcinfo()
UniValue result(UniValue::VOBJ);
result.pushKV("active_commands", active_commands);
- const std::string path = LogInstance().m_file_path.string();
+ const std::string path = LogInstance().m_file_path.u8string();
UniValue log_path(UniValue::VSTR, path);
result.pushKV("logpath", log_path);
diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp
index 01a492a20b..bd6f470219 100644
--- a/src/test/addrman_tests.cpp
+++ b/src/test/addrman_tests.cpp
@@ -4,6 +4,7 @@
#include <addrdb.h>
#include <addrman.h>
+#include <addrman_impl.h>
#include <chainparams.h>
#include <clientversion.h>
#include <hash.h>
@@ -21,26 +22,26 @@
using namespace std::literals;
-class CAddrManSerializationMock : public CAddrMan
+class AddrManSerializationMock : public AddrMan
{
public:
virtual void Serialize(CDataStream& s) const = 0;
- CAddrManSerializationMock()
- : CAddrMan(/* asmap */ std::vector<bool>(), /* deterministic */ true, /* consistency_check_ratio */ 100)
+ AddrManSerializationMock()
+ : AddrMan(/* asmap */ std::vector<bool>(), /* deterministic */ true, /* consistency_check_ratio */ 100)
{}
};
-class CAddrManUncorrupted : public CAddrManSerializationMock
+class AddrManUncorrupted : public AddrManSerializationMock
{
public:
void Serialize(CDataStream& s) const override
{
- CAddrMan::Serialize(s);
+ AddrMan::Serialize(s);
}
};
-class CAddrManCorrupted : public CAddrManSerializationMock
+class AddrManCorrupted : public AddrManSerializationMock
{
public:
void Serialize(CDataStream& s) const override
@@ -61,12 +62,12 @@ public:
CAddress addr = CAddress(serv, NODE_NONE);
CNetAddr resolved;
BOOST_CHECK(LookupHost("252.2.2.2", resolved, false));
- CAddrInfo info = CAddrInfo(addr, resolved);
+ AddrInfo info = AddrInfo(addr, resolved);
s << info;
}
};
-static CDataStream AddrmanToStream(const CAddrManSerializationMock& _addrman)
+static CDataStream AddrmanToStream(const AddrManSerializationMock& _addrman)
{
CDataStream ssPeersIn(SER_DISK, CLIENT_VERSION);
ssPeersIn << Params().MessageStart();
@@ -76,44 +77,44 @@ static CDataStream AddrmanToStream(const CAddrManSerializationMock& _addrman)
return CDataStream(vchData, SER_DISK, CLIENT_VERSION);
}
-class CAddrManTest : public CAddrMan
+class AddrManTest : public AddrMan
{
private:
bool deterministic;
public:
- explicit CAddrManTest(bool makeDeterministic = true,
- std::vector<bool> asmap = std::vector<bool>())
- : CAddrMan(asmap, makeDeterministic, /* consistency_check_ratio */ 100)
+ explicit AddrManTest(bool makeDeterministic = true,
+ std::vector<bool> asmap = std::vector<bool>())
+ : AddrMan(asmap, makeDeterministic, /* consistency_check_ratio */ 100)
{
deterministic = makeDeterministic;
}
- CAddrInfo* Find(const CNetAddr& addr, int* pnId = nullptr)
+ AddrInfo* Find(const CNetAddr& addr, int* pnId = nullptr)
{
- LOCK(cs);
- return CAddrMan::Find(addr, pnId);
+ LOCK(m_impl->cs);
+ return m_impl->Find(addr, pnId);
}
- CAddrInfo* Create(const CAddress& addr, const CNetAddr& addrSource, int* pnId = nullptr)
+ AddrInfo* Create(const CAddress& addr, const CNetAddr& addrSource, int* pnId = nullptr)
{
- LOCK(cs);
- return CAddrMan::Create(addr, addrSource, pnId);
+ LOCK(m_impl->cs);
+ return m_impl->Create(addr, addrSource, pnId);
}
void Delete(int nId)
{
- LOCK(cs);
- CAddrMan::Delete(nId);
+ LOCK(m_impl->cs);
+ m_impl->Delete(nId);
}
// Used to test deserialization
std::pair<int, int> GetBucketAndEntry(const CAddress& addr)
{
- LOCK(cs);
- int nId = mapAddr[addr];
+ LOCK(m_impl->cs);
+ int nId = m_impl->mapAddr[addr];
for (int bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; ++bucket) {
for (int entry = 0; entry < ADDRMAN_BUCKET_SIZE; ++entry) {
- if (nId == vvNew[bucket][entry]) {
+ if (nId == m_impl->vvNew[bucket][entry]) {
return std::pair<int, int>(bucket, entry);
}
}
@@ -165,20 +166,20 @@ BOOST_FIXTURE_TEST_SUITE(addrman_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(addrman_simple)
{
- auto addrman = std::make_unique<CAddrManTest>();
+ auto addrman = std::make_unique<AddrManTest>();
CNetAddr source = ResolveIP("252.2.2.2");
// Test: Does Addrman respond correctly when empty.
BOOST_CHECK_EQUAL(addrman->size(), 0U);
- CAddrInfo addr_null = addrman->Select();
+ auto addr_null = addrman->Select().first;
BOOST_CHECK_EQUAL(addr_null.ToString(), "[::]:0");
// Test: Does Addrman::Add work as expected.
CService addr1 = ResolveService("250.1.1.1", 8333);
BOOST_CHECK(addrman->Add({CAddress(addr1, NODE_NONE)}, source));
BOOST_CHECK_EQUAL(addrman->size(), 1U);
- CAddrInfo addr_ret1 = addrman->Select();
+ auto addr_ret1 = addrman->Select().first;
BOOST_CHECK_EQUAL(addr_ret1.ToString(), "250.1.1.1:8333");
// Test: Does IP address deduplication work correctly.
@@ -199,7 +200,7 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
BOOST_CHECK(addrman->size() >= 1);
// Test: reset addrman and test AddrMan::Add multiple addresses works as expected
- addrman = std::make_unique<CAddrManTest>();
+ addrman = std::make_unique<AddrManTest>();
std::vector<CAddress> vAddr;
vAddr.push_back(CAddress(ResolveService("250.1.1.3", 8333), NODE_NONE));
vAddr.push_back(CAddress(ResolveService("250.1.1.4", 8333), NODE_NONE));
@@ -209,7 +210,7 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
BOOST_AUTO_TEST_CASE(addrman_ports)
{
- CAddrManTest addrman;
+ AddrManTest addrman;
CNetAddr source = ResolveIP("252.2.2.2");
@@ -223,7 +224,7 @@ BOOST_AUTO_TEST_CASE(addrman_ports)
CService addr1_port = ResolveService("250.1.1.1", 8334);
BOOST_CHECK(!addrman.Add({CAddress(addr1_port, NODE_NONE)}, source));
BOOST_CHECK_EQUAL(addrman.size(), 1U);
- CAddrInfo addr_ret2 = addrman.Select();
+ auto addr_ret2 = addrman.Select().first;
BOOST_CHECK_EQUAL(addr_ret2.ToString(), "250.1.1.1:8333");
// Test: Add same IP but diff port to tried table, it doesn't get added.
@@ -231,14 +232,14 @@ BOOST_AUTO_TEST_CASE(addrman_ports)
addrman.Good(CAddress(addr1_port, NODE_NONE));
BOOST_CHECK_EQUAL(addrman.size(), 1U);
bool newOnly = true;
- CAddrInfo addr_ret3 = addrman.Select(newOnly);
+ auto addr_ret3 = addrman.Select(newOnly).first;
BOOST_CHECK_EQUAL(addr_ret3.ToString(), "250.1.1.1:8333");
}
BOOST_AUTO_TEST_CASE(addrman_select)
{
- CAddrManTest addrman;
+ AddrManTest addrman;
CNetAddr source = ResolveIP("252.2.2.2");
@@ -248,16 +249,16 @@ BOOST_AUTO_TEST_CASE(addrman_select)
BOOST_CHECK_EQUAL(addrman.size(), 1U);
bool newOnly = true;
- CAddrInfo addr_ret1 = addrman.Select(newOnly);
+ auto addr_ret1 = addrman.Select(newOnly).first;
BOOST_CHECK_EQUAL(addr_ret1.ToString(), "250.1.1.1:8333");
// Test: move addr to tried, select from new expected nothing returned.
addrman.Good(CAddress(addr1, NODE_NONE));
BOOST_CHECK_EQUAL(addrman.size(), 1U);
- CAddrInfo addr_ret2 = addrman.Select(newOnly);
+ auto addr_ret2 = addrman.Select(newOnly).first;
BOOST_CHECK_EQUAL(addr_ret2.ToString(), "[::]:0");
- CAddrInfo addr_ret3 = addrman.Select();
+ auto addr_ret3 = addrman.Select().first;
BOOST_CHECK_EQUAL(addr_ret3.ToString(), "250.1.1.1:8333");
BOOST_CHECK_EQUAL(addrman.size(), 1U);
@@ -290,14 +291,14 @@ BOOST_AUTO_TEST_CASE(addrman_select)
// Test: Select pulls from new and tried regardless of port number.
std::set<uint16_t> ports;
for (int i = 0; i < 20; ++i) {
- ports.insert(addrman.Select().GetPort());
+ ports.insert(addrman.Select().first.GetPort());
}
BOOST_CHECK_EQUAL(ports.size(), 3U);
}
BOOST_AUTO_TEST_CASE(addrman_new_collisions)
{
- CAddrManTest addrman;
+ AddrManTest addrman;
CNetAddr source = ResolveIP("252.2.2.2");
@@ -326,7 +327,7 @@ BOOST_AUTO_TEST_CASE(addrman_new_collisions)
BOOST_AUTO_TEST_CASE(addrman_tried_collisions)
{
- CAddrManTest addrman;
+ AddrManTest addrman;
CNetAddr source = ResolveIP("252.2.2.2");
@@ -356,7 +357,7 @@ BOOST_AUTO_TEST_CASE(addrman_tried_collisions)
BOOST_AUTO_TEST_CASE(addrman_find)
{
- CAddrManTest addrman;
+ AddrManTest addrman;
BOOST_CHECK_EQUAL(addrman.size(), 0U);
@@ -372,24 +373,24 @@ BOOST_AUTO_TEST_CASE(addrman_find)
BOOST_CHECK(addrman.Add({addr3}, source1));
// Test: ensure Find returns an IP matching what we searched on.
- CAddrInfo* info1 = addrman.Find(addr1);
+ AddrInfo* info1 = addrman.Find(addr1);
BOOST_REQUIRE(info1);
BOOST_CHECK_EQUAL(info1->ToString(), "250.1.2.1:8333");
// Test 18; Find does not discriminate by port number.
- CAddrInfo* info2 = addrman.Find(addr2);
+ AddrInfo* info2 = addrman.Find(addr2);
BOOST_REQUIRE(info2);
BOOST_CHECK_EQUAL(info2->ToString(), info1->ToString());
// Test: Find returns another IP matching what we searched on.
- CAddrInfo* info3 = addrman.Find(addr3);
+ AddrInfo* info3 = addrman.Find(addr3);
BOOST_REQUIRE(info3);
BOOST_CHECK_EQUAL(info3->ToString(), "251.255.2.1:8333");
}
BOOST_AUTO_TEST_CASE(addrman_create)
{
- CAddrManTest addrman;
+ AddrManTest addrman;
BOOST_CHECK_EQUAL(addrman.size(), 0U);
@@ -397,19 +398,19 @@ BOOST_AUTO_TEST_CASE(addrman_create)
CNetAddr source1 = ResolveIP("250.1.2.1");
int nId;
- CAddrInfo* pinfo = addrman.Create(addr1, source1, &nId);
+ AddrInfo* pinfo = addrman.Create(addr1, source1, &nId);
// Test: The result should be the same as the input addr.
BOOST_CHECK_EQUAL(pinfo->ToString(), "250.1.2.1:8333");
- CAddrInfo* info2 = addrman.Find(addr1);
+ AddrInfo* info2 = addrman.Find(addr1);
BOOST_CHECK_EQUAL(info2->ToString(), "250.1.2.1:8333");
}
BOOST_AUTO_TEST_CASE(addrman_delete)
{
- CAddrManTest addrman;
+ AddrManTest addrman;
BOOST_CHECK_EQUAL(addrman.size(), 0U);
@@ -423,13 +424,13 @@ BOOST_AUTO_TEST_CASE(addrman_delete)
BOOST_CHECK_EQUAL(addrman.size(), 1U);
addrman.Delete(nId);
BOOST_CHECK_EQUAL(addrman.size(), 0U);
- CAddrInfo* info2 = addrman.Find(addr1);
+ AddrInfo* info2 = addrman.Find(addr1);
BOOST_CHECK(info2 == nullptr);
}
BOOST_AUTO_TEST_CASE(addrman_getaddr)
{
- CAddrManTest addrman;
+ AddrManTest addrman;
// Test: Sanity check, GetAddr should never return anything if addrman
// is empty.
@@ -489,7 +490,7 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket_legacy)
{
- CAddrManTest addrman;
+ AddrManTest addrman;
CAddress addr1 = CAddress(ResolveService("250.1.1.1", 8333), NODE_NONE);
CAddress addr2 = CAddress(ResolveService("250.1.1.1", 9999), NODE_NONE);
@@ -497,7 +498,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket_legacy)
CNetAddr source1 = ResolveIP("250.1.1.1");
- CAddrInfo info1 = CAddrInfo(addr1, source1);
+ AddrInfo info1 = AddrInfo(addr1, source1);
uint256 nKey1 = (uint256)(CHashWriter(SER_GETHASH, 0) << 1).GetHash();
uint256 nKey2 = (uint256)(CHashWriter(SER_GETHASH, 0) << 2).GetHash();
@@ -512,14 +513,14 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket_legacy)
// Test: Two addresses with same IP but different ports can map to
// different buckets because they have different keys.
- CAddrInfo info2 = CAddrInfo(addr2, source1);
+ AddrInfo info2 = AddrInfo(addr2, source1);
BOOST_CHECK(info1.GetKey() != info2.GetKey());
BOOST_CHECK(info1.GetTriedBucket(nKey1, asmap) != info2.GetTriedBucket(nKey1, asmap));
std::set<int> buckets;
for (int i = 0; i < 255; i++) {
- CAddrInfo infoi = CAddrInfo(
+ AddrInfo infoi = AddrInfo(
CAddress(ResolveService("250.1.1." + ToString(i)), NODE_NONE),
ResolveIP("250.1.1." + ToString(i)));
int bucket = infoi.GetTriedBucket(nKey1, asmap);
@@ -531,7 +532,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket_legacy)
buckets.clear();
for (int j = 0; j < 255; j++) {
- CAddrInfo infoj = CAddrInfo(
+ AddrInfo infoj = AddrInfo(
CAddress(ResolveService("250." + ToString(j) + ".1.1"), NODE_NONE),
ResolveIP("250." + ToString(j) + ".1.1"));
int bucket = infoj.GetTriedBucket(nKey1, asmap);
@@ -544,14 +545,14 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket_legacy)
BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket_legacy)
{
- CAddrManTest addrman;
+ AddrManTest addrman;
CAddress addr1 = CAddress(ResolveService("250.1.2.1", 8333), NODE_NONE);
CAddress addr2 = CAddress(ResolveService("250.1.2.1", 9999), NODE_NONE);
CNetAddr source1 = ResolveIP("250.1.2.1");
- CAddrInfo info1 = CAddrInfo(addr1, source1);
+ AddrInfo info1 = AddrInfo(addr1, source1);
uint256 nKey1 = (uint256)(CHashWriter(SER_GETHASH, 0) << 1).GetHash();
uint256 nKey2 = (uint256)(CHashWriter(SER_GETHASH, 0) << 2).GetHash();
@@ -567,13 +568,13 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket_legacy)
BOOST_CHECK(info1.GetNewBucket(nKey1, asmap) != info1.GetNewBucket(nKey2, asmap));
// Test: Ports should not affect bucket placement in the addr
- CAddrInfo info2 = CAddrInfo(addr2, source1);
+ AddrInfo info2 = AddrInfo(addr2, source1);
BOOST_CHECK(info1.GetKey() != info2.GetKey());
BOOST_CHECK_EQUAL(info1.GetNewBucket(nKey1, asmap), info2.GetNewBucket(nKey1, asmap));
std::set<int> buckets;
for (int i = 0; i < 255; i++) {
- CAddrInfo infoi = CAddrInfo(
+ AddrInfo infoi = AddrInfo(
CAddress(ResolveService("250.1.1." + ToString(i)), NODE_NONE),
ResolveIP("250.1.1." + ToString(i)));
int bucket = infoi.GetNewBucket(nKey1, asmap);
@@ -585,7 +586,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket_legacy)
buckets.clear();
for (int j = 0; j < 4 * 255; j++) {
- CAddrInfo infoj = CAddrInfo(CAddress(
+ AddrInfo infoj = AddrInfo(CAddress(
ResolveService(
ToString(250 + (j / 255)) + "." + ToString(j % 256) + ".1.1"), NODE_NONE),
ResolveIP("251.4.1.1"));
@@ -598,7 +599,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket_legacy)
buckets.clear();
for (int p = 0; p < 255; p++) {
- CAddrInfo infoj = CAddrInfo(
+ AddrInfo infoj = AddrInfo(
CAddress(ResolveService("250.1.1.1"), NODE_NONE),
ResolveIP("250." + ToString(p) + ".1.1"));
int bucket = infoj.GetNewBucket(nKey1, asmap);
@@ -622,7 +623,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket_legacy)
// 101.8.0.0/16 AS8
BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
{
- CAddrManTest addrman;
+ AddrManTest addrman;
CAddress addr1 = CAddress(ResolveService("250.1.1.1", 8333), NODE_NONE);
CAddress addr2 = CAddress(ResolveService("250.1.1.1", 9999), NODE_NONE);
@@ -630,7 +631,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
CNetAddr source1 = ResolveIP("250.1.1.1");
- CAddrInfo info1 = CAddrInfo(addr1, source1);
+ AddrInfo info1 = AddrInfo(addr1, source1);
uint256 nKey1 = (uint256)(CHashWriter(SER_GETHASH, 0) << 1).GetHash();
uint256 nKey2 = (uint256)(CHashWriter(SER_GETHASH, 0) << 2).GetHash();
@@ -645,14 +646,14 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
// Test: Two addresses with same IP but different ports can map to
// different buckets because they have different keys.
- CAddrInfo info2 = CAddrInfo(addr2, source1);
+ AddrInfo info2 = AddrInfo(addr2, source1);
BOOST_CHECK(info1.GetKey() != info2.GetKey());
BOOST_CHECK(info1.GetTriedBucket(nKey1, asmap) != info2.GetTriedBucket(nKey1, asmap));
std::set<int> buckets;
for (int j = 0; j < 255; j++) {
- CAddrInfo infoj = CAddrInfo(
+ AddrInfo infoj = AddrInfo(
CAddress(ResolveService("101." + ToString(j) + ".1.1"), NODE_NONE),
ResolveIP("101." + ToString(j) + ".1.1"));
int bucket = infoj.GetTriedBucket(nKey1, asmap);
@@ -664,7 +665,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
buckets.clear();
for (int j = 0; j < 255; j++) {
- CAddrInfo infoj = CAddrInfo(
+ AddrInfo infoj = AddrInfo(
CAddress(ResolveService("250." + ToString(j) + ".1.1"), NODE_NONE),
ResolveIP("250." + ToString(j) + ".1.1"));
int bucket = infoj.GetTriedBucket(nKey1, asmap);
@@ -677,14 +678,14 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
{
- CAddrManTest addrman;
+ AddrManTest addrman;
CAddress addr1 = CAddress(ResolveService("250.1.2.1", 8333), NODE_NONE);
CAddress addr2 = CAddress(ResolveService("250.1.2.1", 9999), NODE_NONE);
CNetAddr source1 = ResolveIP("250.1.2.1");
- CAddrInfo info1 = CAddrInfo(addr1, source1);
+ AddrInfo info1 = AddrInfo(addr1, source1);
uint256 nKey1 = (uint256)(CHashWriter(SER_GETHASH, 0) << 1).GetHash();
uint256 nKey2 = (uint256)(CHashWriter(SER_GETHASH, 0) << 2).GetHash();
@@ -700,13 +701,13 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
BOOST_CHECK(info1.GetNewBucket(nKey1, asmap) != info1.GetNewBucket(nKey2, asmap));
// Test: Ports should not affect bucket placement in the addr
- CAddrInfo info2 = CAddrInfo(addr2, source1);
+ AddrInfo info2 = AddrInfo(addr2, source1);
BOOST_CHECK(info1.GetKey() != info2.GetKey());
BOOST_CHECK_EQUAL(info1.GetNewBucket(nKey1, asmap), info2.GetNewBucket(nKey1, asmap));
std::set<int> buckets;
for (int i = 0; i < 255; i++) {
- CAddrInfo infoi = CAddrInfo(
+ AddrInfo infoi = AddrInfo(
CAddress(ResolveService("250.1.1." + ToString(i)), NODE_NONE),
ResolveIP("250.1.1." + ToString(i)));
int bucket = infoi.GetNewBucket(nKey1, asmap);
@@ -718,7 +719,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
buckets.clear();
for (int j = 0; j < 4 * 255; j++) {
- CAddrInfo infoj = CAddrInfo(CAddress(
+ AddrInfo infoj = AddrInfo(CAddress(
ResolveService(
ToString(250 + (j / 255)) + "." + ToString(j % 256) + ".1.1"), NODE_NONE),
ResolveIP("251.4.1.1"));
@@ -731,7 +732,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
buckets.clear();
for (int p = 0; p < 255; p++) {
- CAddrInfo infoj = CAddrInfo(
+ AddrInfo infoj = AddrInfo(
CAddress(ResolveService("250.1.1.1"), NODE_NONE),
ResolveIP("101." + ToString(p) + ".1.1"));
int bucket = infoj.GetNewBucket(nKey1, asmap);
@@ -743,7 +744,7 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
buckets.clear();
for (int p = 0; p < 255; p++) {
- CAddrInfo infoj = CAddrInfo(
+ AddrInfo infoj = AddrInfo(
CAddress(ResolveService("250.1.1.1"), NODE_NONE),
ResolveIP("250." + ToString(p) + ".1.1"));
int bucket = infoj.GetNewBucket(nKey1, asmap);
@@ -759,9 +760,9 @@ BOOST_AUTO_TEST_CASE(addrman_serialization)
{
std::vector<bool> asmap1 = FromBytes(asmap_raw, sizeof(asmap_raw) * 8);
- auto addrman_asmap1 = std::make_unique<CAddrManTest>(true, asmap1);
- auto addrman_asmap1_dup = std::make_unique<CAddrManTest>(true, asmap1);
- auto addrman_noasmap = std::make_unique<CAddrManTest>();
+ auto addrman_asmap1 = std::make_unique<AddrManTest>(true, asmap1);
+ auto addrman_asmap1_dup = std::make_unique<AddrManTest>(true, asmap1);
+ auto addrman_noasmap = std::make_unique<AddrManTest>();
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
CAddress addr = CAddress(ResolveService("250.1.1.1"), NODE_NONE);
@@ -791,8 +792,8 @@ BOOST_AUTO_TEST_CASE(addrman_serialization)
BOOST_CHECK(bucketAndEntry_asmap1.second != bucketAndEntry_noasmap.second);
// deserializing non-asmaped peers.dat to asmaped addrman
- addrman_asmap1 = std::make_unique<CAddrManTest>(true, asmap1);
- addrman_noasmap = std::make_unique<CAddrManTest>();
+ addrman_asmap1 = std::make_unique<AddrManTest>(true, asmap1);
+ addrman_noasmap = std::make_unique<AddrManTest>();
addrman_noasmap->Add({addr}, default_source);
stream << *addrman_noasmap;
stream >> *addrman_asmap1;
@@ -803,8 +804,8 @@ BOOST_AUTO_TEST_CASE(addrman_serialization)
BOOST_CHECK(bucketAndEntry_asmap1_deser.second == bucketAndEntry_asmap1_dup.second);
// used to map to different buckets, now maps to the same bucket.
- addrman_asmap1 = std::make_unique<CAddrManTest>(true, asmap1);
- addrman_noasmap = std::make_unique<CAddrManTest>();
+ addrman_asmap1 = std::make_unique<AddrManTest>(true, asmap1);
+ addrman_noasmap = std::make_unique<AddrManTest>();
CAddress addr1 = CAddress(ResolveService("250.1.1.1"), NODE_NONE);
CAddress addr2 = CAddress(ResolveService("250.2.1.1"), NODE_NONE);
addrman_noasmap->Add({addr, addr2}, default_source);
@@ -824,7 +825,7 @@ BOOST_AUTO_TEST_CASE(remove_invalid)
{
// Confirm that invalid addresses are ignored in unserialization.
- auto addrman = std::make_unique<CAddrManTest>();
+ auto addrman = std::make_unique<AddrManTest>();
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
const CAddress new1{ResolveService("5.5.5.5"), NODE_NONE};
@@ -856,19 +857,19 @@ BOOST_AUTO_TEST_CASE(remove_invalid)
BOOST_REQUIRE(pos + sizeof(tried2_raw_replacement) <= stream.size());
memcpy(stream.data() + pos, tried2_raw_replacement, sizeof(tried2_raw_replacement));
- addrman = std::make_unique<CAddrManTest>();
+ addrman = std::make_unique<AddrManTest>();
stream >> *addrman;
BOOST_CHECK_EQUAL(addrman->size(), 2);
}
BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision)
{
- CAddrManTest addrman;
+ AddrManTest addrman;
BOOST_CHECK(addrman.size() == 0);
// Empty addrman should return blank addrman info.
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
+ BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
// Add twenty two addresses.
CNetAddr source = ResolveIP("252.2.2.2");
@@ -879,7 +880,7 @@ BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision)
// No collisions yet.
BOOST_CHECK(addrman.size() == i);
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
+ BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
}
// Ensure Good handles duplicates well.
@@ -888,14 +889,14 @@ BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision)
addrman.Good(addr);
BOOST_CHECK(addrman.size() == 22);
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
+ BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
}
}
BOOST_AUTO_TEST_CASE(addrman_noevict)
{
- CAddrManTest addrman;
+ AddrManTest addrman;
// Add 35 addresses.
CNetAddr source = ResolveIP("252.2.2.2");
@@ -906,7 +907,7 @@ BOOST_AUTO_TEST_CASE(addrman_noevict)
// No collision yet.
BOOST_CHECK(addrman.size() == i);
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
+ BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
}
// Collision between 36 and 19.
@@ -915,11 +916,11 @@ BOOST_AUTO_TEST_CASE(addrman_noevict)
addrman.Good(addr36);
BOOST_CHECK(addrman.size() == 36);
- BOOST_CHECK_EQUAL(addrman.SelectTriedCollision().ToString(), "250.1.1.19:0");
+ BOOST_CHECK_EQUAL(addrman.SelectTriedCollision().first.ToString(), "250.1.1.19:0");
// 36 should be discarded and 19 not evicted.
addrman.ResolveCollisions();
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
+ BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
// Lets create two collisions.
for (unsigned int i = 37; i < 59; i++) {
@@ -928,7 +929,7 @@ BOOST_AUTO_TEST_CASE(addrman_noevict)
addrman.Good(addr);
BOOST_CHECK(addrman.size() == i);
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
+ BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
}
// Cause a collision.
@@ -937,26 +938,26 @@ BOOST_AUTO_TEST_CASE(addrman_noevict)
addrman.Good(addr59);
BOOST_CHECK(addrman.size() == 59);
- BOOST_CHECK_EQUAL(addrman.SelectTriedCollision().ToString(), "250.1.1.10:0");
+ BOOST_CHECK_EQUAL(addrman.SelectTriedCollision().first.ToString(), "250.1.1.10:0");
// Cause a second collision.
BOOST_CHECK(!addrman.Add({CAddress(addr36, NODE_NONE)}, source));
addrman.Good(addr36);
BOOST_CHECK(addrman.size() == 59);
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() != "[::]:0");
+ BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() != "[::]:0");
addrman.ResolveCollisions();
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
+ BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
}
BOOST_AUTO_TEST_CASE(addrman_evictionworks)
{
- CAddrManTest addrman;
+ AddrManTest addrman;
BOOST_CHECK(addrman.size() == 0);
// Empty addrman should return blank addrman info.
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
+ BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
// Add 35 addresses
CNetAddr source = ResolveIP("252.2.2.2");
@@ -967,7 +968,7 @@ BOOST_AUTO_TEST_CASE(addrman_evictionworks)
// No collision yet.
BOOST_CHECK(addrman.size() == i);
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
+ BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
}
// Collision between 36 and 19.
@@ -976,7 +977,7 @@ BOOST_AUTO_TEST_CASE(addrman_evictionworks)
addrman.Good(addr);
BOOST_CHECK_EQUAL(addrman.size(), 36);
- CAddrInfo info = addrman.SelectTriedCollision();
+ auto info = addrman.SelectTriedCollision().first;
BOOST_CHECK_EQUAL(info.ToString(), "250.1.1.19:0");
// Ensure test of address fails, so that it is evicted.
@@ -984,28 +985,28 @@ BOOST_AUTO_TEST_CASE(addrman_evictionworks)
// Should swap 36 for 19.
addrman.ResolveCollisions();
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
+ BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
// If 36 was swapped for 19, then this should cause no collisions.
BOOST_CHECK(!addrman.Add({CAddress(addr, NODE_NONE)}, source));
addrman.Good(addr);
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
+ BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
// If we insert 19 it should collide with 36
CService addr19 = ResolveService("250.1.1.19");
BOOST_CHECK(!addrman.Add({CAddress(addr19, NODE_NONE)}, source));
addrman.Good(addr19);
- BOOST_CHECK_EQUAL(addrman.SelectTriedCollision().ToString(), "250.1.1.36:0");
+ BOOST_CHECK_EQUAL(addrman.SelectTriedCollision().first.ToString(), "250.1.1.36:0");
addrman.ResolveCollisions();
- BOOST_CHECK(addrman.SelectTriedCollision().ToString() == "[::]:0");
+ BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
}
BOOST_AUTO_TEST_CASE(load_addrman)
{
- CAddrManUncorrupted addrmanUncorrupted;
+ AddrManUncorrupted addrmanUncorrupted;
CService addr1, addr2, addr3;
BOOST_CHECK(Lookup("250.7.1.1", addr1, 8333, false));
@@ -1024,7 +1025,7 @@ BOOST_AUTO_TEST_CASE(load_addrman)
// Test that the de-serialization does not throw an exception.
CDataStream ssPeers1 = AddrmanToStream(addrmanUncorrupted);
bool exceptionThrown = false;
- CAddrMan addrman1(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 100);
+ AddrMan addrman1(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 100);
BOOST_CHECK(addrman1.size() == 0);
try {
@@ -1041,7 +1042,7 @@ BOOST_AUTO_TEST_CASE(load_addrman)
// Test that ReadFromStream creates an addrman with the correct number of addrs.
CDataStream ssPeers2 = AddrmanToStream(addrmanUncorrupted);
- CAddrMan addrman2(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 100);
+ AddrMan addrman2(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 100);
BOOST_CHECK(addrman2.size() == 0);
ReadFromStream(addrman2, ssPeers2);
BOOST_CHECK(addrman2.size() == 3);
@@ -1050,12 +1051,12 @@ BOOST_AUTO_TEST_CASE(load_addrman)
BOOST_AUTO_TEST_CASE(load_addrman_corrupted)
{
- CAddrManCorrupted addrmanCorrupted;
+ AddrManCorrupted addrmanCorrupted;
// Test that the de-serialization of corrupted addrman throws an exception.
CDataStream ssPeers1 = AddrmanToStream(addrmanCorrupted);
bool exceptionThrown = false;
- CAddrMan addrman1(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 100);
+ AddrMan addrman1(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 100);
BOOST_CHECK(addrman1.size() == 0);
try {
unsigned char pchMsgTmp[4];
@@ -1071,7 +1072,7 @@ BOOST_AUTO_TEST_CASE(load_addrman_corrupted)
// Test that ReadFromStream fails if peers.dat is corrupt
CDataStream ssPeers2 = AddrmanToStream(addrmanCorrupted);
- CAddrMan addrman2(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 100);
+ AddrMan addrman2(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 100);
BOOST_CHECK(addrman2.size() == 0);
BOOST_CHECK_THROW(ReadFromStream(addrman2, ssPeers2), std::ios_base::failure);
}
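The test hunks above switch every SelectTriedCollision() call site from a bare address object to the .first member of a returned pair. A minimal sketch of that call-site pattern, using hypothetical stand-in types rather than the real AddrMan API, with the second member assumed to carry auxiliary metadata such as a timestamp:

    #include <cassert>
    #include <string>
    #include <utility>

    // Hypothetical stand-ins for illustration only.
    struct Address {
        std::string str;
        std::string ToString() const { return str; }
    };

    // Before: the function returned the address alone.
    // After: it returns the address plus metadata (assumed here to be a timestamp).
    std::pair<Address, long long> SelectCollision()
    {
        return {Address{"[::]:0"}, 0}; // "no collision" sentinel, as in the tests above
    }

    int main()
    {
        // Call sites either take .first ...
        assert(SelectCollision().first.ToString() == "[::]:0");

        // ... or unpack both members with a structured binding.
        const auto [addr, last_try] = SelectCollision();
        assert(addr.ToString() == "[::]:0" && last_try == 0);
        return 0;
    }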
diff --git a/src/test/fs_tests.cpp b/src/test/fs_tests.cpp
index 526a3c27be..ecb838a7dd 100644
--- a/src/test/fs_tests.cpp
+++ b/src/test/fs_tests.cpp
@@ -11,6 +11,33 @@
BOOST_FIXTURE_TEST_SUITE(fs_tests, BasicTestingSetup)
+BOOST_AUTO_TEST_CASE(fsbridge_pathtostring)
+{
+ std::string u8_str = "fs_tests_₿_🏃";
+ BOOST_CHECK_EQUAL(fs::PathToString(fs::PathFromString(u8_str)), u8_str);
+ BOOST_CHECK_EQUAL(fs::u8path(u8_str).u8string(), u8_str);
+ BOOST_CHECK_EQUAL(fs::PathFromString(u8_str).u8string(), u8_str);
+ BOOST_CHECK_EQUAL(fs::PathToString(fs::u8path(u8_str)), u8_str);
+#ifndef WIN32
+ // On non-windows systems, verify that arbitrary byte strings containing
+ // invalid UTF-8 can be round tripped successfully with PathToString and
+ // PathFromString. On non-windows systems, paths are just byte strings so
+ // these functions do not do any encoding. On windows, paths are Unicode,
+ // and these functions do encoding and decoding, so the behavior of this
+ // test would be undefined.
+ std::string invalid_u8_str = "\xf0";
+ BOOST_CHECK_EQUAL(invalid_u8_str.size(), 1);
+ BOOST_CHECK_EQUAL(fs::PathToString(fs::PathFromString(invalid_u8_str)), invalid_u8_str);
+#endif
+}
+
+BOOST_AUTO_TEST_CASE(fsbridge_stem)
+{
+ std::string test_filename = "fs_tests_₿_🏃.dat";
+ std::string expected_stem = "fs_tests_₿_🏃";
+ BOOST_CHECK_EQUAL(fs::PathToString(fs::PathFromString(test_filename).stem()), expected_stem);
+}
+
BOOST_AUTO_TEST_CASE(fsbridge_fstream)
{
fs::path tmpfolder = m_args.GetDataDirBase();
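The new fs_tests cases exercise a string/path round trip, including invalid UTF-8 on non-Windows. A minimal sketch of why that round trip is lossless on POSIX, where std::filesystem::path stores a plain byte string; the helper names below are illustrative stand-ins, not the project's fs.h API:

    #include <cassert>
    #include <filesystem>
    #include <string>

    namespace stdfs = std::filesystem;

    // Illustrative helpers: on POSIX, path::string() returns the stored bytes
    // unchanged, so any byte sequence (even invalid UTF-8) survives the trip.
    // On Windows, paths are wide strings and a conversion step is involved.
    std::string PathToStr(const stdfs::path& p) { return p.string(); }
    stdfs::path PathFromStr(const std::string& s) { return stdfs::path(s); }

    int main()
    {
        const std::string utf8 = "fs_tests_\xe2\x82\xbf"; // UTF-8 bytes for the bitcoin sign
        assert(PathToStr(PathFromStr(utf8)) == utf8);

    #ifndef _WIN32
        const std::string invalid = "\xf0"; // lone UTF-8 lead byte
        assert(PathToStr(PathFromStr(invalid)) == invalid);
    #endif
        return 0;
    }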
diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp
index 45ee778b87..8df3707fc9 100644
--- a/src/test/fuzz/addrman.cpp
+++ b/src/test/fuzz/addrman.cpp
@@ -4,8 +4,10 @@
#include <addrdb.h>
#include <addrman.h>
+#include <addrman_impl.h>
#include <chainparams.h>
#include <merkleblock.h>
+#include <random.h>
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
@@ -27,99 +29,93 @@ FUZZ_TARGET_INIT(data_stream_addr_man, initialize_addrman)
{
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
CDataStream data_stream = ConsumeDataStream(fuzzed_data_provider);
- CAddrMan addr_man(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
+ AddrMan addr_man(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
try {
ReadFromStream(addr_man, data_stream);
} catch (const std::exception&) {
}
}
-class CAddrManDeterministic : public CAddrMan
+/**
+ * Generate a random address. Always returns a valid address.
+ */
+CNetAddr RandAddr(FuzzedDataProvider& fuzzed_data_provider, FastRandomContext& fast_random_context)
{
-public:
- FuzzedDataProvider& m_fuzzed_data_provider;
-
- explicit CAddrManDeterministic(std::vector<bool> asmap, FuzzedDataProvider& fuzzed_data_provider)
- : CAddrMan(std::move(asmap), /* deterministic */ true, /* consistency_check_ratio */ 0)
- , m_fuzzed_data_provider(fuzzed_data_provider)
- {
- WITH_LOCK(cs, insecure_rand = FastRandomContext{ConsumeUInt256(fuzzed_data_provider)});
- }
-
- /**
- * Generate a random address. Always returns a valid address.
- */
- CNetAddr RandAddr() EXCLUSIVE_LOCKS_REQUIRED(cs)
- {
- CNetAddr addr;
- if (m_fuzzed_data_provider.remaining_bytes() > 1 && m_fuzzed_data_provider.ConsumeBool()) {
- addr = ConsumeNetAddr(m_fuzzed_data_provider);
- } else {
- // The networks [1..6] correspond to CNetAddr::BIP155Network (private).
- static const std::map<uint8_t, uint8_t> net_len_map = {{1, ADDR_IPV4_SIZE},
- {2, ADDR_IPV6_SIZE},
- {4, ADDR_TORV3_SIZE},
- {5, ADDR_I2P_SIZE},
- {6, ADDR_CJDNS_SIZE}};
- uint8_t net = insecure_rand.randrange(5) + 1; // [1..5]
- if (net == 3) {
- net = 6;
- }
-
- CDataStream s(SER_NETWORK, PROTOCOL_VERSION | ADDRV2_FORMAT);
-
- s << net;
- s << insecure_rand.randbytes(net_len_map.at(net));
-
- s >> addr;
+ CNetAddr addr;
+ if (fuzzed_data_provider.remaining_bytes() > 1 && fuzzed_data_provider.ConsumeBool()) {
+ addr = ConsumeNetAddr(fuzzed_data_provider);
+ } else {
+ // The networks [1..6] correspond to CNetAddr::BIP155Network (private).
+ static const std::map<uint8_t, uint8_t> net_len_map = {{1, ADDR_IPV4_SIZE},
+ {2, ADDR_IPV6_SIZE},
+ {4, ADDR_TORV3_SIZE},
+ {5, ADDR_I2P_SIZE},
+ {6, ADDR_CJDNS_SIZE}};
+ uint8_t net = fast_random_context.randrange(5) + 1; // [1..5]
+ if (net == 3) {
+ net = 6;
}
- // Return a dummy IPv4 5.5.5.5 if we generated an invalid address.
- if (!addr.IsValid()) {
- in_addr v4_addr = {};
- v4_addr.s_addr = 0x05050505;
- addr = CNetAddr{v4_addr};
- }
-
- return addr;
- }
-
- /**
- * Fill this addrman with lots of addresses from lots of sources.
- */
- void Fill()
- {
- LOCK(cs);
+ CDataStream s(SER_NETWORK, PROTOCOL_VERSION | ADDRV2_FORMAT);
- // Add some of the addresses directly to the "tried" table.
+ s << net;
+ s << fast_random_context.randbytes(net_len_map.at(net));
- // 0, 1, 2, 3 corresponding to 0%, 100%, 50%, 33%
- const size_t n = m_fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 3);
+ s >> addr;
+ }
- const size_t num_sources = m_fuzzed_data_provider.ConsumeIntegralInRange<size_t>(1, 50);
- CNetAddr prev_source;
- // Use insecure_rand inside the loops instead of m_fuzzed_data_provider because when
- // the latter is exhausted it just returns 0.
- for (size_t i = 0; i < num_sources; ++i) {
- const auto source = RandAddr();
- const size_t num_addresses = insecure_rand.randrange(500) + 1; // [1..500]
+ // Return a dummy IPv4 5.5.5.5 if we generated an invalid address.
+ if (!addr.IsValid()) {
+ in_addr v4_addr = {};
+ v4_addr.s_addr = 0x05050505;
+ addr = CNetAddr{v4_addr};
+ }
- for (size_t j = 0; j < num_addresses; ++j) {
- const auto addr = CAddress{CService{RandAddr(), 8333}, NODE_NETWORK};
- const auto time_penalty = insecure_rand.randrange(100000001);
- Add_(addr, source, time_penalty);
+ return addr;
+}
- if (n > 0 && mapInfo.size() % n == 0) {
- Good_(addr, false, GetTime());
- }
+/** Fill addrman with lots of addresses from lots of sources. */
+void FillAddrman(AddrMan& addrman, FuzzedDataProvider& fuzzed_data_provider)
+{
+ // Add a fraction of the addresses to the "tried" table.
+ // 0, 1, 2, 3 corresponding to 0%, 100%, 50%, 33%
+ const size_t n = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 3);
+
+ const size_t num_sources = fuzzed_data_provider.ConsumeIntegralInRange<size_t>(1, 50);
+ CNetAddr prev_source;
+ // Generate a FastRandomContext seed to use inside the loops instead of
+ // fuzzed_data_provider. When fuzzed_data_provider is exhausted it
+ // just returns 0.
+ FastRandomContext fast_random_context{ConsumeUInt256(fuzzed_data_provider)};
+ for (size_t i = 0; i < num_sources; ++i) {
+ const auto source = RandAddr(fuzzed_data_provider, fast_random_context);
+ const size_t num_addresses = fast_random_context.randrange(500) + 1; // [1..500]
+
+ for (size_t j = 0; j < num_addresses; ++j) {
+ const auto addr = CAddress{CService{RandAddr(fuzzed_data_provider, fast_random_context), 8333}, NODE_NETWORK};
+ const auto time_penalty = fast_random_context.randrange(100000001);
+ addrman.Add({addr}, source, time_penalty);
+
+ if (n > 0 && addrman.size() % n == 0) {
+ addrman.Good(addr, GetTime());
+ }
- // Add 10% of the addresses from more than one source.
- if (insecure_rand.randrange(10) == 0 && prev_source.IsValid()) {
- Add_(addr, prev_source, time_penalty);
- }
+ // Add 10% of the addresses from more than one source.
+ if (fast_random_context.randrange(10) == 0 && prev_source.IsValid()) {
+ addrman.Add({addr}, prev_source, time_penalty);
}
- prev_source = source;
}
+ prev_source = source;
+ }
+}
+
+class AddrManDeterministic : public AddrMan
+{
+public:
+ explicit AddrManDeterministic(std::vector<bool> asmap, FuzzedDataProvider& fuzzed_data_provider)
+ : AddrMan(std::move(asmap), /* deterministic */ true, /* consistency_check_ratio */ 0)
+ {
+ WITH_LOCK(m_impl->cs, m_impl->insecure_rand = FastRandomContext{ConsumeUInt256(fuzzed_data_provider)});
}
/**
@@ -129,46 +125,46 @@ public:
* - vvNew entries refer to the same addresses
* - vvTried entries refer to the same addresses
*/
- bool operator==(const CAddrManDeterministic& other)
+ bool operator==(const AddrManDeterministic& other)
{
- LOCK2(cs, other.cs);
+ LOCK2(m_impl->cs, other.m_impl->cs);
- if (mapInfo.size() != other.mapInfo.size() || nNew != other.nNew ||
- nTried != other.nTried) {
+ if (m_impl->mapInfo.size() != other.m_impl->mapInfo.size() || m_impl->nNew != other.m_impl->nNew ||
+ m_impl->nTried != other.m_impl->nTried) {
return false;
}
// Check that all values in `mapInfo` are equal to all values in `other.mapInfo`.
// Keys may be different.
- using CAddrInfoHasher = std::function<size_t(const CAddrInfo&)>;
- using CAddrInfoEq = std::function<bool(const CAddrInfo&, const CAddrInfo&)>;
+ using AddrInfoHasher = std::function<size_t(const AddrInfo&)>;
+ using AddrInfoEq = std::function<bool(const AddrInfo&, const AddrInfo&)>;
CNetAddrHash netaddr_hasher;
- CAddrInfoHasher addrinfo_hasher = [&netaddr_hasher](const CAddrInfo& a) {
+ AddrInfoHasher addrinfo_hasher = [&netaddr_hasher](const AddrInfo& a) {
return netaddr_hasher(static_cast<CNetAddr>(a)) ^ netaddr_hasher(a.source) ^
a.nLastSuccess ^ a.nAttempts ^ a.nRefCount ^ a.fInTried;
};
- CAddrInfoEq addrinfo_eq = [](const CAddrInfo& lhs, const CAddrInfo& rhs) {
+ AddrInfoEq addrinfo_eq = [](const AddrInfo& lhs, const AddrInfo& rhs) {
return static_cast<CNetAddr>(lhs) == static_cast<CNetAddr>(rhs) &&
lhs.source == rhs.source && lhs.nLastSuccess == rhs.nLastSuccess &&
lhs.nAttempts == rhs.nAttempts && lhs.nRefCount == rhs.nRefCount &&
lhs.fInTried == rhs.fInTried;
};
- using Addresses = std::unordered_set<CAddrInfo, CAddrInfoHasher, CAddrInfoEq>;
+ using Addresses = std::unordered_set<AddrInfo, AddrInfoHasher, AddrInfoEq>;
- const size_t num_addresses{mapInfo.size()};
+ const size_t num_addresses{m_impl->mapInfo.size()};
Addresses addresses{num_addresses, addrinfo_hasher, addrinfo_eq};
- for (const auto& [id, addr] : mapInfo) {
+ for (const auto& [id, addr] : m_impl->mapInfo) {
addresses.insert(addr);
}
Addresses other_addresses{num_addresses, addrinfo_hasher, addrinfo_eq};
- for (const auto& [id, addr] : other.mapInfo) {
+ for (const auto& [id, addr] : other.m_impl->mapInfo) {
other_addresses.insert(addr);
}
@@ -176,14 +172,14 @@ public:
return false;
}
- auto IdsReferToSameAddress = [&](int id, int other_id) EXCLUSIVE_LOCKS_REQUIRED(cs, other.cs) {
+ auto IdsReferToSameAddress = [&](int id, int other_id) EXCLUSIVE_LOCKS_REQUIRED(m_impl->cs, other.m_impl->cs) {
if (id == -1 && other_id == -1) {
return true;
}
if ((id == -1 && other_id != -1) || (id != -1 && other_id == -1)) {
return false;
}
- return mapInfo.at(id) == other.mapInfo.at(other_id);
+ return m_impl->mapInfo.at(id) == other.m_impl->mapInfo.at(other_id);
};
// Check that `vvNew` contains the same addresses as `other.vvNew`. Notice - `vvNew[i][j]`
@@ -191,7 +187,7 @@ public:
// themselves may differ between `vvNew` and `other.vvNew`.
for (size_t i = 0; i < ADDRMAN_NEW_BUCKET_COUNT; ++i) {
for (size_t j = 0; j < ADDRMAN_BUCKET_SIZE; ++j) {
- if (!IdsReferToSameAddress(vvNew[i][j], other.vvNew[i][j])) {
+ if (!IdsReferToSameAddress(m_impl->vvNew[i][j], other.m_impl->vvNew[i][j])) {
return false;
}
}
@@ -200,7 +196,7 @@ public:
// Same for `vvTried`.
for (size_t i = 0; i < ADDRMAN_TRIED_BUCKET_COUNT; ++i) {
for (size_t j = 0; j < ADDRMAN_BUCKET_SIZE; ++j) {
- if (!IdsReferToSameAddress(vvTried[i][j], other.vvTried[i][j])) {
+ if (!IdsReferToSameAddress(m_impl->vvTried[i][j], other.m_impl->vvTried[i][j])) {
return false;
}
}
@@ -222,7 +218,7 @@ FUZZ_TARGET_INIT(addrman, initialize_addrman)
FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size());
SetMockTime(ConsumeTime(fuzzed_data_provider));
std::vector<bool> asmap = ConsumeAsmap(fuzzed_data_provider);
- auto addr_man_ptr = std::make_unique<CAddrManDeterministic>(asmap, fuzzed_data_provider);
+ auto addr_man_ptr = std::make_unique<AddrManDeterministic>(asmap, fuzzed_data_provider);
if (fuzzed_data_provider.ConsumeBool()) {
const std::vector<uint8_t> serialized_data{ConsumeRandomLengthByteVector(fuzzed_data_provider)};
CDataStream ds(serialized_data, SER_DISK, INIT_PROTO_VERSION);
@@ -231,10 +227,10 @@ FUZZ_TARGET_INIT(addrman, initialize_addrman)
try {
ds >> *addr_man_ptr;
} catch (const std::ios_base::failure&) {
- addr_man_ptr = std::make_unique<CAddrManDeterministic>(asmap, fuzzed_data_provider);
+ addr_man_ptr = std::make_unique<AddrManDeterministic>(asmap, fuzzed_data_provider);
}
}
- CAddrManDeterministic& addr_man = *addr_man_ptr;
+ AddrManDeterministic& addr_man = *addr_man_ptr;
while (fuzzed_data_provider.ConsumeBool()) {
CallOneOf(
fuzzed_data_provider,
@@ -283,7 +279,7 @@ FUZZ_TARGET_INIT(addrman, initialize_addrman)
}
});
}
- const CAddrMan& const_addr_man{addr_man};
+ const AddrMan& const_addr_man{addr_man};
(void)const_addr_man.GetAddr(
/* max_addresses */ fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096),
/* max_pct */ fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096),
@@ -301,12 +297,12 @@ FUZZ_TARGET_INIT(addrman_serdeser, initialize_addrman)
SetMockTime(ConsumeTime(fuzzed_data_provider));
std::vector<bool> asmap = ConsumeAsmap(fuzzed_data_provider);
- CAddrManDeterministic addr_man1{asmap, fuzzed_data_provider};
- CAddrManDeterministic addr_man2{asmap, fuzzed_data_provider};
+ AddrManDeterministic addr_man1{asmap, fuzzed_data_provider};
+ AddrManDeterministic addr_man2{asmap, fuzzed_data_provider};
CDataStream data_stream(SER_NETWORK, PROTOCOL_VERSION);
- addr_man1.Fill();
+ FillAddrman(addr_man1, fuzzed_data_provider);
data_stream << addr_man1;
data_stream >> addr_man2;
assert(addr_man1 == addr_man2);
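The refactor above moves address generation into free functions and seeds a separate random context from the fuzz input, because an exhausted data provider only returns zeros. A minimal sketch of that pattern with the standard library, where std::mt19937_64 stands in for FastRandomContext and the fixed-size seed read stands in for ConsumeUInt256:

    #include <algorithm>
    #include <cstdint>
    #include <cstring>
    #include <random>
    #include <vector>

    // Derive a 64-bit seed from the front of the fuzz input; if the input is
    // shorter than 8 bytes the missing bytes stay zero, which is still a valid
    // (if less varied) seed.
    uint64_t SeedFromInput(const std::vector<uint8_t>& input)
    {
        uint64_t seed = 0;
        if (!input.empty()) {
            std::memcpy(&seed, input.data(), std::min<std::size_t>(input.size(), sizeof(seed)));
        }
        return seed;
    }

    void Fill(const std::vector<uint8_t>& input)
    {
        std::mt19937_64 rng{SeedFromInput(input)};
        // Use rng inside the hot loops: unlike the raw fuzz input, it never
        // runs out and keeps producing varied values for the same seed.
        for (int i = 0; i < 100; ++i) {
            const uint64_t num_addresses = rng() % 500 + 1; // [1..500]
            (void)num_addresses;
        }
    }

    int main()
    {
        Fill({0x01, 0x02, 0x03});
        return 0;
    }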
diff --git a/src/test/fuzz/banman.cpp b/src/test/fuzz/banman.cpp
index 561cc83c72..fbba25c404 100644
--- a/src/test/fuzz/banman.cpp
+++ b/src/test/fuzz/banman.cpp
@@ -48,7 +48,7 @@ FUZZ_TARGET_INIT(banman, initialize_banman)
const bool start_with_corrupted_banlist{fuzzed_data_provider.ConsumeBool()};
bool force_read_and_write_to_err{false};
if (start_with_corrupted_banlist) {
- assert(WriteBinaryFile(banlist_file.string() + ".json",
+ assert(WriteBinaryFile(banlist_file + ".json",
fuzzed_data_provider.ConsumeRandomLengthString()));
} else {
force_read_and_write_to_err = fuzzed_data_provider.ConsumeBool();
@@ -111,5 +111,5 @@ FUZZ_TARGET_INIT(banman, initialize_banman)
assert(banmap == banmap_read);
}
}
- fs::remove(banlist_file.string() + ".json");
+ fs::remove(fs::PathToString(banlist_file + ".json"));
}
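The banman hunk appends a ".json" suffix to an fs::path instead of to its string form. A minimal sketch of the distinction between joining a path component and appending to the filename, using only standard std::filesystem operations ('+=' here plays the role the project's fs::path wrapper is assumed to give its own '+'):

    #include <cassert>
    #include <filesystem>

    namespace stdfs = std::filesystem;

    int main()
    {
        stdfs::path banlist = stdfs::path("/datadir") / "banlist"; // '/' joins components
        stdfs::path json = banlist;
        json += ".json"; // '+=' (concat) appends to the filename without a separator
        assert(json == stdfs::path("/datadir/banlist.json"));
        return 0;
    }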
diff --git a/src/test/fuzz/connman.cpp b/src/test/fuzz/connman.cpp
index 01741103e4..d381345a0d 100644
--- a/src/test/fuzz/connman.cpp
+++ b/src/test/fuzz/connman.cpp
@@ -25,7 +25,7 @@ FUZZ_TARGET_INIT(connman, initialize_connman)
{
FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()};
SetMockTime(ConsumeTime(fuzzed_data_provider));
- CAddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
+ AddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
CConnman connman{fuzzed_data_provider.ConsumeIntegral<uint64_t>(), fuzzed_data_provider.ConsumeIntegral<uint64_t>(), addrman, fuzzed_data_provider.ConsumeBool()};
CNetAddr random_netaddr;
CNode random_node = ConsumeNode(fuzzed_data_provider);
diff --git a/src/test/fuzz/deserialize.cpp b/src/test/fuzz/deserialize.cpp
index 83ae1680e3..a9325fa738 100644
--- a/src/test/fuzz/deserialize.cpp
+++ b/src/test/fuzz/deserialize.cpp
@@ -4,6 +4,7 @@
#include <addrdb.h>
#include <addrman.h>
+#include <addrman_impl.h>
#include <blockencodings.h>
#include <blockfilter.h>
#include <chain.h>
@@ -104,7 +105,7 @@ FUZZ_TARGET_DESERIALIZE(block_filter_deserialize, {
DeserializeFromFuzzingInput(buffer, block_filter);
})
FUZZ_TARGET_DESERIALIZE(addr_info_deserialize, {
- CAddrInfo addr_info;
+ AddrInfo addr_info;
DeserializeFromFuzzingInput(buffer, addr_info);
})
FUZZ_TARGET_DESERIALIZE(block_file_info_deserialize, {
@@ -188,7 +189,7 @@ FUZZ_TARGET_DESERIALIZE(blockmerkleroot, {
BlockMerkleRoot(block, &mutated);
})
FUZZ_TARGET_DESERIALIZE(addrman_deserialize, {
- CAddrMan am(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
+ AddrMan am(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
DeserializeFromFuzzingInput(buffer, am);
})
FUZZ_TARGET_DESERIALIZE(blockheader_deserialize, {
diff --git a/src/test/script_parse_tests.cpp b/src/test/script_parse_tests.cpp
new file mode 100644
index 0000000000..004c1a9a84
--- /dev/null
+++ b/src/test/script_parse_tests.cpp
@@ -0,0 +1,55 @@
+// Copyright (c) 2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#include <core_io.h>
+#include <script/script.h>
+#include <util/strencodings.h>
+#include <test/util/setup_common.h>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(script_parse_tests)
+BOOST_AUTO_TEST_CASE(parse_script)
+{
+ const std::vector<std::pair<std::string,std::string>> IN_OUT{
+ // {IN: script string , OUT: hex string }
+ {"", ""},
+ {"0", "00"},
+ {"1", "51"},
+ {"2", "52"},
+ {"3", "53"},
+ {"4", "54"},
+ {"5", "55"},
+ {"6", "56"},
+ {"7", "57"},
+ {"8", "58"},
+ {"9", "59"},
+ {"10", "5a"},
+ {"11", "5b"},
+ {"12", "5c"},
+ {"13", "5d"},
+ {"14", "5e"},
+ {"15", "5f"},
+ {"16", "60"},
+ {"17", "0111"},
+ {"-9", "0189"},
+ {"0x17", "17"},
+ {"'17'", "023137"},
+ {"ELSE", "67"},
+ {"NOP10", "b9"},
+ };
+ std::string all_in;
+ std::string all_out;
+ for (const auto& [in, out] : IN_OUT) {
+ BOOST_CHECK_EQUAL(HexStr(ParseScript(in)), out);
+ all_in += " " + in + " ";
+ all_out += out;
+ }
+ BOOST_CHECK_EQUAL(HexStr(ParseScript(all_in)), all_out);
+
+ BOOST_CHECK_EXCEPTION(ParseScript("11111111111111111111"), std::runtime_error, HasReason("script parse error: decimal numeric value only allowed in the range -0xFFFFFFFF...0xFFFFFFFF"));
+ BOOST_CHECK_EXCEPTION(ParseScript("11111111111"), std::runtime_error, HasReason("script parse error: decimal numeric value only allowed in the range -0xFFFFFFFF...0xFFFFFFFF"));
+ BOOST_CHECK_EXCEPTION(ParseScript("OP_CHECKSIGADD"), std::runtime_error, HasReason("script parse error: unknown opcode"));
+}
+BOOST_AUTO_TEST_SUITE_END()
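The new script_parse_tests table maps decimal literals to opcode bytes. A minimal sketch of the encoding rule those vectors follow, assuming the standard script constants (OP_0 = 0x00, OP_1..OP_16 = 0x51..0x60); values outside that range become minimal little-endian data pushes, so 17 is the two bytes 01 11 and -9 is 01 89 with the sign carried in the top bit:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Encode a small integer the way the test vectors above show:
    // 0 -> OP_0, 1..16 -> OP_1..OP_16, otherwise a minimal little-endian push
    // with the sign carried in the high bit of the last byte.
    std::vector<uint8_t> EncodeNum(int64_t n)
    {
        if (n == 0) return {0x00};                         // OP_0
        if (n >= 1 && n <= 16) return {uint8_t(0x50 + n)}; // OP_1..OP_16
        const bool neg = n < 0;
        uint64_t magnitude = neg ? -n : n;
        std::vector<uint8_t> data;
        while (magnitude) { data.push_back(magnitude & 0xff); magnitude >>= 8; }
        if (data.back() & 0x80) data.push_back(neg ? 0x80 : 0x00);
        else if (neg) data.back() |= 0x80;
        std::vector<uint8_t> out{uint8_t(data.size())};    // single-byte push opcode
        out.insert(out.end(), data.begin(), data.end());
        return out;
    }

    int main()
    {
        assert((EncodeNum(16) == std::vector<uint8_t>{0x60}));
        assert((EncodeNum(17) == std::vector<uint8_t>{0x01, 0x11}));
        assert((EncodeNum(-9) == std::vector<uint8_t>{0x01, 0x89}));
        return 0;
    }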
diff --git a/src/test/settings_tests.cpp b/src/test/settings_tests.cpp
index 340ce33d91..15cba9e3e5 100644
--- a/src/test/settings_tests.cpp
+++ b/src/test/settings_tests.cpp
@@ -80,19 +80,19 @@ BOOST_AUTO_TEST_CASE(ReadWrite)
"dupe": "dupe"
})");
BOOST_CHECK(!util::ReadSettings(path, values, errors));
- std::vector<std::string> dup_keys = {strprintf("Found duplicate key dupe in settings file %s", path.string())};
+ std::vector<std::string> dup_keys = {strprintf("Found duplicate key dupe in settings file %s", fs::PathToString(path))};
BOOST_CHECK_EQUAL_COLLECTIONS(errors.begin(), errors.end(), dup_keys.begin(), dup_keys.end());
// Check non-kv json files not allowed
WriteText(path, R"("non-kv")");
BOOST_CHECK(!util::ReadSettings(path, values, errors));
- std::vector<std::string> non_kv = {strprintf("Found non-object value \"non-kv\" in settings file %s", path.string())};
+ std::vector<std::string> non_kv = {strprintf("Found non-object value \"non-kv\" in settings file %s", fs::PathToString(path))};
BOOST_CHECK_EQUAL_COLLECTIONS(errors.begin(), errors.end(), non_kv.begin(), non_kv.end());
// Check invalid json not allowed
WriteText(path, R"(invalid json)");
BOOST_CHECK(!util::ReadSettings(path, values, errors));
- std::vector<std::string> fail_parse = {strprintf("Unable to parse settings file %s", path.string())};
+ std::vector<std::string> fail_parse = {strprintf("Unable to parse settings file %s", fs::PathToString(path))};
BOOST_CHECK_EQUAL_COLLECTIONS(errors.begin(), errors.end(), fail_parse.begin(), fail_parse.end());
}
diff --git a/src/test/util/chainstate.h b/src/test/util/chainstate.h
index 81ea4c38f5..e95573022c 100644
--- a/src/test/util/chainstate.h
+++ b/src/test/util/chainstate.h
@@ -36,7 +36,7 @@ CreateAndActivateUTXOSnapshot(NodeContext& node, const fs::path root, F malleati
UniValue result = CreateUTXOSnapshot(node, node.chainman->ActiveChainstate(), auto_outfile);
BOOST_TEST_MESSAGE(
- "Wrote UTXO snapshot to " << snapshot_path.make_preferred().string() << ": " << result.write());
+ "Wrote UTXO snapshot to " << fs::PathToString(snapshot_path.make_preferred()) << ": " << result.write());
// Read the written snapshot in and then activate it.
//
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 97e614379c..a3c7564d76 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -91,8 +91,8 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve
extra_args);
util::ThreadRename("test");
fs::create_directories(m_path_root);
- m_args.ForceSetArg("-datadir", m_path_root.string());
- gArgs.ForceSetArg("-datadir", m_path_root.string());
+ m_args.ForceSetArg("-datadir", fs::PathToString(m_path_root));
+ gArgs.ForceSetArg("-datadir", fs::PathToString(m_path_root));
gArgs.ClearPathCache();
{
SetupServerArgs(*m_node.args);
@@ -192,7 +192,7 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const
throw std::runtime_error(strprintf("ActivateBestChain failed. (%s)", state.ToString()));
}
- m_node.addrman = std::make_unique<CAddrMan>(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
+ m_node.addrman = std::make_unique<AddrMan>(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0);
m_node.banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME);
m_node.connman = std::make_unique<CConnman>(0x1337, 0x1337, *m_node.addrman); // Deterministic randomness for tests.
m_node.peerman = PeerManager::make(chainparams, *m_node.connman, *m_node.addrman,
diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp
index 51707310a2..b1300d06ba 100644
--- a/src/test/util_tests.cpp
+++ b/src/test/util_tests.cpp
@@ -51,23 +51,23 @@ BOOST_AUTO_TEST_CASE(util_datadir)
{
// Use local args variable instead of m_args to avoid making assumptions about test setup
ArgsManager args;
- args.ForceSetArg("-datadir", m_path_root.string());
+ args.ForceSetArg("-datadir", fs::PathToString(m_path_root));
const fs::path dd_norm = args.GetDataDirBase();
- args.ForceSetArg("-datadir", dd_norm.string() + "/");
+ args.ForceSetArg("-datadir", fs::PathToString(dd_norm) + "/");
args.ClearPathCache();
BOOST_CHECK_EQUAL(dd_norm, args.GetDataDirBase());
- args.ForceSetArg("-datadir", dd_norm.string() + "/.");
+ args.ForceSetArg("-datadir", fs::PathToString(dd_norm) + "/.");
args.ClearPathCache();
BOOST_CHECK_EQUAL(dd_norm, args.GetDataDirBase());
- args.ForceSetArg("-datadir", dd_norm.string() + "/./");
+ args.ForceSetArg("-datadir", fs::PathToString(dd_norm) + "/./");
args.ClearPathCache();
BOOST_CHECK_EQUAL(dd_norm, args.GetDataDirBase());
- args.ForceSetArg("-datadir", dd_norm.string() + "/.//");
+ args.ForceSetArg("-datadir", fs::PathToString(dd_norm) + "/.//");
args.ClearPathCache();
BOOST_CHECK_EQUAL(dd_norm, args.GetDataDirBase());
}
@@ -1181,13 +1181,13 @@ BOOST_AUTO_TEST_CASE(util_ReadWriteSettings)
{
// Test writing setting.
TestArgsManager args1;
- args1.ForceSetArg("-datadir", m_path_root.string());
+ args1.ForceSetArg("-datadir", fs::PathToString(m_path_root));
args1.LockSettings([&](util::Settings& settings) { settings.rw_settings["name"] = "value"; });
args1.WriteSettingsFile();
// Test reading setting.
TestArgsManager args2;
- args2.ForceSetArg("-datadir", m_path_root.string());
+ args2.ForceSetArg("-datadir", fs::PathToString(m_path_root));
args2.ReadSettingsFile();
args2.LockSettings([&](util::Settings& settings) { BOOST_CHECK_EQUAL(settings.rw_settings["name"].get_str(), "value"); });
@@ -1239,6 +1239,11 @@ BOOST_AUTO_TEST_CASE(util_FormatMoney)
BOOST_AUTO_TEST_CASE(util_ParseMoney)
{
BOOST_CHECK_EQUAL(ParseMoney("0.0").value(), 0);
+ BOOST_CHECK_EQUAL(ParseMoney(".").value(), 0);
+ BOOST_CHECK_EQUAL(ParseMoney("0.").value(), 0);
+ BOOST_CHECK_EQUAL(ParseMoney(".0").value(), 0);
+ BOOST_CHECK_EQUAL(ParseMoney(".6789").value(), 6789'0000);
+ BOOST_CHECK_EQUAL(ParseMoney("12345.").value(), COIN * 12345);
BOOST_CHECK_EQUAL(ParseMoney("12345.6789").value(), (COIN/10000)*123456789);
@@ -1276,11 +1281,18 @@ BOOST_AUTO_TEST_CASE(util_ParseMoney)
BOOST_CHECK(!ParseMoney(" "));
// Parsing two numbers should fail
+ BOOST_CHECK(!ParseMoney(".."));
+ BOOST_CHECK(!ParseMoney("0..0"));
BOOST_CHECK(!ParseMoney("1 2"));
BOOST_CHECK(!ParseMoney(" 1 2 "));
BOOST_CHECK(!ParseMoney(" 1.2 3 "));
BOOST_CHECK(!ParseMoney(" 1 2.3 "));
+ // Embedded whitespace should fail
+ BOOST_CHECK(!ParseMoney(" -1 .2 "));
+ BOOST_CHECK(!ParseMoney(" 1 .2 "));
+ BOOST_CHECK(!ParseMoney(" +1 .2 "));
+
// Attempted 63 bit overflow should fail
BOOST_CHECK(!ParseMoney("92233720368.54775808"));
diff --git a/src/test/validation_chainstate_tests.cpp b/src/test/validation_chainstate_tests.cpp
index 726c9ebbb8..9bb08f774f 100644
--- a/src/test/validation_chainstate_tests.cpp
+++ b/src/test/validation_chainstate_tests.cpp
@@ -107,20 +107,21 @@ BOOST_FIXTURE_TEST_CASE(chainstate_update_tip, TestChain100Setup)
curr_tip = ::g_best_block;
- CChainState* background_cs;
-
BOOST_CHECK_EQUAL(chainman.GetAll().size(), 2);
- for (CChainState* cs : chainman.GetAll()) {
- if (cs != &chainman.ActiveChainstate()) {
- background_cs = cs;
+
+ CChainState& background_cs{*[&] {
+ for (CChainState* cs : chainman.GetAll()) {
+ if (cs != &chainman.ActiveChainstate()) {
+ return cs;
+ }
}
- }
- BOOST_CHECK(background_cs);
+ assert(false);
+ }()};
// Create a block to append to the validation chain.
std::vector<CMutableTransaction> noTxns;
CScript scriptPubKey = CScript() << ToByteVector(coinbaseKey.GetPubKey()) << OP_CHECKSIG;
- CBlock validation_block = this->CreateBlock(noTxns, scriptPubKey, *background_cs);
+ CBlock validation_block = this->CreateBlock(noTxns, scriptPubKey, background_cs);
auto pblock = std::make_shared<const CBlock>(validation_block);
BlockValidationState state;
CBlockIndex* pindex = nullptr;
@@ -133,15 +134,15 @@ BOOST_FIXTURE_TEST_CASE(chainstate_update_tip, TestChain100Setup)
LOCK(::cs_main);
bool checked = CheckBlock(*pblock, state, chainparams.GetConsensus());
BOOST_CHECK(checked);
- bool accepted = background_cs->AcceptBlock(
+ bool accepted = background_cs.AcceptBlock(
pblock, state, &pindex, true, nullptr, &newblock);
BOOST_CHECK(accepted);
}
// UpdateTip is called here
- bool block_added = background_cs->ActivateBestChain(state, pblock);
+ bool block_added = background_cs.ActivateBestChain(state, pblock);
// Ensure tip is as expected
- BOOST_CHECK_EQUAL(background_cs->m_chain.Tip()->GetBlockHash(), validation_block.GetHash());
+ BOOST_CHECK_EQUAL(background_cs.m_chain.Tip()->GetBlockHash(), validation_block.GetHash());
// g_best_block should be unchanged after adding a block to the background
// validation chain.
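The chainstate test above replaces a mutable pointer plus loop with a reference initialized from an immediately invoked lambda, so the "background" object can never be left uninitialized. A minimal sketch of that pattern with placeholder types:

    #include <cassert>
    #include <vector>

    struct Chainstate { bool active = false; };

    int main()
    {
        std::vector<Chainstate> all(2);
        all[0].active = true;
        Chainstate* active = &all[0];

        // Initialize a reference by searching inside an immediately invoked
        // lambda; the assert documents that a non-active entry must exist.
        Chainstate& background{*[&]() -> Chainstate* {
            for (Chainstate& cs : all) {
                if (&cs != active) return &cs;
            }
            assert(false);
            return nullptr; // unreachable, keeps all control paths returning
        }()};

        assert(!background.active);
        return 0;
    }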
diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp
index f9caf48df8..55618a5c57 100644
--- a/src/torcontrol.cpp
+++ b/src/torcontrol.cpp
@@ -318,7 +318,7 @@ TorController::TorController(struct event_base* _base, const std::string& tor_co
// Read service private key if cached
std::pair<bool,std::string> pkf = ReadBinaryFile(GetPrivateKeyFile());
if (pkf.first) {
- LogPrint(BCLog::TOR, "tor: Reading cached private key from %s\n", GetPrivateKeyFile().string());
+ LogPrint(BCLog::TOR, "tor: Reading cached private key from %s\n", fs::PathToString(GetPrivateKeyFile()));
private_key = pkf.second;
}
}
@@ -356,9 +356,9 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe
service = LookupNumeric(std::string(service_id+".onion"), Params().GetDefaultPort());
LogPrintf("tor: Got service ID %s, advertising service %s\n", service_id, service.ToString());
if (WriteBinaryFile(GetPrivateKeyFile(), private_key)) {
- LogPrint(BCLog::TOR, "tor: Cached service private key to %s\n", GetPrivateKeyFile().string());
+ LogPrint(BCLog::TOR, "tor: Cached service private key to %s\n", fs::PathToString(GetPrivateKeyFile()));
} else {
- LogPrintf("tor: Error writing service private key to %s\n", GetPrivateKeyFile().string());
+ LogPrintf("tor: Error writing service private key to %s\n", fs::PathToString(GetPrivateKeyFile()));
}
AddLocal(service, LOCAL_MANUAL);
// ... onion requested - keep connection open
@@ -508,7 +508,7 @@ void TorController::protocolinfo_cb(TorControlConnection& _conn, const TorContro
} else if (methods.count("SAFECOOKIE")) {
// Cookie: hexdump -e '32/1 "%02x""\n"' ~/.tor/control_auth_cookie
LogPrint(BCLog::TOR, "tor: Using SAFECOOKIE authentication, reading cookie authentication from %s\n", cookiefile);
- std::pair<bool,std::string> status_cookie = ReadBinaryFile(cookiefile, TOR_COOKIE_SIZE);
+ std::pair<bool,std::string> status_cookie = ReadBinaryFile(fs::PathFromString(cookiefile), TOR_COOKIE_SIZE);
if (status_cookie.first && status_cookie.second.size() == TOR_COOKIE_SIZE) {
// _conn.Command("AUTHENTICATE " + HexStr(status_cookie.second), std::bind(&TorController::auth_cb, this, std::placeholders::_1, std::placeholders::_2));
cookie = std::vector<uint8_t>(status_cookie.second.begin(), status_cookie.second.end());
diff --git a/src/util/asmap.cpp b/src/util/asmap.cpp
index 5695c62012..b696c65e9d 100644
--- a/src/util/asmap.cpp
+++ b/src/util/asmap.cpp
@@ -201,7 +201,7 @@ std::vector<bool> DecodeAsmap(fs::path path)
}
fseek(filestr, 0, SEEK_END);
int length = ftell(filestr);
- LogPrintf("Opened asmap file %s (%d bytes) from disk\n", path, length);
+ LogPrintf("Opened asmap file %s (%d bytes) from disk\n", fs::quoted(fs::PathToString(path)), length);
fseek(filestr, 0, SEEK_SET);
uint8_t cur_byte;
for (int i = 0; i < length; ++i) {
@@ -211,7 +211,7 @@ std::vector<bool> DecodeAsmap(fs::path path)
}
}
if (!SanityCheckASMap(bits, 128)) {
- LogPrintf("Sanity check of asmap file %s failed\n", path);
+ LogPrintf("Sanity check of asmap file %s failed\n", fs::quoted(fs::PathToString(path)));
return {};
}
return bits;
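The asmap log lines now quote the path string before formatting it, which keeps paths with spaces or unusual characters unambiguous in the log. A minimal sketch of the effect using the standard std::quoted manipulator; the project's fs::quoted helper is assumed here to serve the same purpose:

    #include <iomanip>
    #include <iostream>
    #include <string>

    int main()
    {
        const std::string path = "/data dir/ip_asn.map";
        // Unquoted, the space makes the boundary of the path ambiguous in a log line.
        std::cout << "Opened asmap file " << path << " (1024 bytes)\n";
        // Quoted, the path is delimited and embedded quotes would be escaped.
        std::cout << "Opened asmap file " << std::quoted(path) << " (1024 bytes)\n";
        return 0;
    }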
diff --git a/src/util/settings.cpp b/src/util/settings.cpp
index 846b34089d..7fb35c073e 100644
--- a/src/util/settings.cpp
+++ b/src/util/settings.cpp
@@ -66,24 +66,24 @@ bool ReadSettings(const fs::path& path, std::map<std::string, SettingsValue>& va
fsbridge::ifstream file;
file.open(path);
if (!file.is_open()) {
- errors.emplace_back(strprintf("%s. Please check permissions.", path.string()));
+ errors.emplace_back(strprintf("%s. Please check permissions.", fs::PathToString(path)));
return false;
}
SettingsValue in;
if (!in.read(std::string{std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>()})) {
- errors.emplace_back(strprintf("Unable to parse settings file %s", path.string()));
+ errors.emplace_back(strprintf("Unable to parse settings file %s", fs::PathToString(path)));
return false;
}
if (file.fail()) {
- errors.emplace_back(strprintf("Failed reading settings file %s", path.string()));
+ errors.emplace_back(strprintf("Failed reading settings file %s", fs::PathToString(path)));
return false;
}
file.close(); // Done with file descriptor. Release while copying data.
if (!in.isObject()) {
- errors.emplace_back(strprintf("Found non-object value %s in settings file %s", in.write(), path.string()));
+ errors.emplace_back(strprintf("Found non-object value %s in settings file %s", in.write(), fs::PathToString(path)));
return false;
}
@@ -92,7 +92,7 @@ bool ReadSettings(const fs::path& path, std::map<std::string, SettingsValue>& va
for (size_t i = 0; i < in_keys.size(); ++i) {
auto inserted = values.emplace(in_keys[i], in_values[i]);
if (!inserted.second) {
- errors.emplace_back(strprintf("Found duplicate key %s in settings file %s", in_keys[i], path.string()));
+ errors.emplace_back(strprintf("Found duplicate key %s in settings file %s", in_keys[i], fs::PathToString(path)));
}
}
return errors.empty();
@@ -109,7 +109,7 @@ bool WriteSettings(const fs::path& path,
fsbridge::ofstream file;
file.open(path);
if (file.fail()) {
- errors.emplace_back(strprintf("Error: Unable to open settings file %s for writing", path.string()));
+ errors.emplace_back(strprintf("Error: Unable to open settings file %s for writing", fs::PathToString(path)));
return false;
}
file << out.write(/* prettyIndent= */ 1, /* indentLevel= */ 4) << std::endl;
diff --git a/src/util/strencodings.h b/src/util/strencodings.h
index 1f7762aeef..eedb5ec2f8 100644
--- a/src/util/strencodings.h
+++ b/src/util/strencodings.h
@@ -72,7 +72,7 @@ void SplitHostPort(std::string in, uint16_t& portOut, std::string& hostOut);
// LocaleIndependentAtoi is provided for backwards compatibility reasons.
//
-// New code should use the ParseInt64/ParseUInt64/ParseInt32/ParseUInt32 functions
+// New code should use ToIntegral or the ParseInt* functions
// which provide parse error feedback.
//
// The goal of LocaleIndependentAtoi is to replicate the exact defined behaviour
@@ -125,7 +125,7 @@ constexpr inline bool IsSpace(char c) noexcept {
/**
* Convert string to integral type T. Leading whitespace, a leading +, or any
* trailing character fail the parsing. The required format expressed as regex
- * is `-?[0-9]+`.
+ * is `-?[0-9]+`. The minus sign is only permitted for signed integer types.
*
* @returns std::nullopt if the entire string could not be parsed, or if the
* parsed value is not in the range representable by the type T.
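The updated comment describes a strict `-?[0-9]+` format with the minus sign reserved for signed types. A minimal sketch of such a parser built on std::from_chars; the name ToIntegralSketch and the exact behavior are illustrative, not the project's implementation:

    #include <cassert>
    #include <charconv>
    #include <cstdint>
    #include <optional>
    #include <string_view>

    // Illustrative strict parser: the whole string must match -?[0-9]+ and fit
    // in T. std::from_chars already rejects leading whitespace and a leading
    // '+'; checking that the end pointer consumed everything rejects trailing
    // characters.
    template <typename T>
    std::optional<T> ToIntegralSketch(std::string_view s)
    {
        T value{};
        const auto res = std::from_chars(s.data(), s.data() + s.size(), value);
        if (res.ec != std::errc{} || res.ptr != s.data() + s.size()) return std::nullopt;
        return value;
    }

    int main()
    {
        assert(ToIntegralSketch<int32_t>("-17").value() == -17);
        assert(!ToIntegralSketch<uint32_t>("-17")); // minus only valid for signed types
        assert(!ToIntegralSketch<int32_t>(" 17"));  // leading whitespace rejected
        assert(!ToIntegralSketch<int32_t>("+17"));  // leading '+' rejected
        assert(!ToIntegralSketch<int32_t>("17 "));  // trailing character rejected
        return 0;
    }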
diff --git a/src/util/syscall_sandbox.cpp b/src/util/syscall_sandbox.cpp
index c4006cbd3c..b361b09568 100644
--- a/src/util/syscall_sandbox.cpp
+++ b/src/util/syscall_sandbox.cpp
@@ -40,6 +40,34 @@ bool g_syscall_sandbox_log_violation_before_terminating{false};
#error Syscall sandbox is an experimental feature currently available only under Linux x86-64.
#endif // defined(__x86_64__)
+#ifndef SECCOMP_RET_KILL_PROCESS
+#define SECCOMP_RET_KILL_PROCESS 0x80000000U
+#endif
+
+// Define system call numbers for x86_64 that are referenced in the system call profile
+// but not provided by the kernel headers used in the GUIX build.
+// Usually, they can be found via "grep name /usr/include/x86_64-linux-gnu/asm/unistd_64.h"
+
+#ifndef __NR_clone3
+#define __NR_clone3 435
+#endif
+
+#ifndef __NR_statx
+#define __NR_statx 332
+#endif
+
+#ifndef __NR_getrandom
+#define __NR_getrandom 318
+#endif
+
+#ifndef __NR_membarrier
+#define __NR_membarrier 324
+#endif
+
+#ifndef __NR_copy_file_range
+#define __NR_copy_file_range 326
+#endif
+
// This list of syscalls in LINUX_SYSCALLS is only used to map syscall numbers to syscall names in
// order to be able to print user friendly error messages which include the syscall name in addition
// to the syscall number.
@@ -93,6 +121,7 @@ const std::map<uint32_t, std::string> LINUX_SYSCALLS{
{__NR_clock_nanosleep, "clock_nanosleep"},
{__NR_clock_settime, "clock_settime"},
{__NR_clone, "clone"},
+ {__NR_clone3, "clone3"},
{__NR_close, "close"},
{__NR_connect, "connect"},
{__NR_copy_file_range, "copy_file_range"},
@@ -158,9 +187,7 @@ const std::map<uint32_t, std::string> LINUX_SYSCALLS{
{__NR_getpmsg, "getpmsg"},
{__NR_getppid, "getppid"},
{__NR_getpriority, "getpriority"},
-#if defined(__NR_getrandom)
{__NR_getrandom, "getrandom"},
-#endif // defined(__NR_getrandom)
{__NR_getresgid, "getresgid"},
{__NR_getresuid, "getresuid"},
{__NR_getrlimit, "getrlimit"},
@@ -208,9 +235,7 @@ const std::map<uint32_t, std::string> LINUX_SYSCALLS{
{__NR_lstat, "lstat"},
{__NR_madvise, "madvise"},
{__NR_mbind, "mbind"},
-#if defined(__NR_membarrier)
{__NR_membarrier, "membarrier"},
-#endif // defined(__NR_membarrier)
{__NR_memfd_create, "memfd_create"},
{__NR_migrate_pages, "migrate_pages"},
{__NR_mincore, "mincore"},
@@ -254,15 +279,23 @@ const std::map<uint32_t, std::string> LINUX_SYSCALLS{
{__NR_pipe, "pipe"},
{__NR_pipe2, "pipe2"},
{__NR_pivot_root, "pivot_root"},
+#ifdef __NR_pkey_alloc
{__NR_pkey_alloc, "pkey_alloc"},
+#endif
+#ifdef __NR_pkey_free
{__NR_pkey_free, "pkey_free"},
+#endif
+#ifdef __NR_pkey_mprotect
{__NR_pkey_mprotect, "pkey_mprotect"},
+#endif
{__NR_poll, "poll"},
{__NR_ppoll, "ppoll"},
{__NR_prctl, "prctl"},
{__NR_pread64, "pread64"},
{__NR_preadv, "preadv"},
+#ifdef __NR_preadv2
{__NR_preadv2, "preadv2"},
+#endif
{__NR_prlimit64, "prlimit64"},
{__NR_process_vm_readv, "process_vm_readv"},
{__NR_process_vm_writev, "process_vm_writev"},
@@ -271,7 +304,9 @@ const std::map<uint32_t, std::string> LINUX_SYSCALLS{
{__NR_putpmsg, "putpmsg"},
{__NR_pwrite64, "pwrite64"},
{__NR_pwritev, "pwritev"},
+#ifdef __NR_pwritev2
{__NR_pwritev2, "pwritev2"},
+#endif
{__NR_query_module, "query_module"},
{__NR_quotactl, "quotactl"},
{__NR_read, "read"},
@@ -509,16 +544,16 @@ public:
void AllowAddressSpaceAccess()
{
- allowed_syscalls.insert(__NR_brk); // change data segment size
- allowed_syscalls.insert(__NR_madvise); // give advice about use of memory
-#if defined(__NR_membarrier)
+ allowed_syscalls.insert(__NR_brk); // change data segment size
+ allowed_syscalls.insert(__NR_madvise); // give advice about use of memory
allowed_syscalls.insert(__NR_membarrier); // issue memory barriers on a set of threads
-#endif // defined(__NR_membarrier)
- allowed_syscalls.insert(__NR_mlock); // lock memory
- allowed_syscalls.insert(__NR_mmap); // map files or devices into memory
- allowed_syscalls.insert(__NR_mprotect); // set protection on a region of memory
- allowed_syscalls.insert(__NR_munlock); // unlock memory
- allowed_syscalls.insert(__NR_munmap); // unmap files or devices into memory
+ allowed_syscalls.insert(__NR_mincore); // check if virtual memory is in RAM
+ allowed_syscalls.insert(__NR_mlock); // lock memory
+ allowed_syscalls.insert(__NR_mmap); // map files or devices into memory
+ allowed_syscalls.insert(__NR_mprotect); // set protection on a region of memory
+ allowed_syscalls.insert(__NR_mremap); // remap a file in memory
+ allowed_syscalls.insert(__NR_munlock); // unlock memory
+ allowed_syscalls.insert(__NR_munmap); // unmap files or devices into memory
}
void AllowEpoll()
@@ -536,31 +571,33 @@ public:
void AllowFileSystem()
{
- allowed_syscalls.insert(__NR_access); // check user's permissions for a file
- allowed_syscalls.insert(__NR_chdir); // change working directory
- allowed_syscalls.insert(__NR_chmod); // change permissions of a file
- allowed_syscalls.insert(__NR_fallocate); // manipulate file space
- allowed_syscalls.insert(__NR_fchmod); // change permissions of a file
- allowed_syscalls.insert(__NR_fchown); // change ownership of a file
- allowed_syscalls.insert(__NR_fdatasync); // synchronize a file's in-core state with storage device
- allowed_syscalls.insert(__NR_flock); // apply or remove an advisory lock on an open file
- allowed_syscalls.insert(__NR_fstat); // get file status
- allowed_syscalls.insert(__NR_fsync); // synchronize a file's in-core state with storage device
- allowed_syscalls.insert(__NR_ftruncate); // truncate a file to a specified length
- allowed_syscalls.insert(__NR_getcwd); // get current working directory
- allowed_syscalls.insert(__NR_getdents); // get directory entries
- allowed_syscalls.insert(__NR_getdents64); // get directory entries
- allowed_syscalls.insert(__NR_lstat); // get file status
- allowed_syscalls.insert(__NR_mkdir); // create a directory
- allowed_syscalls.insert(__NR_open); // open and possibly create a file
- allowed_syscalls.insert(__NR_openat); // open and possibly create a file
- allowed_syscalls.insert(__NR_readlink); // read value of a symbolic link
- allowed_syscalls.insert(__NR_rename); // change the name or location of a file
- allowed_syscalls.insert(__NR_rmdir); // delete a directory
- allowed_syscalls.insert(__NR_stat); // get file status
- allowed_syscalls.insert(__NR_statfs); // get filesystem statistics
- allowed_syscalls.insert(__NR_statx); // get file status (extended)
- allowed_syscalls.insert(__NR_unlink); // delete a name and possibly the file it refers to
+ allowed_syscalls.insert(__NR_access); // check user's permissions for a file
+ allowed_syscalls.insert(__NR_chdir); // change working directory
+ allowed_syscalls.insert(__NR_chmod); // change permissions of a file
+ allowed_syscalls.insert(__NR_copy_file_range); // copy a range of data from one file to another
+ allowed_syscalls.insert(__NR_fallocate); // manipulate file space
+ allowed_syscalls.insert(__NR_fchmod); // change permissions of a file
+ allowed_syscalls.insert(__NR_fchown); // change ownership of a file
+ allowed_syscalls.insert(__NR_fdatasync); // synchronize a file's in-core state with storage device
+ allowed_syscalls.insert(__NR_flock); // apply or remove an advisory lock on an open file
+ allowed_syscalls.insert(__NR_fstat); // get file status
+ allowed_syscalls.insert(__NR_newfstatat); // get file status
+ allowed_syscalls.insert(__NR_fsync); // synchronize a file's in-core state with storage device
+ allowed_syscalls.insert(__NR_ftruncate); // truncate a file to a specified length
+ allowed_syscalls.insert(__NR_getcwd); // get current working directory
+ allowed_syscalls.insert(__NR_getdents); // get directory entries
+ allowed_syscalls.insert(__NR_getdents64); // get directory entries
+ allowed_syscalls.insert(__NR_lstat); // get file status
+ allowed_syscalls.insert(__NR_mkdir); // create a directory
+ allowed_syscalls.insert(__NR_open); // open and possibly create a file
+ allowed_syscalls.insert(__NR_openat); // open and possibly create a file
+ allowed_syscalls.insert(__NR_readlink); // read value of a symbolic link
+ allowed_syscalls.insert(__NR_rename); // change the name or location of a file
+ allowed_syscalls.insert(__NR_rmdir); // delete a directory
+ allowed_syscalls.insert(__NR_stat); // get file status
+ allowed_syscalls.insert(__NR_statfs); // get filesystem statistics
+ allowed_syscalls.insert(__NR_statx); // get file status (extended)
+ allowed_syscalls.insert(__NR_unlink); // delete a name and possibly the file it refers to
}
void AllowFutex()
@@ -591,9 +628,7 @@ public:
void AllowGetRandom()
{
-#if defined(__NR_getrandom)
allowed_syscalls.insert(__NR_getrandom); // obtain a series of random bytes
-#endif // defined(__NR_getrandom)
}
void AllowGetSimpleId()
@@ -678,6 +713,7 @@ public:
void AllowProcessStartOrDeath()
{
allowed_syscalls.insert(__NR_clone); // create a child process
+ allowed_syscalls.insert(__NR_clone3); // create a child process
allowed_syscalls.insert(__NR_exit); // terminate the calling process
allowed_syscalls.insert(__NR_exit_group); // exit all threads in a process
allowed_syscalls.insert(__NR_fork); // create a child process
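The sandbox changes guard newer syscall numbers with fallback #defines so the file still builds against older kernel headers, at the cost of hard-coding the x86-64 numbers. A minimal sketch of the pattern, reusing the same numbers the hunk above hard-codes; they are architecture-specific and would be wrong on any other platform:

    #include <cstdint>
    #include <map>
    #include <string>

    // If the kernel headers are too old to define a syscall number, fall back
    // to the known x86-64 value so the allowlist and the name table still build.
    #ifndef __NR_clone3
    #define __NR_clone3 435
    #endif
    #ifndef __NR_statx
    #define __NR_statx 332
    #endif

    const std::map<uint32_t, std::string> SYSCALL_NAMES{
        {__NR_clone3, "clone3"},
        {__NR_statx, "statx"},
    };

    // Map a syscall number back to a readable name for error reporting.
    std::string SyscallName(uint32_t nr)
    {
        const auto it = SYSCALL_NAMES.find(nr);
        return it != SYSCALL_NAMES.end() ? it->second : "unknown";
    }

    int main()
    {
        return SyscallName(__NR_clone3) == "clone3" ? 0 : 1;
    }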
diff --git a/src/util/system.cpp b/src/util/system.cpp
index 79c08816fa..12d7dc49b2 100644
--- a/src/util/system.cpp
+++ b/src/util/system.cpp
@@ -98,7 +98,7 @@ bool LockDirectory(const fs::path& directory, const std::string lockfile_name, b
fs::path pathLockFile = directory / lockfile_name;
// If a lock for this directory already exists in the map, don't try to re-lock it
- if (dir_locks.count(pathLockFile.string())) {
+ if (dir_locks.count(fs::PathToString(pathLockFile))) {
return true;
}
@@ -107,11 +107,11 @@ bool LockDirectory(const fs::path& directory, const std::string lockfile_name, b
if (file) fclose(file);
auto lock = std::make_unique<fsbridge::FileLock>(pathLockFile);
if (!lock->TryLock()) {
- return error("Error while attempting to lock directory %s: %s", directory.string(), lock->GetReason());
+ return error("Error while attempting to lock directory %s: %s", fs::PathToString(directory), lock->GetReason());
}
if (!probe_only) {
// Lock successful and we're not just probing, put it into the map
- dir_locks.emplace(pathLockFile.string(), std::move(lock));
+ dir_locks.emplace(fs::PathToString(pathLockFile), std::move(lock));
}
return true;
}
@@ -119,7 +119,7 @@ bool LockDirectory(const fs::path& directory, const std::string lockfile_name, b
void UnlockDirectory(const fs::path& directory, const std::string& lockfile_name)
{
LOCK(cs_dir_locks);
- dir_locks.erase((directory / lockfile_name).string());
+ dir_locks.erase(fs::PathToString(directory / lockfile_name));
}
void ReleaseDirectoryLocks()
@@ -242,7 +242,7 @@ namespace {
fs::path StripRedundantLastElementsOfPath(const fs::path& path)
{
auto result = path;
- while (result.filename().string() == ".") {
+ while (fs::PathToString(result.filename()) == ".") {
result = result.parent_path();
}
@@ -402,7 +402,7 @@ const fs::path& ArgsManager::GetBlocksDirPath() const
if (!path.empty()) return path;
if (IsArgSet("-blocksdir")) {
- path = fs::system_complete(GetArg("-blocksdir", ""));
+ path = fs::system_complete(fs::PathFromString(GetArg("-blocksdir", "")));
if (!fs::is_directory(path)) {
path = "";
return path;
@@ -411,7 +411,7 @@ const fs::path& ArgsManager::GetBlocksDirPath() const
path = GetDataDirBase();
}
- path /= BaseParams().DataDir();
+ path /= fs::PathFromString(BaseParams().DataDir());
path /= "blocks";
fs::create_directories(path);
path = StripRedundantLastElementsOfPath(path);
@@ -429,7 +429,7 @@ const fs::path& ArgsManager::GetDataDir(bool net_specific) const
std::string datadir = GetArg("-datadir", "");
if (!datadir.empty()) {
- path = fs::system_complete(datadir);
+ path = fs::system_complete(fs::PathFromString(datadir));
if (!fs::is_directory(path)) {
path = "";
return path;
@@ -438,7 +438,7 @@ const fs::path& ArgsManager::GetDataDir(bool net_specific) const
path = GetDefaultDataDir();
}
if (net_specific)
- path /= BaseParams().DataDir();
+ path /= fs::PathFromString(BaseParams().DataDir());
if (fs::create_directories(path)) {
// This is the first run, create wallets subdirectory too
@@ -517,7 +517,7 @@ bool ArgsManager::GetSettingsPath(fs::path* filepath, bool temp) const
}
if (filepath) {
std::string settings = GetArg("-settings", BITCOIN_SETTINGS_FILENAME);
- *filepath = fsbridge::AbsPathJoin(GetDataDirNet(), temp ? settings + ".tmp" : settings);
+ *filepath = fsbridge::AbsPathJoin(GetDataDirNet(), fs::PathFromString(temp ? settings + ".tmp" : settings));
}
return true;
}
@@ -572,7 +572,7 @@ bool ArgsManager::WriteSettingsFile(std::vector<std::string>* errors) const
return false;
}
if (!RenameOver(path_tmp, path)) {
- SaveErrors({strprintf("Failed renaming settings file %s to %s\n", path_tmp.string(), path.string())}, errors);
+ SaveErrors({strprintf("Failed renaming settings file %s to %s\n", fs::PathToString(path_tmp), fs::PathToString(path))}, errors);
return false;
}
return true;
@@ -809,12 +809,12 @@ fs::path GetDefaultDataDir()
bool CheckDataDirOption()
{
std::string datadir = gArgs.GetArg("-datadir", "");
- return datadir.empty() || fs::is_directory(fs::system_complete(datadir));
+ return datadir.empty() || fs::is_directory(fs::system_complete(fs::PathFromString(datadir)));
}
fs::path GetConfigFile(const std::string& confPath)
{
- return AbsPathForConfigVal(fs::path(confPath), false);
+ return AbsPathForConfigVal(fs::PathFromString(confPath), false);
}
static bool GetConfigOptions(std::istream& stream, const std::string& filepath, std::string& error, std::vector<std::pair<std::string, std::string>>& options, std::list<SectionInfo>& sections)
@@ -1065,7 +1065,7 @@ bool RenameOver(fs::path src, fs::path dest)
return MoveFileExW(src.wstring().c_str(), dest.wstring().c_str(),
MOVEFILE_REPLACE_EXISTING) != 0;
#else
- int rc = std::rename(src.string().c_str(), dest.string().c_str());
+ int rc = std::rename(src.c_str(), dest.c_str());
return (rc == 0);
#endif /* WIN32 */
}
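LockDirectory now keys its registry on an explicit string form of the lock-file path rather than on path::string(). A minimal sketch of such a registry, with a bool standing in for the real file-lock object and the count/emplace/erase pattern the hunk preserves:

    #include <cassert>
    #include <filesystem>
    #include <map>
    #include <string>

    namespace stdfs = std::filesystem;

    // Registry keyed on a string form of the lock-file path. A bool stands in
    // for the real lock object; the real code stores a file-lock handle.
    std::map<std::string, bool> g_dir_locks;

    bool LockDir(const stdfs::path& dir, const std::string& lockfile)
    {
        const stdfs::path lock_path = dir / lockfile;
        const std::string key = lock_path.string();
        if (g_dir_locks.count(key)) return true; // already locked by this process
        g_dir_locks.emplace(key, true);          // real code would try the OS lock first
        return true;
    }

    void UnlockDir(const stdfs::path& dir, const std::string& lockfile)
    {
        g_dir_locks.erase((dir / lockfile).string());
    }

    int main()
    {
        assert(LockDir("/tmp/example-datadir", ".lock"));
        assert(LockDir("/tmp/example-datadir", ".lock")); // idempotent
        UnlockDir("/tmp/example-datadir", ".lock");
        return 0;
    }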
diff --git a/src/validation.cpp b/src/validation.cpp
index 880a01eb7d..78559a8ee6 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -474,7 +474,6 @@ private:
std::unique_ptr<CTxMemPoolEntry> m_entry;
std::list<CTransactionRef> m_replaced_transactions;
- bool m_replacement_transaction;
CAmount m_base_fees;
CAmount m_modified_fees;
/** Total modified fees of all transactions being replaced. */
@@ -556,7 +555,6 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting;
CTxMemPool::setEntries& setAncestors = ws.m_ancestors;
std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
- bool& fReplacementTransaction = ws.m_replacement_transaction;
CAmount& nModifiedFees = ws.m_modified_fees;
CAmount& nConflictingFees = ws.m_conflicting_fees;
size_t& nConflictingSize = ws.m_conflicting_size;
@@ -779,8 +777,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
}
- fReplacementTransaction = setConflicts.size();
- if (fReplacementTransaction) {
+ if (!setConflicts.empty()) {
CFeeRate newFeeRate(nModifiedFees, nSize);
// It's possible that the replacement pays more fees than its direct conflicts but not more
// than all conflicts (i.e. the direct conflicts have high-fee descendants). However, if the
@@ -885,7 +882,6 @@ bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws)
const CAmount& nModifiedFees = ws.m_modified_fees;
const CAmount& nConflictingFees = ws.m_conflicting_fees;
const size_t& nConflictingSize = ws.m_conflicting_size;
- const bool fReplacementTransaction = ws.m_replacement_transaction;
std::unique_ptr<CTxMemPoolEntry>& entry = ws.m_entry;
// Remove conflicting transactions from the mempool
@@ -901,11 +897,10 @@ bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws)
m_pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED);
// This transaction should only count for fee estimation if:
- // - it isn't a BIP 125 replacement transaction (may not be widely supported)
// - it's not being re-added during a reorg which bypasses typical mempool fee limits
// - the node is not behind
// - the transaction is not dependent on any other transactions in the mempool
- bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation(m_active_chainstate) && m_pool.HasNoInputsOf(tx);
+ bool validForFeeEstimation = !bypass_limits && IsCurrentForFeeEstimation(m_active_chainstate) && m_pool.HasNoInputsOf(tx);
// Store transaction in memory
m_pool.addUnchecked(*entry, setAncestors, validForFeeEstimation);
@@ -1882,14 +1877,13 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal);
- TRACE7(validation, block_connected,
- block.GetHash().ToString().c_str(),
+ TRACE6(validation, block_connected,
+ block.GetHash().data(),
pindex->nHeight,
block.vtx.size(),
nInputs,
nSigOpsCost,
- GetTimeMicros() - nTimeStart, // in microseconds (µs)
- block.GetHash().data()
+ GetTimeMicros() - nTimeStart // in microseconds (µs)
);
return true;
@@ -2034,8 +2028,8 @@ bool CChainState::FlushStateToDisk(
}
// Flush best chain related state. This can only be done if the blocks / block index write was also done.
if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
- LOG_TIME_SECONDS(strprintf("write coins cache to disk (%d coins, %.2fkB)",
- coins_count, coins_mem_usage / 1000));
+ LOG_TIME_MILLIS_WITH_CATEGORY(strprintf("write coins cache to disk (%d coins, %.2fkB)",
+ coins_count, coins_mem_usage / 1000), BCLog::BENCH);
// Typical Coin structures on disk are around 48 bytes in size.
// Pushing a new one to the database can cause it to be written
@@ -3210,7 +3204,7 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS
if (ppindex)
*ppindex = pindex;
if (pindex->nStatus & BLOCK_FAILED_MASK) {
- LogPrintf("ERROR: %s: block %s is marked invalid\n", __func__, hash.ToString());
+ LogPrint(BCLog::VALIDATION, "%s: block %s is marked invalid\n", __func__, hash.ToString());
return state.Invalid(BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate");
}
return true;
@@ -3225,16 +3219,18 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS
CBlockIndex* pindexPrev = nullptr;
BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
if (mi == m_block_index.end()) {
- LogPrintf("ERROR: %s: prev block not found\n", __func__);
+ LogPrint(BCLog::VALIDATION, "%s: %s prev block not found\n", __func__, hash.ToString());
return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, "prev-blk-not-found");
}
pindexPrev = (*mi).second;
if (pindexPrev->nStatus & BLOCK_FAILED_MASK) {
- LogPrintf("ERROR: %s: prev block invalid\n", __func__);
+ LogPrint(BCLog::VALIDATION, "%s: %s prev block invalid\n", __func__, hash.ToString());
return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
}
- if (!ContextualCheckBlockHeader(block, state, *this, chainparams, pindexPrev, GetAdjustedTime()))
- return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString());
+ if (!ContextualCheckBlockHeader(block, state, *this, chainparams, pindexPrev, GetAdjustedTime())) {
+ LogPrint(BCLog::VALIDATION, "%s: Consensus::ContextualCheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
+ return false;
+ }
/* Determine if this block descends from any block which has been found
* invalid (m_failed_blocks), then mark pindexPrev and any blocks between
@@ -3269,7 +3265,7 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS
setDirtyBlockIndex.insert(invalid_walk);
invalid_walk = invalid_walk->pprev;
}
- LogPrintf("ERROR: %s: prev block invalid\n", __func__);
+ LogPrint(BCLog::VALIDATION, "%s: %s prev block invalid\n", __func__, hash.ToString());
return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
}
}
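The Finalize hunk drops the replacement-transaction exclusion from fee estimation, leaving three conditions. A minimal restatement of the resulting predicate as a standalone function with hypothetical parameter names:

    #include <cassert>

    // Restated predicate: a transaction counts for fee estimation when it is
    // not re-added during a reorg (which bypasses mempool limits), the node is
    // caught up, and it spends no other in-mempool transaction.
    bool ValidForFeeEstimation(bool bypass_limits, bool is_current, bool has_no_mempool_inputs)
    {
        return !bypass_limits && is_current && has_no_mempool_inputs;
    }

    int main()
    {
        assert(ValidForFeeEstimation(false, true, true));
        assert(!ValidForFeeEstimation(true, true, true)); // reorg re-adds are excluded
        return 0;
    }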
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
index 2eb4d3106c..2290e119fd 100644
--- a/src/wallet/bdb.cpp
+++ b/src/wallet/bdb.cpp
@@ -61,9 +61,9 @@ bool WalletDatabaseFileId::operator==(const WalletDatabaseFileId& rhs) const
std::shared_ptr<BerkeleyEnvironment> GetBerkeleyEnv(const fs::path& env_directory)
{
LOCK(cs_db);
- auto inserted = g_dbenvs.emplace(env_directory.string(), std::weak_ptr<BerkeleyEnvironment>());
+ auto inserted = g_dbenvs.emplace(fs::PathToString(env_directory), std::weak_ptr<BerkeleyEnvironment>());
if (inserted.second) {
- auto env = std::make_shared<BerkeleyEnvironment>(env_directory.string());
+ auto env = std::make_shared<BerkeleyEnvironment>(env_directory);
inserted.first->second = env;
return env;
}
@@ -101,7 +101,7 @@ void BerkeleyEnvironment::Close()
if (error_file) fclose(error_file);
- UnlockDirectory(strPath, ".walletlock");
+ UnlockDirectory(fs::PathFromString(strPath), ".walletlock");
}
void BerkeleyEnvironment::Reset()
@@ -111,7 +111,7 @@ void BerkeleyEnvironment::Reset()
fMockDb = false;
}
-BerkeleyEnvironment::BerkeleyEnvironment(const fs::path& dir_path) : strPath(dir_path.string())
+BerkeleyEnvironment::BerkeleyEnvironment(const fs::path& dir_path) : strPath(fs::PathToString(dir_path))
{
Reset();
}
@@ -129,24 +129,24 @@ bool BerkeleyEnvironment::Open(bilingual_str& err)
return true;
}
- fs::path pathIn = strPath;
+ fs::path pathIn = fs::PathFromString(strPath);
TryCreateDirectories(pathIn);
if (!LockDirectory(pathIn, ".walletlock")) {
LogPrintf("Cannot obtain a lock on wallet directory %s. Another instance of bitcoin may be using it.\n", strPath);
- err = strprintf(_("Error initializing wallet database environment %s!"), Directory());
+ err = strprintf(_("Error initializing wallet database environment %s!"), fs::quoted(fs::PathToString(Directory())));
return false;
}
fs::path pathLogDir = pathIn / "database";
TryCreateDirectories(pathLogDir);
fs::path pathErrorFile = pathIn / "db.log";
- LogPrintf("BerkeleyEnvironment::Open: LogDir=%s ErrorFile=%s\n", pathLogDir.string(), pathErrorFile.string());
+ LogPrintf("BerkeleyEnvironment::Open: LogDir=%s ErrorFile=%s\n", fs::PathToString(pathLogDir), fs::PathToString(pathErrorFile));
unsigned int nEnvFlags = 0;
if (gArgs.GetBoolArg("-privdb", DEFAULT_WALLET_PRIVDB))
nEnvFlags |= DB_PRIVATE;
- dbenv->set_lg_dir(pathLogDir.string().c_str());
+ dbenv->set_lg_dir(fs::PathToString(pathLogDir).c_str());
dbenv->set_cachesize(0, 0x100000, 1); // 1 MiB should be enough for just the wallet
dbenv->set_lg_bsize(0x10000);
dbenv->set_lg_max(1048576);
@@ -173,7 +173,7 @@ bool BerkeleyEnvironment::Open(bilingual_str& err)
LogPrintf("BerkeleyEnvironment::Open: Error %d closing failed database environment: %s\n", ret2, DbEnv::strerror(ret2));
}
Reset();
- err = strprintf(_("Error initializing wallet database environment %s!"), Directory());
+ err = strprintf(_("Error initializing wallet database environment %s!"), fs::quoted(fs::PathToString(Directory())));
if (ret == DB_RUNRECOVERY) {
err += Untranslated(" ") + _("This error could occur if this wallet was not shutdown cleanly and was last loaded using a build with a newer version of Berkeley DB. If so, please use the software that last loaded this wallet");
}
@@ -261,7 +261,7 @@ bool BerkeleyDatabase::Verify(bilingual_str& errorStr)
fs::path file_path = walletDir / strFile;
LogPrintf("Using BerkeleyDB version %s\n", BerkeleyDatabaseVersion());
- LogPrintf("Using wallet %s\n", file_path.string());
+ LogPrintf("Using wallet %s\n", fs::PathToString(file_path));
if (!env->Open(errorStr)) {
return false;
@@ -274,7 +274,7 @@ bool BerkeleyDatabase::Verify(bilingual_str& errorStr)
Db db(env->dbenv.get(), 0);
int result = db.verify(strFile.c_str(), nullptr, nullptr, 0);
if (result != 0) {
- errorStr = strprintf(_("%s corrupt. Try using the wallet tool bitcoin-wallet to salvage or restoring a backup."), file_path);
+ errorStr = strprintf(_("%s corrupt. Try using the wallet tool bitcoin-wallet to salvage or restoring a backup."), fs::quoted(fs::PathToString(file_path)));
return false;
}
}
@@ -566,7 +566,7 @@ void BerkeleyEnvironment::Flush(bool fShutdown)
dbenv->log_archive(&listp, DB_ARCH_REMOVE);
Close();
if (!fMockDb) {
- fs::remove_all(fs::path(strPath) / "database");
+ fs::remove_all(fs::PathFromString(strPath) / "database");
}
}
}
@@ -614,21 +614,21 @@ bool BerkeleyDatabase::Backup(const std::string& strDest) const
// Copy wallet file
fs::path pathSrc = env->Directory() / strFile;
- fs::path pathDest(strDest);
+ fs::path pathDest(fs::PathFromString(strDest));
if (fs::is_directory(pathDest))
- pathDest /= strFile;
+ pathDest /= fs::PathFromString(strFile);
try {
if (fs::equivalent(pathSrc, pathDest)) {
- LogPrintf("cannot backup to wallet source file %s\n", pathDest.string());
+ LogPrintf("cannot backup to wallet source file %s\n", fs::PathToString(pathDest));
return false;
}
fs::copy_file(pathSrc, pathDest, fs::copy_option::overwrite_if_exists);
- LogPrintf("copied %s to %s\n", strFile, pathDest.string());
+ LogPrintf("copied %s to %s\n", strFile, fs::PathToString(pathDest));
return true;
} catch (const fs::filesystem_error& e) {
- LogPrintf("error copying %s to %s - %s\n", strFile, pathDest.string(), fsbridge::get_filesystem_error_message(e));
+ LogPrintf("error copying %s to %s - %s\n", strFile, fs::PathToString(pathDest), fsbridge::get_filesystem_error_message(e));
return false;
}
}
@@ -828,10 +828,10 @@ std::unique_ptr<BerkeleyDatabase> MakeBerkeleyDatabase(const fs::path& path, con
std::unique_ptr<BerkeleyDatabase> db;
{
LOCK(cs_db); // Lock env.m_databases until insert in BerkeleyDatabase constructor
- std::string data_filename = data_file.filename().string();
+ std::string data_filename = fs::PathToString(data_file.filename());
std::shared_ptr<BerkeleyEnvironment> env = GetBerkeleyEnv(data_file.parent_path());
if (env->m_databases.count(data_filename)) {
- error = Untranslated(strprintf("Refusing to load database. Data file '%s' is already loaded.", (env->Directory() / data_filename).string()));
+ error = Untranslated(strprintf("Refusing to load database. Data file '%s' is already loaded.", fs::PathToString(env->Directory() / data_filename)));
status = DatabaseStatus::FAILED_ALREADY_LOADED;
return nullptr;
}
diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h
index a8209587d7..b666a8e73a 100644
--- a/src/wallet/bdb.h
+++ b/src/wallet/bdb.h
@@ -63,7 +63,7 @@ public:
bool IsMock() const { return fMockDb; }
bool IsInitialized() const { return fDbEnvInit; }
- fs::path Directory() const { return strPath; }
+ fs::path Directory() const { return fs::PathFromString(strPath); }
bool Open(bilingual_str& error);
void Close();
@@ -141,7 +141,7 @@ public:
bool Verify(bilingual_str& error);
/** Return path to main database filename */
- std::string Filename() override { return (env->Directory() / strFile).string(); }
+ std::string Filename() override { return fs::PathToString(env->Directory() / strFile); }
std::string Format() override { return "bdb"; }
/**
diff --git a/src/wallet/coincontrol.h b/src/wallet/coincontrol.h
index c989512d3e..edd81e590f 100644
--- a/src/wallet/coincontrol.h
+++ b/src/wallet/coincontrol.h
@@ -93,7 +93,7 @@ public:
setSelected.insert(output);
}
- void Select(const COutPoint& outpoint, const CTxOut& txout)
+ void SelectExternal(const COutPoint& outpoint, const CTxOut& txout)
{
setSelected.insert(outpoint);
m_external_txouts.emplace(outpoint, txout);
diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp
index 8d5316e0af..c74c69ed09 100644
--- a/src/wallet/db.cpp
+++ b/src/wallet/db.cpp
@@ -12,7 +12,7 @@
std::vector<fs::path> ListDatabases(const fs::path& wallet_dir)
{
- const size_t offset = wallet_dir.string().size() + (wallet_dir == wallet_dir.root_name() ? 0 : 1);
+ const size_t offset = wallet_dir.native().size() + (wallet_dir == wallet_dir.root_name() ? 0 : 1);
std::vector<fs::path> paths;
boost::system::error_code ec;
@@ -20,9 +20,9 @@ std::vector<fs::path> ListDatabases(const fs::path& wallet_dir)
if (ec) {
if (fs::is_directory(*it)) {
it.no_push();
- LogPrintf("%s: %s %s -- skipping.\n", __func__, ec.message(), it->path().string());
+ LogPrintf("%s: %s %s -- skipping.\n", __func__, ec.message(), fs::PathToString(it->path()));
} else {
- LogPrintf("%s: %s %s\n", __func__, ec.message(), it->path().string());
+ LogPrintf("%s: %s %s\n", __func__, ec.message(), fs::PathToString(it->path()));
}
continue;
}
@@ -30,7 +30,8 @@ std::vector<fs::path> ListDatabases(const fs::path& wallet_dir)
try {
// Get wallet path relative to walletdir by removing walletdir from the wallet path.
// This can be replaced by boost::filesystem::lexically_relative once boost is bumped to 1.60.
- const fs::path path = it->path().string().substr(offset);
+ const auto path_str = it->path().native().substr(offset);
+ const fs::path path{path_str.begin(), path_str.end()};
if (it->status().type() == fs::directory_file &&
(IsBDBFile(BDBDataFile(it->path())) || IsSQLiteFile(SQLiteDataFile(it->path())))) {
@@ -50,7 +51,7 @@ std::vector<fs::path> ListDatabases(const fs::path& wallet_dir)
}
}
} catch (const std::exception& e) {
- LogPrintf("%s: Error scanning %s: %s\n", __func__, it->path().string(), e.what());
+ LogPrintf("%s: Error scanning %s: %s\n", __func__, fs::PathToString(it->path()), e.what());
it.no_push();
}
}
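
The relative-path computation in ListDatabases above strips the walletdir prefix plus one separator character from each directory entry (the root_name check skips the +1 when walletdir is itself a filesystem root). A rough Python sketch of the same arithmetic, using hypothetical paths:

    wallet_dir = "/home/user/.bitcoin/wallets"                  # hypothetical walletdir
    entry = "/home/user/.bitcoin/wallets/mywallet/wallet.dat"   # hypothetical entry below it
    offset = len(wallet_dir) + 1    # +1 skips the separator after the walletdir prefix
    print(entry[offset:])           # -> mywallet/wallet.dat
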
@@ -85,7 +86,7 @@ bool IsBDBFile(const fs::path& path)
// This check also prevents opening lock files.
boost::system::error_code ec;
auto size = fs::file_size(path, ec);
- if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), path.string());
+ if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), fs::PathToString(path));
if (size < 4096) return false;
fsbridge::ifstream file(path, std::ios::binary);
@@ -109,7 +110,7 @@ bool IsSQLiteFile(const fs::path& path)
// A SQLite Database file is at least 512 bytes.
boost::system::error_code ec;
auto size = fs::file_size(path, ec);
- if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), path.string());
+ if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), fs::PathToString(path));
if (size < 512) return false;
fsbridge::ifstream file(path, std::ios::binary);
diff --git a/src/wallet/dump.cpp b/src/wallet/dump.cpp
index c39c0c7e73..08d94b76d9 100644
--- a/src/wallet/dump.cpp
+++ b/src/wallet/dump.cpp
@@ -19,16 +19,16 @@ bool DumpWallet(CWallet& wallet, bilingual_str& error)
return false;
}
- fs::path path = dump_filename;
+ fs::path path = fs::PathFromString(dump_filename);
path = fs::absolute(path);
if (fs::exists(path)) {
- error = strprintf(_("File %s already exists. If you are sure this is what you want, move it out of the way first."), path.string());
+ error = strprintf(_("File %s already exists. If you are sure this is what you want, move it out of the way first."), fs::PathToString(path));
return false;
}
fsbridge::ofstream dump_file;
dump_file.open(path);
if (dump_file.fail()) {
- error = strprintf(_("Unable to open %s for writing"), path.string());
+ error = strprintf(_("Unable to open %s for writing"), fs::PathToString(path));
return false;
}
@@ -114,10 +114,10 @@ bool CreateFromDump(const std::string& name, const fs::path& wallet_path, biling
return false;
}
- fs::path dump_path = dump_filename;
+ fs::path dump_path = fs::PathFromString(dump_filename);
dump_path = fs::absolute(dump_path);
if (!fs::exists(dump_path)) {
- error = strprintf(_("Dump file %s does not exist."), dump_path.string());
+ error = strprintf(_("Dump file %s does not exist."), fs::PathToString(dump_path));
return false;
}
fsbridge::ifstream dump_file(dump_path);
diff --git a/src/wallet/interfaces.cpp b/src/wallet/interfaces.cpp
index 7d0cdb6934..57f1a6a67a 100644
--- a/src/wallet/interfaces.cpp
+++ b/src/wallet/interfaces.cpp
@@ -551,13 +551,13 @@ public:
}
std::string getWalletDir() override
{
- return GetWalletDir().string();
+ return fs::PathToString(GetWalletDir());
}
std::vector<std::string> listWalletDir() override
{
std::vector<std::string> paths;
for (auto& path : ListDatabases(GetWalletDir())) {
- paths.push_back(path.string());
+ paths.push_back(fs::PathToString(path));
}
return paths;
}
diff --git a/src/wallet/load.cpp b/src/wallet/load.cpp
index 1b841026b8..7ef5a0cf55 100644
--- a/src/wallet/load.cpp
+++ b/src/wallet/load.cpp
@@ -25,25 +25,25 @@ bool VerifyWallets(WalletContext& context)
ArgsManager& args = *Assert(context.args);
if (args.IsArgSet("-walletdir")) {
- fs::path wallet_dir = args.GetArg("-walletdir", "");
+ fs::path wallet_dir = fs::PathFromString(args.GetArg("-walletdir", ""));
boost::system::error_code error;
// The canonical path cleans the path, preventing >1 Berkeley environment instances for the same directory
fs::path canonical_wallet_dir = fs::canonical(wallet_dir, error);
if (error || !fs::exists(wallet_dir)) {
- chain.initError(strprintf(_("Specified -walletdir \"%s\" does not exist"), wallet_dir.string()));
+ chain.initError(strprintf(_("Specified -walletdir \"%s\" does not exist"), fs::PathToString(wallet_dir)));
return false;
} else if (!fs::is_directory(wallet_dir)) {
- chain.initError(strprintf(_("Specified -walletdir \"%s\" is not a directory"), wallet_dir.string()));
+ chain.initError(strprintf(_("Specified -walletdir \"%s\" is not a directory"), fs::PathToString(wallet_dir)));
return false;
// The canonical path transforms relative paths into absolute ones, so we check the non-canonical version
} else if (!wallet_dir.is_absolute()) {
- chain.initError(strprintf(_("Specified -walletdir \"%s\" is a relative path"), wallet_dir.string()));
+ chain.initError(strprintf(_("Specified -walletdir \"%s\" is a relative path"), fs::PathToString(wallet_dir)));
return false;
}
- args.ForceSetArg("-walletdir", canonical_wallet_dir.string());
+ args.ForceSetArg("-walletdir", fs::PathToString(canonical_wallet_dir));
}
- LogPrintf("Using wallet directory %s\n", GetWalletDir().string());
+ LogPrintf("Using wallet directory %s\n", fs::PathToString(GetWalletDir()));
chain.initMessage(_("Verifying wallet(s)…").translated);
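
As the comment above notes, fs::canonical "cleans" the configured -walletdir so that two spellings of the same directory cannot spawn more than one Berkeley environment. A rough Python analogue of that normalization, with hypothetical paths (os.path.realpath standing in for fs::canonical):

    import os.path

    a = "/home/user/.bitcoin/wallets"       # hypothetical -walletdir value
    b = "/home/user/.bitcoin/./wallets/"    # another spelling of the same directory
    # Both spellings normalize to the same canonical path, so only one
    # database environment is ever created for the directory.
    print(os.path.realpath(a) == os.path.realpath(b))   # -> True
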
@@ -70,7 +70,7 @@ bool VerifyWallets(WalletContext& context)
for (const auto& wallet : chain.getSettingsList("wallet")) {
const auto& wallet_file = wallet.get_str();
- const fs::path path = fsbridge::AbsPathJoin(GetWalletDir(), wallet_file);
+ const fs::path path = fsbridge::AbsPathJoin(GetWalletDir(), fs::PathFromString(wallet_file));
if (!wallet_paths.insert(path).second) {
chain.initWarning(strprintf(_("Ignoring duplicate -wallet %s."), wallet_file));
@@ -102,7 +102,7 @@ bool LoadWallets(WalletContext& context)
std::set<fs::path> wallet_paths;
for (const auto& wallet : chain.getSettingsList("wallet")) {
const auto& name = wallet.get_str();
- if (!wallet_paths.insert(name).second) {
+ if (!wallet_paths.insert(fs::PathFromString(name)).second) {
continue;
}
DatabaseOptions options;
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index 4d7fb2d38c..9b09bc23d6 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -550,7 +550,7 @@ RPCHelpMan importwallet()
EnsureWalletIsUnlocked(*pwallet);
fsbridge::ifstream file;
- file.open(request.params[0].get_str(), std::ios::in | std::ios::ate);
+ file.open(fs::u8path(request.params[0].get_str()), std::ios::in | std::ios::ate);
if (!file.is_open()) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Cannot open wallet dump file");
}
@@ -745,7 +745,7 @@ RPCHelpMan dumpwallet()
EnsureWalletIsUnlocked(wallet);
- fs::path filepath = request.params[0].get_str();
+ fs::path filepath = fs::u8path(request.params[0].get_str());
filepath = fs::absolute(filepath);
/* Prevent arbitrary files from being overwritten. There have been reports
@@ -754,7 +754,7 @@ RPCHelpMan dumpwallet()
* It may also avoid other security issues.
*/
if (fs::exists(filepath)) {
- throw JSONRPCError(RPC_INVALID_PARAMETER, filepath.string() + " already exists. If you are sure this is what you want, move it out of the way first");
+ throw JSONRPCError(RPC_INVALID_PARAMETER, filepath.u8string() + " already exists. If you are sure this is what you want, move it out of the way first");
}
fsbridge::ofstream file;
@@ -844,7 +844,7 @@ RPCHelpMan dumpwallet()
file.close();
UniValue reply(UniValue::VOBJ);
- reply.pushKV("filename", filepath.string());
+ reply.pushKV("filename", filepath.u8string());
return reply;
},
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 39cde4e4ec..6959466d1d 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -1854,7 +1854,7 @@ static RPCHelpMan keypoolrefill()
"\nFills the keypool."+
HELP_REQUIRING_PASSPHRASE,
{
- {"newsize", RPCArg::Type::NUM, RPCArg::Default{100}, "The new keypool size"},
+ {"newsize", RPCArg::Type::NUM, RPCArg::DefaultHint{strprintf("%u, or as set by -keypool", DEFAULT_KEYPOOL_SIZE)}, "The new keypool size"},
},
RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
@@ -1893,6 +1893,33 @@ static RPCHelpMan keypoolrefill()
}
+static RPCHelpMan newkeypool()
+{
+ return RPCHelpMan{"newkeypool",
+ "\nEntirely clears and refills the keypool."+
+ HELP_REQUIRING_PASSPHRASE,
+ {},
+ RPCResult{RPCResult::Type::NONE, "", ""},
+ RPCExamples{
+ HelpExampleCli("newkeypool", "")
+ + HelpExampleRpc("newkeypool", "")
+ },
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
+ std::shared_ptr<CWallet> const pwallet = GetWalletForJSONRPCRequest(request);
+ if (!pwallet) return NullUniValue;
+
+ LOCK(pwallet->cs_wallet);
+
+ LegacyScriptPubKeyMan& spk_man = EnsureLegacyScriptPubKeyMan(*pwallet, true);
+ spk_man.NewKeyPool();
+
+ return NullUniValue;
+},
+ };
+}
+
+
static RPCHelpMan walletpassphrase()
{
return RPCHelpMan{"walletpassphrase",
@@ -2555,7 +2582,7 @@ static RPCHelpMan listwalletdir()
UniValue wallets(UniValue::VARR);
for (const auto& path : ListDatabases(GetWalletDir())) {
UniValue wallet(UniValue::VOBJ);
- wallet.pushKV("name", path.string());
+ wallet.pushKV("name", path.u8string());
wallets.push_back(wallet);
}
@@ -2856,7 +2883,7 @@ static RPCHelpMan restorewallet()
WalletContext& context = EnsureWalletContext(request.context);
- std::string backup_file = request.params[1].get_str();
+ auto backup_file = fs::u8path(request.params[1].get_str());
if (!fs::exists(backup_file)) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Backup file does not exist");
@@ -2864,14 +2891,14 @@ static RPCHelpMan restorewallet()
std::string wallet_name = request.params[0].get_str();
- const fs::path wallet_path = fsbridge::AbsPathJoin(GetWalletDir(), wallet_name);
+ const fs::path wallet_path = fsbridge::AbsPathJoin(GetWalletDir(), fs::u8path(wallet_name));
if (fs::exists(wallet_path)) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Wallet name already exists.");
}
if (!TryCreateDirectories(wallet_path)) {
- throw JSONRPCError(RPC_WALLET_ERROR, strprintf("Failed to create database path '%s'. Database already exists.", wallet_path.string()));
+ throw JSONRPCError(RPC_WALLET_ERROR, strprintf("Failed to create database path '%s'. Database already exists.", wallet_path.u8string()));
}
auto wallet_file = wallet_path / "wallet.dat";
@@ -3177,6 +3204,34 @@ static RPCHelpMan listunspent()
};
}
+// Only includes key documentation where the key is snake_case in all RPC methods. MixedCase keys can be added later.
+static std::vector<RPCArg> FundTxDoc()
+{
+ return {
+ {"conf_target", RPCArg::Type::NUM, RPCArg::DefaultHint{"wallet -txconfirmtarget"}, "Confirmation target in blocks"},
+ {"estimate_mode", RPCArg::Type::STR, RPCArg::Default{"unset"}, std::string() + "The fee estimate mode, must be one of (case insensitive):\n"
+ " \"" + FeeModes("\"\n\"") + "\""},
+ {"replaceable", RPCArg::Type::BOOL, RPCArg::DefaultHint{"wallet default"}, "Marks this transaction as BIP125 replaceable.\n"
+ "Allows this transaction to be replaced by a transaction with higher fees"},
+ {"solving_data", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "Keys and scripts needed for producing a final transaction with a dummy signature.\n"
+ "Used for fee estimation during coin selection.",
+ {
+ {"pubkeys", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "Public keys involved in this transaction.",
+ {
+ {"pubkey", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "A public key"},
+ }},
+ {"scripts", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "Scripts involved in this transaction.",
+ {
+ {"script", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "A script"},
+ }},
+ {"descriptors", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "Descriptors that provide solving data for this transaction.",
+ {
+ {"descriptor", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "A descriptor"},
+ }},
+ }},
+ };
+}
+
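FundTxDoc above factors the shared funding options out of fundrawtransaction, send and walletcreatefundedpsbt, each of which splices them back in via Cat below. Purely for illustration, a hypothetical call passing the options FundTxDoc documents, in functional-test style (node and raw_tx_hex are assumed names):

    options = {
        "conf_target": 6,                 # confirmation target in blocks
        "estimate_mode": "economical",    # one of the FeeModes values
        "replaceable": True,              # mark the transaction BIP125-replaceable
    }
    funded = node.fundrawtransaction(raw_tx_hex, options)
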
void FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& fee_out, int& change_position, const UniValue& options, CCoinControl& coinControl, bool override_min_fee)
{
// Make sure the results are valid at least up to the most recent block
@@ -3292,7 +3347,7 @@ void FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& fee_out,
}
if (options.exists("solving_data")) {
- UniValue solving_data = options["solving_data"].get_obj();
+ const UniValue solving_data = options["solving_data"].get_obj();
if (solving_data.exists("pubkeys")) {
for (const UniValue& pk_univ : solving_data["pubkeys"].get_array().getValues()) {
const std::string& pk_str = pk_univ.get_str();
@@ -3300,7 +3355,7 @@ void FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& fee_out,
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("'%s' is not hex", pk_str));
}
const std::vector<unsigned char> data(ParseHex(pk_str));
- CPubKey pubkey(data.begin(), data.end());
+ const CPubKey pubkey(data.begin(), data.end());
if (!pubkey.IsFullyValid()) {
throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("'%s' is not a valid public key", pk_str));
}
@@ -3365,7 +3420,7 @@ void FundTransaction(CWallet& wallet, CMutableTransaction& tx, CAmount& fee_out,
wallet.chain().findCoins(coins);
for (const auto& coin : coins) {
if (!coin.second.out.IsNull()) {
- coinControl.Select(coin.first, coin.second.out);
+ coinControl.SelectExternal(coin.first, coin.second.out);
}
}
@@ -3394,6 +3449,7 @@ static RPCHelpMan fundrawtransaction()
{
{"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The hex string of the raw transaction"},
{"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "for backward compatibility: passing in a true instead of an object will result in {\"includeWatching\":true}",
+ Cat<std::vector<RPCArg>>(
{
{"add_inputs", RPCArg::Type::BOOL, RPCArg::Default{true}, "For a transaction with existing inputs, automatically include more if they are not enough."},
{"include_unsafe", RPCArg::Type::BOOL, RPCArg::Default{false}, "Include inputs that are not safe to spend (unconfirmed transactions from outside keys and unconfirmed replacement transactions).\n"
@@ -3416,32 +3472,8 @@ static RPCHelpMan fundrawtransaction()
{"vout_index", RPCArg::Type::NUM, RPCArg::Optional::OMITTED, "The zero-based output index, before a change output is added."},
},
},
- {"replaceable", RPCArg::Type::BOOL, RPCArg::DefaultHint{"wallet default"}, "Marks this transaction as BIP125 replaceable.\n"
- "Allows this transaction to be replaced by a transaction with higher fees"},
- {"conf_target", RPCArg::Type::NUM, RPCArg::DefaultHint{"wallet -txconfirmtarget"}, "Confirmation target in blocks"},
- {"estimate_mode", RPCArg::Type::STR, RPCArg::Default{"unset"}, std::string() + "The fee estimate mode, must be one of (case insensitive):\n"
- " \"" + FeeModes("\"\n\"") + "\""},
- {"solving_data", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "Keys and scripts needed for producing a final transaction with a dummy signature.\n"
- "Used for fee estimation during coin selection.",
- {
- {"pubkeys", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "Public keys involved in this transaction.",
- {
- {"pubkey", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "A public key"},
- },
- },
- {"scripts", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "Scripts involved in this transaction.",
- {
- {"script", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "A script"},
- },
- },
- {"descriptors", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "Descriptors that provide solving data for this transaction.",
- {
- {"descriptor", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "A descriptor"},
- },
- }
- }
- },
},
+ FundTxDoc()),
"options"},
{"iswitness", RPCArg::Type::BOOL, RPCArg::DefaultHint{"depends on heuristic tests"}, "Whether the transaction hex is a serialized witness transaction.\n"
"If iswitness is not present, heuristic tests will be used in decoding.\n"
@@ -4250,6 +4282,7 @@ static RPCHelpMan send()
" \"" + FeeModes("\"\n\"") + "\""},
{"fee_rate", RPCArg::Type::AMOUNT, RPCArg::DefaultHint{"not set, fall back to wallet fee estimation"}, "Specify a fee rate in " + CURRENCY_ATOM + "/vB."},
{"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "",
+ Cat<std::vector<RPCArg>>(
{
{"add_inputs", RPCArg::Type::BOOL, RPCArg::Default{false}, "If inputs are specified, automatically include more if they are not enough."},
{"include_unsafe", RPCArg::Type::BOOL, RPCArg::Default{false}, "Include inputs that are not safe to spend (unconfirmed transactions from outside keys and unconfirmed replacement transactions).\n"
@@ -4259,9 +4292,6 @@ static RPCHelpMan send()
{"change_address", RPCArg::Type::STR_HEX, RPCArg::DefaultHint{"pool address"}, "The bitcoin address to receive the change"},
{"change_position", RPCArg::Type::NUM, RPCArg::DefaultHint{"random"}, "The index of the change output"},
{"change_type", RPCArg::Type::STR, RPCArg::DefaultHint{"set by -changetype"}, "The output type to use. Only valid if change_address is not specified. Options are \"legacy\", \"p2sh-segwit\", and \"bech32\"."},
- {"conf_target", RPCArg::Type::NUM, RPCArg::DefaultHint{"wallet -txconfirmtarget"}, "Confirmation target in blocks"},
- {"estimate_mode", RPCArg::Type::STR, RPCArg::Default{"unset"}, std::string() + "The fee estimate mode, must be one of (case insensitive):\n"
- " \"" + FeeModes("\"\n\"") + "\""},
{"fee_rate", RPCArg::Type::AMOUNT, RPCArg::DefaultHint{"not set, fall back to wallet fee estimation"}, "Specify a fee rate in " + CURRENCY_ATOM + "/vB."},
{"include_watching", RPCArg::Type::BOOL, RPCArg::DefaultHint{"true for watch-only wallets, otherwise false"}, "Also select inputs which are watch only.\n"
"Only solvable inputs can be used. Watch-only destinations are solvable if the public key and/or output script was imported,\n"
@@ -4284,29 +4314,8 @@ static RPCHelpMan send()
{"vout_index", RPCArg::Type::NUM, RPCArg::Optional::OMITTED, "The zero-based output index, before a change output is added."},
},
},
- {"replaceable", RPCArg::Type::BOOL, RPCArg::DefaultHint{"wallet default"}, "Marks this transaction as BIP125 replaceable.\n"
- "Allows this transaction to be replaced by a transaction with higher fees"},
- {"solving_data", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "Keys and scripts needed for producing a final transaction with a dummy signature.\n"
- "Used for fee estimation during coin selection.",
- {
- {"pubkeys", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "Public keys involved in this transaction.",
- {
- {"pubkey", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "A public key"},
- },
- },
- {"scripts", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "Scripts involved in this transaction.",
- {
- {"script", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "A script"},
- },
- },
- {"descriptors", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "Descriptors that provide solving data for this transaction.",
- {
- {"descriptor", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "A descriptor"},
- },
- }
- }
- },
},
+ FundTxDoc()),
"options"},
},
RPCResult{
@@ -4627,6 +4636,7 @@ static RPCHelpMan walletcreatefundedpsbt()
},
{"locktime", RPCArg::Type::NUM, RPCArg::Default{0}, "Raw locktime. Non-0 value also locktime-activates inputs"},
{"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "",
+ Cat<std::vector<RPCArg>>(
{
{"add_inputs", RPCArg::Type::BOOL, RPCArg::Default{false}, "If inputs are specified, automatically include more if they are not enough."},
{"include_unsafe", RPCArg::Type::BOOL, RPCArg::Default{false}, "Include inputs that are not safe to spend (unconfirmed transactions from outside keys and unconfirmed replacement transactions).\n"
@@ -4647,32 +4657,8 @@ static RPCHelpMan walletcreatefundedpsbt()
{"vout_index", RPCArg::Type::NUM, RPCArg::Optional::OMITTED, "The zero-based output index, before a change output is added."},
},
},
- {"replaceable", RPCArg::Type::BOOL, RPCArg::DefaultHint{"wallet default"}, "Marks this transaction as BIP125 replaceable.\n"
- "Allows this transaction to be replaced by a transaction with higher fees"},
- {"conf_target", RPCArg::Type::NUM, RPCArg::DefaultHint{"wallet -txconfirmtarget"}, "Confirmation target in blocks"},
- {"estimate_mode", RPCArg::Type::STR, RPCArg::Default{"unset"}, std::string() + "The fee estimate mode, must be one of (case insensitive):\n"
- " \"" + FeeModes("\"\n\"") + "\""},
- {"solving_data", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "Keys and scripts needed for producing a final transaction with a dummy signature.\n"
- "Used for fee estimation during coin selection.",
- {
- {"pubkeys", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "Public keys involved in this transaction.",
- {
- {"pubkey", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "A public key"},
- },
- },
- {"scripts", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "Scripts involved in this transaction.",
- {
- {"script", RPCArg::Type::STR_HEX, RPCArg::Optional::OMITTED, "A script"},
- },
- },
- {"descriptors", RPCArg::Type::ARR, RPCArg::Default{UniValue::VARR}, "Descriptors that provide solving data for this transaction.",
- {
- {"descriptor", RPCArg::Type::STR, RPCArg::Optional::OMITTED, "A descriptor"},
- },
- }
- }
- },
},
+ FundTxDoc()),
"options"},
{"bip32derivs", RPCArg::Type::BOOL, RPCArg::Default{true}, "Include BIP 32 derivation paths for public keys if we know them"},
},
@@ -4916,6 +4902,7 @@ static const CRPCCommand commands[] =
{ "wallet", &listwallets, },
{ "wallet", &loadwallet, },
{ "wallet", &lockunspent, },
+ { "wallet", &newkeypool, },
{ "wallet", &removeprunedfunds, },
{ "wallet", &rescanblockchain, },
{ "wallet", &send, },
diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp
index fdfb36bb0a..619ebc8b4f 100644
--- a/src/wallet/scriptpubkeyman.cpp
+++ b/src/wallet/scriptpubkeyman.cpp
@@ -489,7 +489,7 @@ bool LegacyScriptPubKeyMan::Upgrade(int prev_version, int new_version, bilingual
}
// Regenerate the keypool if upgraded to HD
if (hd_upgrade) {
- if (!TopUp()) {
+ if (!NewKeyPool()) {
error = _("Unable to generate keys");
return false;
}
diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp
index 5bed09e067..5470177440 100644
--- a/src/wallet/spend.cpp
+++ b/src/wallet/spend.cpp
@@ -475,7 +475,7 @@ bool SelectCoins(const CWallet& wallet, const std::vector<COutput>& vAvailableCo
CInputCoin coin(outpoint, txout, input_bytes);
nValueFromPresetInputs += coin.txout.nValue;
- if (coin.m_input_bytes <= 0) {
+ if (coin.m_input_bytes == -1) {
return false; // Not solvable, can't estimate size for fee
}
coin.effective_value = coin.txout.nValue - coin_selection_params.m_effective_feerate.GetFee(coin.m_input_bytes);
@@ -814,7 +814,7 @@ static bool CreateTransactionInternal(
// Calculate the transaction fee
TxSize tx_sizes = CalculateMaximumSignedTxSize(CTransaction(txNew), &wallet, &coin_control);
int nBytes = tx_sizes.vsize;
- if (nBytes < 0) {
+ if (nBytes == -1) {
error = _("Missing solving data for estimating transaction size");
return false;
}
diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp
index 815d17967c..650e083e8e 100644
--- a/src/wallet/sqlite.cpp
+++ b/src/wallet/sqlite.cpp
@@ -67,7 +67,7 @@ static void SetPragma(sqlite3* db, const std::string& key, const std::string& va
}
SQLiteDatabase::SQLiteDatabase(const fs::path& dir_path, const fs::path& file_path, bool mock)
- : WalletDatabase(), m_mock(mock), m_dir_path(dir_path.string()), m_file_path(file_path.string())
+ : WalletDatabase(), m_mock(mock), m_dir_path(fs::PathToString(dir_path)), m_file_path(fs::PathToString(file_path))
{
{
LOCK(g_sqlite_mutex);
@@ -206,7 +206,7 @@ void SQLiteDatabase::Open()
if (m_db == nullptr) {
if (!m_mock) {
- TryCreateDirectories(m_dir_path);
+ TryCreateDirectories(fs::PathFromString(m_dir_path));
}
int ret = sqlite3_open_v2(m_file_path.c_str(), &m_db, flags, nullptr);
if (ret != SQLITE_OK) {
diff --git a/src/wallet/test/db_tests.cpp b/src/wallet/test/db_tests.cpp
index 16cb7e0baf..dba3f35025 100644
--- a/src/wallet/test/db_tests.cpp
+++ b/src/wallet/test/db_tests.cpp
@@ -16,7 +16,7 @@ BOOST_FIXTURE_TEST_SUITE(db_tests, BasicTestingSetup)
static std::shared_ptr<BerkeleyEnvironment> GetWalletEnv(const fs::path& path, std::string& database_filename)
{
fs::path data_file = BDBDataFile(path);
- database_filename = data_file.filename().string();
+ database_filename = fs::PathToString(data_file.filename());
return GetBerkeleyEnv(data_file.parent_path());
}
@@ -25,11 +25,7 @@ BOOST_AUTO_TEST_CASE(getwalletenv_file)
std::string test_name = "test_name.dat";
const fs::path datadir = gArgs.GetDataDirNet();
fs::path file_path = datadir / test_name;
-#if BOOST_VERSION >= 107700
- std::ofstream f(BOOST_FILESYSTEM_C_STR(file_path));
-#else
- std::ofstream f(file_path.BOOST_FILESYSTEM_C_STR);
-#endif // BOOST_VERSION >= 107700
+ fs::ofstream f(file_path);
f.close();
std::string filename;
diff --git a/src/wallet/test/init_test_fixture.cpp b/src/wallet/test/init_test_fixture.cpp
index 53c972c46d..170675c035 100644
--- a/src/wallet/test/init_test_fixture.cpp
+++ b/src/wallet/test/init_test_fixture.cpp
@@ -32,11 +32,7 @@ InitWalletDirTestingSetup::InitWalletDirTestingSetup(const std::string& chainNam
fs::create_directories(m_walletdir_path_cases["default"]);
fs::create_directories(m_walletdir_path_cases["custom"]);
fs::create_directories(m_walletdir_path_cases["relative"]);
-#if BOOST_VERSION >= 107700
- std::ofstream f(BOOST_FILESYSTEM_C_STR(m_walletdir_path_cases["file"]));
-#else
- std::ofstream f(m_walletdir_path_cases["file"].BOOST_FILESYSTEM_C_STR);
-#endif // BOOST_VERSION >= 107700
+ fs::ofstream f(m_walletdir_path_cases["file"]);
f.close();
}
@@ -50,5 +46,5 @@ InitWalletDirTestingSetup::~InitWalletDirTestingSetup()
void InitWalletDirTestingSetup::SetWalletDir(const fs::path& walletdir_path)
{
- gArgs.ForceSetArg("-walletdir", walletdir_path.string());
+ gArgs.ForceSetArg("-walletdir", fs::PathToString(walletdir_path));
}
diff --git a/src/wallet/test/init_tests.cpp b/src/wallet/test/init_tests.cpp
index 45e1b8c4b8..222c2bf4b7 100644
--- a/src/wallet/test/init_tests.cpp
+++ b/src/wallet/test/init_tests.cpp
@@ -17,7 +17,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_default)
SetWalletDir(m_walletdir_path_cases["default"]);
bool result = m_wallet_client->verify();
BOOST_CHECK(result == true);
- fs::path walletdir = gArgs.GetArg("-walletdir", "");
+ fs::path walletdir = fs::PathFromString(gArgs.GetArg("-walletdir", ""));
fs::path expected_path = fs::canonical(m_walletdir_path_cases["default"]);
BOOST_CHECK_EQUAL(walletdir, expected_path);
}
@@ -27,7 +27,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_custom)
SetWalletDir(m_walletdir_path_cases["custom"]);
bool result = m_wallet_client->verify();
BOOST_CHECK(result == true);
- fs::path walletdir = gArgs.GetArg("-walletdir", "");
+ fs::path walletdir = fs::PathFromString(gArgs.GetArg("-walletdir", ""));
fs::path expected_path = fs::canonical(m_walletdir_path_cases["custom"]);
BOOST_CHECK_EQUAL(walletdir, expected_path);
}
@@ -67,7 +67,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_no_trailing)
SetWalletDir(m_walletdir_path_cases["trailing"]);
bool result = m_wallet_client->verify();
BOOST_CHECK(result == true);
- fs::path walletdir = gArgs.GetArg("-walletdir", "");
+ fs::path walletdir = fs::PathFromString(gArgs.GetArg("-walletdir", ""));
fs::path expected_path = fs::canonical(m_walletdir_path_cases["default"]);
BOOST_CHECK_EQUAL(walletdir, expected_path);
}
@@ -77,7 +77,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_no_trailing2)
SetWalletDir(m_walletdir_path_cases["trailing2"]);
bool result = m_wallet_client->verify();
BOOST_CHECK(result == true);
- fs::path walletdir = gArgs.GetArg("-walletdir", "");
+ fs::path walletdir = fs::PathFromString(gArgs.GetArg("-walletdir", ""));
fs::path expected_path = fs::canonical(m_walletdir_path_cases["default"]);
BOOST_CHECK_EQUAL(walletdir, expected_path);
}
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index 9938380369..94b5abfba7 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -258,7 +258,7 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
SetMockTime(KEY_TIME);
m_coinbase_txns.emplace_back(CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);
- std::string backup_file = (gArgs.GetDataDirNet() / "wallet.backup").string();
+ std::string backup_file = fs::PathToString(gArgs.GetDataDirNet() / "wallet.backup");
// Import key into wallet and call dumpwallet to create backup file.
{
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index abfe9d7dba..598fce0112 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -2515,16 +2515,16 @@ std::unique_ptr<WalletDatabase> MakeWalletDatabase(const std::string& name, cons
// 2. Path to an existing directory.
// 3. Path to a symlink to a directory.
// 4. For backwards compatibility, the name of a data file in -walletdir.
- const fs::path wallet_path = fsbridge::AbsPathJoin(GetWalletDir(), name);
+ const fs::path wallet_path = fsbridge::AbsPathJoin(GetWalletDir(), fs::PathFromString(name));
fs::file_type path_type = fs::symlink_status(wallet_path).type();
if (!(path_type == fs::file_not_found || path_type == fs::directory_file ||
(path_type == fs::symlink_file && fs::is_directory(wallet_path)) ||
- (path_type == fs::regular_file && fs::path(name).filename() == name))) {
+ (path_type == fs::regular_file && fs::PathFromString(name).filename() == fs::PathFromString(name)))) {
error_string = Untranslated(strprintf(
"Invalid -wallet path '%s'. -wallet path should point to a directory where wallet.dat and "
"database/log.?????????? files can be stored, a location where such a directory could be created, "
"or (for backwards compatibility) the name of an existing data file in -walletdir (%s)",
- name, GetWalletDir()));
+ name, fs::quoted(fs::PathToString(GetWalletDir()))));
status = DatabaseStatus::FAILED_BAD_PATH;
return nullptr;
}
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index 8ff09a0878..a6839f1f78 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -1106,7 +1106,7 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas
try {
exists = fs::symlink_status(path).type() != fs::file_not_found;
} catch (const fs::filesystem_error& e) {
- error = Untranslated(strprintf("Failed to access database path '%s': %s", path.string(), fsbridge::get_filesystem_error_message(e)));
+ error = Untranslated(strprintf("Failed to access database path '%s': %s", fs::PathToString(path), fsbridge::get_filesystem_error_message(e)));
status = DatabaseStatus::FAILED_BAD_PATH;
return nullptr;
}
@@ -1118,33 +1118,33 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas
}
if (IsSQLiteFile(SQLiteDataFile(path))) {
if (format) {
- error = Untranslated(strprintf("Failed to load database path '%s'. Data is in ambiguous format.", path.string()));
+ error = Untranslated(strprintf("Failed to load database path '%s'. Data is in ambiguous format.", fs::PathToString(path)));
status = DatabaseStatus::FAILED_BAD_FORMAT;
return nullptr;
}
format = DatabaseFormat::SQLITE;
}
} else if (options.require_existing) {
- error = Untranslated(strprintf("Failed to load database path '%s'. Path does not exist.", path.string()));
+ error = Untranslated(strprintf("Failed to load database path '%s'. Path does not exist.", fs::PathToString(path)));
status = DatabaseStatus::FAILED_NOT_FOUND;
return nullptr;
}
if (!format && options.require_existing) {
- error = Untranslated(strprintf("Failed to load database path '%s'. Data is not in recognized format.", path.string()));
+ error = Untranslated(strprintf("Failed to load database path '%s'. Data is not in recognized format.", fs::PathToString(path)));
status = DatabaseStatus::FAILED_BAD_FORMAT;
return nullptr;
}
if (format && options.require_create) {
- error = Untranslated(strprintf("Failed to create database path '%s'. Database already exists.", path.string()));
+ error = Untranslated(strprintf("Failed to create database path '%s'. Database already exists.", fs::PathToString(path)));
status = DatabaseStatus::FAILED_ALREADY_EXISTS;
return nullptr;
}
// A db already exists so format is set, but options also specifies the format, so make sure they agree
if (format && options.require_format && format != options.require_format) {
- error = Untranslated(strprintf("Failed to load database path '%s'. Data is not in required format.", path.string()));
+ error = Untranslated(strprintf("Failed to load database path '%s'. Data is not in required format.", fs::PathToString(path)));
status = DatabaseStatus::FAILED_BAD_FORMAT;
return nullptr;
}
@@ -1166,7 +1166,7 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas
#ifdef USE_SQLITE
return MakeSQLiteDatabase(path, options, status, error);
#endif
- error = Untranslated(strprintf("Failed to open database path '%s'. Build does not support SQLite database format.", path.string()));
+ error = Untranslated(strprintf("Failed to open database path '%s'. Build does not support SQLite database format.", fs::PathToString(path)));
status = DatabaseStatus::FAILED_BAD_FORMAT;
return nullptr;
}
@@ -1174,7 +1174,7 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas
#ifdef USE_BDB
return MakeBerkeleyDatabase(path, options, status, error);
#endif
- error = Untranslated(strprintf("Failed to open database path '%s'. Build does not support Berkeley DB database format.", path.string()));
+ error = Untranslated(strprintf("Failed to open database path '%s'. Build does not support Berkeley DB database format.", fs::PathToString(path)));
status = DatabaseStatus::FAILED_BAD_FORMAT;
return nullptr;
}
diff --git a/src/wallet/wallettool.cpp b/src/wallet/wallettool.cpp
index e3cb5cee5d..788679bbeb 100644
--- a/src/wallet/wallettool.cpp
+++ b/src/wallet/wallettool.cpp
@@ -125,7 +125,7 @@ bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command)
return false;
}
const std::string name = args.GetArg("-wallet", "");
- const fs::path path = fsbridge::AbsPathJoin(GetWalletDir(), name);
+ const fs::path path = fsbridge::AbsPathJoin(GetWalletDir(), fs::PathFromString(name));
if (command == "create") {
DatabaseOptions options;
diff --git a/src/wallet/walletutil.cpp b/src/wallet/walletutil.cpp
index 1c518daba6..7f813432b3 100644
--- a/src/wallet/walletutil.cpp
+++ b/src/wallet/walletutil.cpp
@@ -12,7 +12,7 @@ fs::path GetWalletDir()
fs::path path;
if (gArgs.IsArgSet("-walletdir")) {
- path = gArgs.GetArg("-walletdir", "");
+ path = fs::PathFromString(gArgs.GetArg("-walletdir", ""));
if (!fs::is_directory(path)) {
// If the path specified doesn't exist, we return the deliberately
// invalid empty string.
diff --git a/test/README.md b/test/README.md
index acd68d8d8f..c9e15c4968 100644
--- a/test/README.md
+++ b/test/README.md
@@ -275,12 +275,15 @@ Use the `-v` option for verbose output.
#### Dependencies
-| Lint test | Dependency | Version [used by CI](../ci/lint/04_install.sh) | Installation
-|-----------|:----------:|:-------------------------------------------:|--------------
-| [`lint-python.sh`](lint/lint-python.sh) | [flake8](https://gitlab.com/pycqa/flake8) | [3.8.3](https://github.com/bitcoin/bitcoin/pull/19348) | `pip3 install flake8==3.8.3`
-| [`lint-python.sh`](lint/lint-python.sh) | [mypy](https://github.com/python/mypy) | [0.781](https://github.com/bitcoin/bitcoin/pull/19348) | `pip3 install mypy==0.781`
-| [`lint-shell.sh`](lint/lint-shell.sh) | [ShellCheck](https://github.com/koalaman/shellcheck) | [0.7.2](https://github.com/bitcoin/bitcoin/pull/21749) | [details...](https://github.com/koalaman/shellcheck#installing)
-| [`lint-spelling.sh`](lint/lint-spelling.sh) | [codespell](https://github.com/codespell-project/codespell) | [2.0.0](https://github.com/bitcoin/bitcoin/pull/20817) | `pip3 install codespell==2.0.0`
+| Lint test | Dependency |
+|-----------|:----------:|
+| [`lint-python.sh`](lint/lint-python.sh) | [flake8](https://gitlab.com/pycqa/flake8)
+| [`lint-python.sh`](lint/lint-python.sh) | [mypy](https://github.com/python/mypy)
+| [`lint-python.sh`](lint/lint-python.sh) | [pyzmq](https://github.com/zeromq/pyzmq)
+| [`lint-shell.sh`](lint/lint-shell.sh) | [ShellCheck](https://github.com/koalaman/shellcheck)
+| [`lint-spelling.sh`](lint/lint-spelling.sh) | [codespell](https://github.com/codespell-project/codespell)
+
+The versions in use and installation instructions are available in the [CI setup](../ci/lint/04_install.sh).
Please be aware that on Linux distributions all dependencies are usually available as packages, but could be outdated.
diff --git a/test/functional/combine_logs.py b/test/functional/combine_logs.py
index 00f2833f55..71dfb4c01a 100755
--- a/test/functional/combine_logs.py
+++ b/test/functional/combine_logs.py
@@ -188,7 +188,7 @@ def print_logs_plain(log_events, colors):
def print_logs_html(log_events):
"""Renders the iterator of log events into html."""
try:
- import jinja2
+ import jinja2  # type: ignore
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
diff --git a/test/functional/data/__init__.py b/test/functional/data/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/functional/data/__init__.py
diff --git a/test/functional/feature_addrman.py b/test/functional/feature_addrman.py
index 5a8394db2e..93d50c1369 100755
--- a/test/functional/feature_addrman.py
+++ b/test/functional/feature_addrman.py
@@ -109,7 +109,7 @@ class AddrmanTest(BitcoinTestFramework):
self.stop_node(0)
write_addrman(peers_dat, len_tried=-1)
self.nodes[0].assert_start_raises_init_error(
- expected_msg=init_error("Corrupt CAddrMan serialization: nTried=-1, should be in \\[0, 16384\\]:.*"),
+ expected_msg=init_error("Corrupt AddrMan serialization: nTried=-1, should be in \\[0, 16384\\]:.*"),
match=ErrorMatch.FULL_REGEX,
)
@@ -117,7 +117,7 @@ class AddrmanTest(BitcoinTestFramework):
self.stop_node(0)
write_addrman(peers_dat, len_new=-1)
self.nodes[0].assert_start_raises_init_error(
- expected_msg=init_error("Corrupt CAddrMan serialization: nNew=-1, should be in \\[0, 65536\\]:.*"),
+ expected_msg=init_error("Corrupt AddrMan serialization: nNew=-1, should be in \\[0, 65536\\]:.*"),
match=ErrorMatch.FULL_REGEX,
)
diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py
index ee2c71cd42..99ac1b5884 100755
--- a/test/functional/feature_bip68_sequence.py
+++ b/test/functional/feature_bip68_sequence.py
@@ -24,7 +24,6 @@ from test_framework.util import (
assert_equal,
assert_greater_than,
assert_raises_rpc_error,
- satoshi_round,
softfork_active,
)
from test_framework.script_util import DUMMY_P2WPKH_SCRIPT
@@ -94,7 +93,7 @@ class BIP68Test(BitcoinTestFramework):
utxo = utxos[0]
tx1 = CTransaction()
- value = int(satoshi_round(utxo["amount"] - self.relayfee)*COIN)
+ value = int((utxo["amount"] - self.relayfee) * COIN)
# Check that the disable flag disables relative locktime.
# If sequence locks were used, this would require 1 block for the
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
index 9c225dc687..ac00db8ff0 100755
--- a/test/functional/feature_fee_estimation.py
+++ b/test/functional/feature_fee_estimation.py
@@ -4,6 +4,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test fee estimation code."""
from decimal import Decimal
+import os
import random
from test_framework.messages import (
@@ -155,6 +156,21 @@ def check_estimates(node, fees_seen):
check_raw_estimates(node, fees_seen)
check_smart_estimates(node, fees_seen)
+
+def send_tx(node, utxo, feerate):
+ """Broadcast a 1in-1out transaction with a specific input and feerate (sat/vb)."""
+ overhead, op, scriptsig, nseq, value, spk = 10, 36, 5, 4, 8, 24
+ tx_size = overhead + op + scriptsig + nseq + value + spk
+ fee = tx_size * feerate
+
+ tx = CTransaction()
+ tx.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), SCRIPT_SIG[utxo["vout"]])]
+ tx.vout = [CTxOut(int(utxo["amount"] * COIN) - fee, P2SH_1)]
+ txid = node.sendrawtransaction(tx.serialize().hex())
+
+ return txid
+
+
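The constants in send_tx above add up to an assumed serialized size of 87 bytes for a bare 1-in/1-out transaction, so the fee scales linearly with the requested feerate. A quick sanity check of that arithmetic in Python, using the values from the helper:

    overhead, op, scriptsig, nseq, value, spk = 10, 36, 5, 4, 8, 24
    tx_size = overhead + op + scriptsig + nseq + value + spk   # 87 bytes
    fee = tx_size * 10                                         # at 10 sat/vb -> 870 satoshis
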
class EstimateFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
@@ -212,20 +228,16 @@ class EstimateFeeTest(BitcoinTestFramework):
newmem.append(utx)
self.memutxo = newmem
- def run_test(self):
- self.log.info("This test is time consuming, please be patient")
- self.log.info("Splitting inputs so we can generate tx's")
-
- # Start node0
- self.start_node(0)
+ def initial_split(self, node):
+ """Split two coinbase UTxOs into many small coins"""
self.txouts = []
self.txouts2 = []
# Split a coinbase into two transaction puzzle outputs
- split_inputs(self.nodes[0], self.nodes[0].listunspent(0), self.txouts, True)
+ split_inputs(node, node.listunspent(0), self.txouts, True)
# Mine
- while len(self.nodes[0].getrawmempool()) > 0:
- self.generate(self.nodes[0], 1)
+ while len(node.getrawmempool()) > 0:
+ self.generate(node, 1)
# Repeatedly split those 2 outputs, doubling twice for each rep
# Use txouts to monitor the available utxo, since these won't be tracked in wallet
@@ -233,27 +245,19 @@ class EstimateFeeTest(BitcoinTestFramework):
while reps < 5:
# Double txouts to txouts2
while len(self.txouts) > 0:
- split_inputs(self.nodes[0], self.txouts, self.txouts2)
- while len(self.nodes[0].getrawmempool()) > 0:
- self.generate(self.nodes[0], 1)
+ split_inputs(node, self.txouts, self.txouts2)
+ while len(node.getrawmempool()) > 0:
+ self.generate(node, 1)
# Double txouts2 to txouts
while len(self.txouts2) > 0:
- split_inputs(self.nodes[0], self.txouts2, self.txouts)
- while len(self.nodes[0].getrawmempool()) > 0:
- self.generate(self.nodes[0], 1)
+ split_inputs(node, self.txouts2, self.txouts)
+ while len(node.getrawmempool()) > 0:
+ self.generate(node, 1)
reps += 1
- self.log.info("Finished splitting")
-
- # Now we can connect the other nodes, didn't want to connect them earlier
- # so the estimates would not be affected by the splitting transactions
- self.start_node(1)
- self.start_node(2)
- self.connect_nodes(1, 0)
- self.connect_nodes(0, 2)
- self.connect_nodes(2, 1)
-
- self.sync_all()
+ def sanity_check_estimates_range(self):
+ """Populate estimation buckets, assert estimates are in a sane range and
+ are strictly increasing as the target decreases."""
self.fees_per_kb = []
self.memutxo = []
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
@@ -279,11 +283,100 @@ class EstimateFeeTest(BitcoinTestFramework):
self.log.info("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb)
- # check that the effective feerate is greater than or equal to the mempoolminfee even for high mempoolminfee
- self.log.info("Test fee rate estimation after restarting node with high MempoolMinFee")
+ def test_feerate_mempoolminfee(self):
high_val = 3*self.nodes[1].estimatesmartfee(1)['feerate']
self.restart_node(1, extra_args=[f'-minrelaytxfee={high_val}'])
check_estimates(self.nodes[1], self.fees_per_kb)
+ self.restart_node(1)
+
+ def sanity_check_rbf_estimates(self, utxos):
+ """During 5 blocks, broadcast low fee transactions. Only 10% of them get
+ confirmed and the remaining ones get RBF'd with a high fee transaction at
+ the next block.
+ The block policy estimator should return the high feerate.
+ """
+ # The broadcaster and block producer
+ node = self.nodes[0]
+ miner = self.nodes[1]
+ # In sat/vb
+ low_feerate = 1
+ high_feerate = 10
+ # Cache the UTXOs whose spending transactions will be replaced after they
+ # fail to get confirmed
+ utxos_to_respend = []
+ txids_to_replace = []
+
+ assert len(utxos) >= 250
+ for _ in range(5):
+ # Broadcast 45 low fee transactions that will need to be RBF'd
+ for _ in range(45):
+ u = utxos.pop(0)
+ txid = send_tx(node, u, low_feerate)
+ utxos_to_respend.append(u)
+ txids_to_replace.append(txid)
+ # Broadcast 5 low fee transactions which don't need to be RBF'd
+ for _ in range(5):
+ send_tx(node, utxos.pop(0), low_feerate)
+ # Mine the transactions on another node
+ self.sync_mempools(wait=.1, nodes=[node, miner])
+ for txid in txids_to_replace:
+ miner.prioritisetransaction(txid=txid, fee_delta=-COIN)
+ self.generate(miner, 1)
+ self.sync_blocks(wait=.1, nodes=[node, miner])
+ # RBF the low-fee transactions
+ while True:
+ try:
+ u = utxos_to_respend.pop(0)
+ send_tx(node, u, high_feerate)
+ except IndexError:
+ break
+
+ # Mine the last replacement txs
+ self.sync_mempools(wait=.1, nodes=[node, miner])
+ self.generate(miner, 1)
+ self.sync_blocks(wait=.1, nodes=[node, miner])
+
+ # Only 10% of the transactions were really confirmed with a low feerate,
+ # the rest needed to be RBF'd. The estimator must therefore return the high
+ # feerate at which ~90% of the transactions confirmed.
+ high_feerate_kvb = Decimal(high_feerate) / COIN * 10**3
+ est_feerate = node.estimatesmartfee(2)["feerate"]
+ assert est_feerate == high_feerate_kvb
+
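For reference, the sat/vB to BTC/kvB conversion checked just above works out as follows (a small Python check; COIN is 100000000 satoshis, as in test_framework.messages):

    from decimal import Decimal

    COIN = 100_000_000               # satoshis per bitcoin
    high_feerate = 10                # sat/vB, as used in the test above
    # 10 sat/vB == 10,000 sat/kvB == 0.0001 BTC/kvB
    high_feerate_kvb = Decimal(high_feerate) / COIN * 10**3
    assert high_feerate_kvb == Decimal("0.0001")
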
+ def run_test(self):
+ self.log.info("This test is time consuming, please be patient")
+ self.log.info("Splitting inputs so we can generate tx's")
+
+ # Split two coinbases into many small utxos
+ self.start_node(0)
+ self.initial_split(self.nodes[0])
+ self.log.info("Finished splitting")
+
+ # Now we can connect the other nodes. We didn't want to connect them earlier,
+ # so that the estimates would not be affected by the splitting transactions.
+ self.start_node(1)
+ self.start_node(2)
+ self.connect_nodes(1, 0)
+ self.connect_nodes(0, 2)
+ self.connect_nodes(2, 1)
+ self.sync_all()
+
+ self.log.info("Testing estimates with single transactions.")
+ self.sanity_check_estimates_range()
+
+ # check that the effective feerate is greater than or equal to the mempoolminfee even for high mempoolminfee
+ self.log.info("Test fee rate estimation after restarting node with high MempoolMinFee")
+ self.test_feerate_mempoolminfee()
+
+ self.log.info("Restarting node with fresh estimation")
+ self.stop_node(0)
+ fee_dat = os.path.join(self.nodes[0].datadir, self.chain, "fee_estimates.dat")
+ os.remove(fee_dat)
+ self.start_node(0)
+ self.connect_nodes(0, 1)
+ self.connect_nodes(0, 2)
+
+ self.log.info("Testing estimates with RBF.")
+ self.sanity_check_rbf_estimates(self.confutxo + self.memutxo)
self.log.info("Testing that fee estimation is disabled in blocksonly.")
self.restart_node(0, ["-blocksonly"])
diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py
index 25d1cb2bf1..2f9ab34e99 100755
--- a/test/functional/feature_segwit.py
+++ b/test/functional/feature_segwit.py
@@ -31,11 +31,11 @@ from test_framework.script import (
OP_1,
OP_2,
OP_CHECKMULTISIG,
- OP_CHECKSIG,
OP_DROP,
OP_TRUE,
)
from test_framework.script_util import (
+ key_to_p2pk_script,
key_to_p2pkh_script,
key_to_p2wpkh_script,
script_to_p2sh_script,
@@ -459,7 +459,7 @@ class SegWitTest(BitcoinTestFramework):
importlist.append(script_to_p2wsh_script(bare).hex())
else:
pubkey = bytes.fromhex(v['pubkey'])
- p2pk = CScript([pubkey, OP_CHECKSIG])
+ p2pk = key_to_p2pk_script(pubkey)
p2pkh = key_to_p2pkh_script(pubkey)
importlist.append(p2pk.hex())
importlist.append(p2pkh.hex())
@@ -628,7 +628,7 @@ class SegWitTest(BitcoinTestFramework):
pubkey = bytes.fromhex(v['pubkey'])
p2wpkh = key_to_p2wpkh_script(pubkey)
p2sh_p2wpkh = script_to_p2sh_script(p2wpkh)
- p2pk = CScript([pubkey, OP_CHECKSIG])
+ p2pk = key_to_p2pk_script(pubkey)
p2pkh = CScript(bytes.fromhex(v['scriptPubKey']))
p2sh_p2pk = script_to_p2sh_script(p2pk)
p2sh_p2pkh = script_to_p2sh_script(p2pkh)
diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py
index c44a48f15f..50a25ee1ef 100755
--- a/test/functional/feature_taproot.py
+++ b/test/functional/feature_taproot.py
@@ -76,6 +76,7 @@ from test_framework.script import (
taproot_construct,
)
from test_framework.script_util import (
+ key_to_p2pk_script,
key_to_p2wpkh_script,
keyhash_to_p2pkh_script,
script_to_p2sh_script,
@@ -1109,7 +1110,7 @@ def spenders_taproot_active():
for witv0 in [False, True]:
for hashtype in VALID_SIGHASHES_ECDSA + [random.randrange(0x04, 0x80), random.randrange(0x84, 0x100)]:
standard = (hashtype in VALID_SIGHASHES_ECDSA) and (compressed or not witv0)
- add_spender(spenders, "legacy/pk-wrongkey", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=CScript([pubkey1, OP_CHECKSIG]), **SINGLE_SIG, key=eckey1, failure={"key": eckey2}, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
+ add_spender(spenders, "legacy/pk-wrongkey", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, script=key_to_p2pk_script(pubkey1), **SINGLE_SIG, key=eckey1, failure={"key": eckey2}, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
add_spender(spenders, "legacy/pkh-sighashflip", hashtype=hashtype, p2sh=p2sh, witv0=witv0, standard=standard, pkh=pubkey1, key=eckey1, **SIGHASH_BITFLIP, sigops_weight=4-3*witv0, **ERR_NO_SUCCESS)
# Verify that OP_CHECKSIGADD wasn't accidentally added to pre-taproot validation logic.
diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py
index 531c42ba2c..868bb42604 100755
--- a/test/functional/interface_rest.py
+++ b/test/functional/interface_rest.py
@@ -279,6 +279,13 @@ class RESTTest (BitcoinTestFramework):
json_obj = self.test_rest_request(f"/headers/5/{bb_hash}")
assert_equal(len(json_obj), 5) # now we should have 5 header objects
+ # Test number parsing
+ for num in ['5a', '-5', '0', '2001', '99999999999999999999999999999999999']:
+ assert_equal(
+ bytes(f'Header count out of range: {num}\r\n', 'ascii'),
+ self.test_rest_request(f"/headers/{num}/{bb_hash}", ret_type=RetType.BYTES, status=400),
+ )
+
self.log.info("Test tx inclusion in the /mempool and /block URIs")
# Make 3 tx and mine them on node 1
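The new /headers cases above exercise the REST handler's strict count parsing: anything that is not an integer in the accepted range is rejected with HTTP 400 and the "Header count out of range" body. A minimal illustrative sketch of that validation in Python (the helper name and the 1..2000 bound are assumptions inferred from the test values, not the actual C++ code):

    def parse_header_count(s, max_count=2000):
        """Return the count if s is an integer in [1, max_count], else None (-> HTTP 400)."""
        try:
            n = int(s)
        except ValueError:
            return None
        return n if 1 <= n <= max_count else None

    for num in ['5a', '-5', '0', '2001', '99999999999999999999999999999999999']:
        assert parse_header_count(num) is None
    assert parse_header_count('5') == 5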
diff --git a/test/functional/interface_zmq.py b/test/functional/interface_zmq.py
index 4313b05f88..5a11a62ec4 100755
--- a/test/functional/interface_zmq.py
+++ b/test/functional/interface_zmq.py
@@ -583,7 +583,7 @@ class ZMQTest (BitcoinTestFramework):
], ipv6=True)
# Generate 1 block in nodes[0]
- self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE)
+ self.generatetoaddress(self.nodes[0], 1, ADDRESS_BCRT1_UNSPENDABLE)
# Should receive the same block hash
assert_equal(self.nodes[0].getbestblockhash(), subscribers[0].receive().hex())
diff --git a/test/functional/mempool_package_limits.py b/test/functional/mempool_package_limits.py
index 2217628858..89a5c83826 100755
--- a/test/functional/mempool_package_limits.py
+++ b/test/functional/mempool_package_limits.py
@@ -244,7 +244,7 @@ class MempoolPackageLimitsTest(BitcoinTestFramework):
assert_equal(txres["package-error"], "package-mempool-limits")
# Clear mempool and check that the package passes now
- node.generate(1)
+ self.generate(node, 1)
assert all([res["allowed"] for res in node.testmempoolaccept(rawtxs=package_hex)])
def test_anc_count_limits(self):
diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py
index c042961937..3943bba489 100755
--- a/test/functional/mempool_packages.py
+++ b/test/functional/mempool_packages.py
@@ -14,7 +14,6 @@ from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
chain_transaction,
- satoshi_round,
)
# default limits
@@ -209,10 +208,10 @@ class MempoolPackagesTest(BitcoinTestFramework):
entry = self.nodes[0].getmempoolentry(x)
descendant_fees += entry['fee']
if (x == chain[-1]):
- assert_equal(entry['modifiedfee'], entry['fee']+satoshi_round(0.00002))
- assert_equal(entry['fees']['modified'], entry['fee']+satoshi_round(0.00002))
+ assert_equal(entry['modifiedfee'], entry['fee'] + Decimal("0.00002"))
+ assert_equal(entry['fees']['modified'], entry['fee'] + Decimal("0.00002"))
assert_equal(entry['descendantfees'], descendant_fees * COIN + 2000)
- assert_equal(entry['fees']['descendant'], descendant_fees+satoshi_round(0.00002))
+ assert_equal(entry['fees']['descendant'], descendant_fees + Decimal("0.00002"))
# Check that node1's mempool is as expected (-> custom ancestor limit)
mempool0 = self.nodes[0].getrawmempool(False)
@@ -308,7 +307,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
value = utxo[0]['amount']
vout = utxo[0]['vout']
- send_value = satoshi_round((value - fee)/2)
+ send_value = (value - fee) / 2
inputs = [ {'txid' : txid, 'vout' : vout} ]
outputs = {}
for _ in range(2):
diff --git a/test/functional/mining_prioritisetransaction.py b/test/functional/mining_prioritisetransaction.py
index da85ee54be..35274d3500 100755
--- a/test/functional/mining_prioritisetransaction.py
+++ b/test/functional/mining_prioritisetransaction.py
@@ -13,7 +13,7 @@ from test_framework.util import assert_equal, assert_raises_rpc_error, create_co
class PrioritiseTransactionTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
- self.num_nodes = 2
+ self.num_nodes = 1
self.extra_args = [[
"-printpriority=1",
"-acceptnonstdtxn=1",
diff --git a/test/functional/p2p_compactblocks_blocksonly.py b/test/functional/p2p_compactblocks_blocksonly.py
index 4073ec03a6..5f01fa4dfe 100755
--- a/test/functional/p2p_compactblocks_blocksonly.py
+++ b/test/functional/p2p_compactblocks_blocksonly.py
@@ -33,7 +33,7 @@ class P2PCompactBlocksBlocksOnly(BitcoinTestFramework):
self.sync_all()
def build_block_on_tip(self):
- blockhash = self.nodes[2].generate(1)[0]
+ blockhash = self.generate(self.nodes[2], 1)[0]
block_hex = self.nodes[2].getblock(blockhash=blockhash, verbosity=0)
block = from_hex(CBlock(), block_hex)
block.rehash()
diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py
index aa3b95fc4f..4e21d08e5c 100755
--- a/test/functional/p2p_segwit.py
+++ b/test/functional/p2p_segwit.py
@@ -72,6 +72,7 @@ from test_framework.script import (
hash160,
)
from test_framework.script_util import (
+ key_to_p2pk_script,
key_to_p2wpkh_script,
keyhash_to_p2pkh_script,
script_to_p2sh_script,
@@ -1455,7 +1456,7 @@ class SegWitTest(BitcoinTestFramework):
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
- witness_script = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
+ witness_script = key_to_p2pk_script(pubkey)
script_wsh = script_to_p2wsh_script(witness_script)
tx2 = CTransaction()
@@ -1533,7 +1534,7 @@ class SegWitTest(BitcoinTestFramework):
key.generate()
pubkey = key.get_pubkey().get_bytes()
- witness_script = CScript([pubkey, CScriptOp(OP_CHECKSIG)])
+ witness_script = key_to_p2pk_script(pubkey)
script_pubkey = script_to_p2wsh_script(witness_script)
# First create a witness output for use in the tests.
diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py
index 3b01506986..b0e46c6ca7 100755
--- a/test/functional/rpc_fundrawtransaction.py
+++ b/test/functional/rpc_fundrawtransaction.py
@@ -1010,7 +1010,8 @@ class RawTransactionsTest(BitcoinTestFramework):
self.nodes[0].sendtoaddress(addr, 10)
self.nodes[0].sendtoaddress(wallet.getnewaddress(), 10)
- self.nodes[0].generate(6)
+ self.generate(self.nodes[0], 6)
+ self.sync_all()
ext_utxo = self.nodes[0].listunspent(addresses=[addr])[0]
# An external input without solving data should result in an error
diff --git a/test/functional/rpc_invalid_address_message.py b/test/functional/rpc_invalid_address_message.py
index e362642f0f..7ab5a5e90d 100755
--- a/test/functional/rpc_invalid_address_message.py
+++ b/test/functional/rpc_invalid_address_message.py
@@ -29,9 +29,6 @@ class InvalidAddressErrorMessageTest(BitcoinTestFramework):
self.setup_clean_chain = True
self.num_nodes = 1
- def skip_test_if_missing_module(self):
- self.skip_if_no_wallet()
-
def test_validateaddress(self):
node = self.nodes[0]
@@ -60,6 +57,10 @@ class InvalidAddressErrorMessageTest(BitcoinTestFramework):
assert info['isvalid']
assert 'error' not in info
+ info = node.validateaddress(BECH32_INVALID_VERSION)
+ assert not info['isvalid']
+ assert_equal(info['error'], 'Invalid Bech32 address witness version')
+
# Base58
info = node.validateaddress(BASE58_INVALID_PREFIX)
assert not info['isvalid']
@@ -87,7 +88,10 @@ class InvalidAddressErrorMessageTest(BitcoinTestFramework):
def run_test(self):
self.test_validateaddress()
- self.test_getaddressinfo()
+
+ if self.is_wallet_compiled():
+ self.init_wallet(0)
+ self.test_getaddressinfo()
if __name__ == '__main__':
diff --git a/test/functional/rpc_misc.py b/test/functional/rpc_misc.py
index ac2a7a309b..e32e562bce 100755
--- a/test/functional/rpc_misc.py
+++ b/test/functional/rpc_misc.py
@@ -57,7 +57,7 @@ class RpcMiscTest(BitcoinTestFramework):
self.log.info("test logging rpc and help")
# Test logging RPC returns the expected number of logging categories.
- assert_equal(len(node.logging()), 26)
+ assert_equal(len(node.logging()), 27)
# Test toggling a logging category on/off/on with the logging RPC.
assert_equal(node.logging()['qt'], True)
diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py
index 6b5b2c6a0f..b132ac3d31 100755
--- a/test/functional/rpc_psbt.py
+++ b/test/functional/rpc_psbt.py
@@ -627,7 +627,8 @@ class PSBTTest(BitcoinTestFramework):
addr_info = self.nodes[0].getaddressinfo(addr)
self.nodes[0].sendtoaddress(addr, 10)
- self.nodes[0].generate(6)
+ self.generate(self.nodes[0], 6)
+ self.sync_all()
ext_utxo = self.nodes[0].listunspent(addresses=[addr])[0]
# An external input without solving data should result in an error
diff --git a/test/functional/rpc_signrawtransaction.py b/test/functional/rpc_signrawtransaction.py
index 18abece253..c519d0c7d1 100755
--- a/test/functional/rpc_signrawtransaction.py
+++ b/test/functional/rpc_signrawtransaction.py
@@ -25,12 +25,12 @@ from test_framework.messages import (
from test_framework.script import (
CScript,
OP_CHECKLOCKTIMEVERIFY,
- OP_CHECKSIG,
OP_CHECKSEQUENCEVERIFY,
OP_DROP,
OP_TRUE,
)
from test_framework.script_util import (
+ key_to_p2pk_script,
key_to_p2pkh_script,
script_to_p2sh_p2wsh_script,
script_to_p2wsh_script,
@@ -229,7 +229,7 @@ class SignRawTransactionsTest(BitcoinTestFramework):
embedded_pubkey = eckey.get_pubkey().get_bytes().hex()
witness_script = {
'P2PKH': key_to_p2pkh_script(embedded_pubkey).hex(),
- 'P2PK': CScript([bytes.fromhex(embedded_pubkey), OP_CHECKSIG]).hex()
+ 'P2PK': key_to_p2pk_script(embedded_pubkey).hex()
}.get(tx_type, "Invalid tx_type")
redeem_script = script_to_p2wsh_script(witness_script).hex()
addr = script_to_p2sh(redeem_script)
diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py
index 6de372cd8e..85e3c2a383 100644
--- a/test/functional/test_framework/blocktools.py
+++ b/test/functional/test_framework/blocktools.py
@@ -33,11 +33,11 @@ from .script import (
CScriptOp,
OP_1,
OP_CHECKMULTISIG,
- OP_CHECKSIG,
OP_RETURN,
OP_TRUE,
)
from .script_util import (
+ key_to_p2pk_script,
key_to_p2wpkh_script,
script_to_p2wsh_script,
)
@@ -134,7 +134,7 @@ def create_coinbase(height, pubkey=None, extra_output_script=None, fees=0, nValu
coinbaseoutput.nValue >>= halvings
coinbaseoutput.nValue += fees
if pubkey is not None:
- coinbaseoutput.scriptPubKey = CScript([pubkey, OP_CHECKSIG])
+ coinbaseoutput.scriptPubKey = key_to_p2pk_script(pubkey)
else:
coinbaseoutput.scriptPubKey = CScript([OP_TRUE])
coinbase.vout = [coinbaseoutput]
diff --git a/test/functional/test_framework/script_util.py b/test/functional/test_framework/script_util.py
index e0dfce4c2f..82a9067dd2 100755
--- a/test/functional/test_framework/script_util.py
+++ b/test/functional/test_framework/script_util.py
@@ -5,14 +5,14 @@
"""Useful Script constants and utils."""
from test_framework.script import (
CScript,
- hash160,
- sha256,
OP_0,
- OP_DUP,
- OP_HASH160,
OP_CHECKSIG,
+ OP_DUP,
OP_EQUAL,
OP_EQUALVERIFY,
+ OP_HASH160,
+ hash160,
+ sha256,
)
# To prevent a "tx-size-small" policy rule error, a transaction has to have a
@@ -36,6 +36,11 @@ DUMMY_P2WPKH_SCRIPT = CScript([b'a' * 21])
DUMMY_2_P2WPKH_SCRIPT = CScript([b'b' * 21])
+def key_to_p2pk_script(key):
+ key = check_key(key)
+ return CScript([key, OP_CHECKSIG])
+
+
def keyhash_to_p2pkh_script(hash):
assert len(hash) == 20
return CScript([OP_DUP, OP_HASH160, hash, OP_EQUALVERIFY, OP_CHECKSIG])
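For reference, key_to_p2pk_script() simply wraps the public key in a pay-to-pubkey script. A self-contained sketch of the resulting byte layout, using a compressed pubkey taken from the test vectors further below purely as an example value:

    OP_CHECKSIG = 0xac
    pubkey = bytes.fromhex("02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397")
    # P2PK scriptPubKey = <push of 33-byte pubkey> OP_CHECKSIG
    p2pk_script = bytes([len(pubkey)]) + pubkey + bytes([OP_CHECKSIG])
    assert len(p2pk_script) == 35  # 1 push byte + 33-byte key + 1 opcode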
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index d87d0cacfd..727ac6aed9 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -628,19 +628,19 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
self.sync_all()
def generate(self, generator, *args, **kwargs):
- blocks = generator.generate(*args, **kwargs)
+ blocks = generator.generate(*args, invalid_call=False, **kwargs)
return blocks
def generateblock(self, generator, *args, **kwargs):
- blocks = generator.generateblock(*args, **kwargs)
+ blocks = generator.generateblock(*args, invalid_call=False, **kwargs)
return blocks
def generatetoaddress(self, generator, *args, **kwargs):
- blocks = generator.generatetoaddress(*args, **kwargs)
+ blocks = generator.generatetoaddress(*args, invalid_call=False, **kwargs)
return blocks
def generatetodescriptor(self, generator, *args, **kwargs):
- blocks = generator.generatetodescriptor(*args, **kwargs)
+ blocks = generator.generatetodescriptor(*args, invalid_call=False, **kwargs)
return blocks
def sync_blocks(self, nodes=None, wait=1, timeout=60):
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index f9e2cfa2f5..e8ff41a46d 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -297,9 +297,21 @@ class TestNode():
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to retrieve cookie credentials after {}s".format(self.rpc_timeout))
- def generate(self, nblocks, maxtries=1000000):
+ def generate(self, nblocks, maxtries=1000000, **kwargs):
self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
- return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
+ return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries, **kwargs)
+
+ def generateblock(self, *args, invalid_call, **kwargs):
+ assert not invalid_call
+ return self.__getattr__('generateblock')(*args, **kwargs)
+
+ def generatetoaddress(self, *args, invalid_call, **kwargs):
+ assert not invalid_call
+ return self.__getattr__('generatetoaddress')(*args, **kwargs)
+
+ def generatetodescriptor(self, *args, invalid_call, **kwargs):
+ assert not invalid_call
+ return self.__getattr__('generatetodescriptor')(*args, **kwargs)
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
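The invalid_call keyword above is the guard that forces tests to mine through the BitcoinTestFramework wrappers instead of calling the node RPC directly: the TestNode methods shadow the RPC attribute and only fall through to __getattr__ when the framework passes invalid_call=False. A toy, self-contained sketch of the same pattern (class names are illustrative, not part of the framework):

    class ToyNode:
        def _rpc_generatetoaddress(self, nblocks, address):
            return [f"hash{i}" for i in range(nblocks)]

        def generatetoaddress(self, *args, invalid_call, **kwargs):
            # A direct call without the keyword raises TypeError; the framework
            # wrapper passes invalid_call=False explicitly.
            assert not invalid_call
            return self._rpc_generatetoaddress(*args, **kwargs)

    class ToyFramework:
        def generatetoaddress(self, node, *args, **kwargs):
            return node.generatetoaddress(*args, invalid_call=False, **kwargs)

    node, framework = ToyNode(), ToyFramework()
    assert framework.generatetoaddress(node, 2, "addr") == ["hash0", "hash1"]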
diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py
index ef27cb3221..81aad20079 100644
--- a/test/functional/test_framework/wallet.py
+++ b/test/functional/test_framework/wallet.py
@@ -10,6 +10,7 @@ from enum import Enum
from random import choice
from typing import Optional
from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE
+from test_framework.descriptors import descsum_create
from test_framework.key import ECKey
from test_framework.messages import (
COIN,
@@ -23,16 +24,17 @@ from test_framework.messages import (
from test_framework.script import (
CScript,
LegacySignatureHash,
- OP_CHECKSIG,
OP_TRUE,
OP_NOP,
SIGHASH_ALL,
)
-from test_framework.script_util import key_to_p2wpkh_script
+from test_framework.script_util import (
+ key_to_p2pk_script,
+ key_to_p2wpkh_script,
+)
from test_framework.util import (
assert_equal,
assert_greater_than_or_equal,
- satoshi_round,
)
DEFAULT_FEE = Decimal("0.0001")
@@ -75,7 +77,7 @@ class MiniWallet:
self._priv_key = ECKey()
self._priv_key.set((1).to_bytes(32, 'big'), True)
pub_key = self._priv_key.get_pubkey()
- self._scriptPubKey = bytes(CScript([pub_key.get_bytes(), OP_CHECKSIG]))
+ self._scriptPubKey = key_to_p2pk_script(pub_key.get_bytes())
elif mode == MiniWalletMode.ADDRESS_OP_TRUE:
self._address = ADDRESS_BCRT1_P2WSH_OP_TRUE
self._scriptPubKey = bytes.fromhex(self._test_node.validateaddress(self._address)['scriptPubKey'])
@@ -109,16 +111,16 @@ class MiniWallet:
break
tx.vin[0].scriptSig = CScript([der_sig + bytes(bytearray([SIGHASH_ALL]))])
- def generate(self, num_blocks):
+ def generate(self, num_blocks, **kwargs):
"""Generate blocks with coinbase outputs to the internal address, and append the outputs to the internal list"""
- blocks = self._test_node.generatetodescriptor(num_blocks, self.get_descriptor())
+ blocks = self._test_node.generatetodescriptor(num_blocks, self.get_descriptor(), **kwargs)
for b in blocks:
cb_tx = self._test_node.getblock(blockhash=b, verbosity=2)['tx'][0]
self._utxos.append({'txid': cb_tx['txid'], 'vout': 0, 'value': cb_tx['vout'][0]['value']})
return blocks
def get_descriptor(self):
- return self._test_node.getdescriptorinfo(f'raw({self._scriptPubKey.hex()})')['descriptor']
+ return descsum_create(f'raw({self._scriptPubKey.hex()})')
def get_address(self):
return self._address
@@ -174,13 +176,12 @@ class MiniWallet:
vsize = Decimal(96) # anyone-can-spend
else:
vsize = Decimal(168) # P2PK (73 bytes scriptSig + 35 bytes scriptPubKey + 60 bytes other)
- send_value = satoshi_round(utxo_to_spend['value'] - fee_rate * (vsize / 1000))
- fee = utxo_to_spend['value'] - send_value
+ send_value = int(COIN * (utxo_to_spend['value'] - fee_rate * (vsize / 1000)))
assert send_value > 0
tx = CTransaction()
tx.vin = [CTxIn(COutPoint(int(utxo_to_spend['txid'], 16), utxo_to_spend['vout']), nSequence=sequence)]
- tx.vout = [CTxOut(int(send_value * COIN), self._scriptPubKey)]
+ tx.vout = [CTxOut(send_value, self._scriptPubKey)]
tx.nLockTime = locktime
if not self._address:
# raw script
@@ -199,7 +200,7 @@ class MiniWallet:
assert_equal(mempool_valid, tx_info['allowed'])
if mempool_valid:
assert_equal(tx_info['vsize'], vsize)
- assert_equal(tx_info['fees']['base'], fee)
+ assert_equal(tx_info['fees']['base'], utxo_to_spend['value'] - Decimal(send_value) / COIN)
return {'txid': tx_info['txid'], 'wtxid': tx_info['wtxid'], 'hex': tx_hex, 'tx': tx}
def sendrawtransaction(self, *, from_node, tx_hex):
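The rewritten fee handling above keeps send_value as an integer number of satoshis and derives the reported base fee back in BTC, instead of rounding with satoshi_round(). A worked example of the arithmetic with hypothetical values (the UTXO amount and fee rate are made up; the vsize of 96 is the anyone-can-spend size used above):

    from decimal import Decimal

    COIN = 100_000_000
    utxo_value = Decimal("25.0")      # BTC (hypothetical UTXO)
    fee_rate = Decimal("0.0001")      # BTC per 1000 vbytes, as in DEFAULT_FEE
    vsize = Decimal(96)

    send_value = int(COIN * (utxo_value - fee_rate * (vsize / 1000)))  # satoshis
    implied_fee = utxo_value - Decimal(send_value) / COIN              # BTC

    assert send_value == 2_499_999_040
    assert implied_fee == Decimal("0.0000096")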
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index c5f08b27f2..b91b294108 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -40,7 +40,7 @@ except UnicodeDecodeError:
CROSS = "x "
CIRCLE = "o "
-if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):
+if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):  # type: ignore
if os.name == 'nt':
import ctypes
kernel32 = ctypes.windll.kernel32 # type: ignore
@@ -207,6 +207,7 @@ BASE_SCRIPTS = [
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py --legacy-wallet',
+ 'wallet_multisig_descriptor_psbt.py',
'wallet_txn_doublespend.py --descriptors',
'feature_backwards_compatibility.py --legacy-wallet',
'feature_backwards_compatibility.py --descriptors',
diff --git a/test/functional/wallet_abandonconflict.py b/test/functional/wallet_abandonconflict.py
index 6365840041..d6766097f6 100755
--- a/test/functional/wallet_abandonconflict.py
+++ b/test/functional/wallet_abandonconflict.py
@@ -120,6 +120,14 @@ class AbandonConflictTest(BitcoinTestFramework):
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
+ self.log.info("Check abandoned transactions in listsinceblock")
+ listsinceblock = self.nodes[0].listsinceblock()
+ txAB1_listsinceblock = [d for d in listsinceblock['transactions'] if d['txid'] == txAB1 and d['category'] == 'send']
+ for tx in txAB1_listsinceblock:
+ assert_equal(tx['abandoned'], True)
+ assert_equal(tx['confirmations'], 0)
+ assert_equal(tx['trusted'], False)
+
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
self.restart_node(0, extra_args=["-minrelaytxfee=0.00001"])
assert self.nodes[0].getmempoolinfo()['loaded']
@@ -149,6 +157,7 @@ class AbandonConflictTest(BitcoinTestFramework):
assert_equal(newbalance, balance - Decimal("24.9996"))
balance = newbalance
+ self.log.info("Test transactions conflicted by a double spend")
# Create a double spend of AB1 by spending again from only A's 10 output
# Mine double spend from node 1
inputs = []
@@ -163,6 +172,34 @@ class AbandonConflictTest(BitcoinTestFramework):
self.connect_nodes(0, 1)
self.sync_blocks()
+ tx_list = self.nodes[0].listtransactions()
+
+ conflicted = [tx for tx in tx_list if tx["confirmations"] < 0]
+ assert_equal(4, len(conflicted))
+
+ wallet_conflicts = [tx for tx in conflicted if tx["walletconflicts"]]
+ assert_equal(2, len(wallet_conflicts))
+
+ double_spends = [tx for tx in tx_list if tx["walletconflicts"] and tx["confirmations"] > 0]
+ assert_equal(1, len(double_spends))
+ double_spend = double_spends[0]
+
+ # Test the properties of the conflicted transactions, i.e. with confirmations < 0.
+ for tx in conflicted:
+ assert_equal(tx["abandoned"], False)
+ assert_equal(tx["confirmations"], -1)
+ assert_equal(tx["trusted"], False)
+
+ # Test the properties of the double-spend transaction, i.e. having wallet conflicts and confirmations > 0.
+ assert_equal(double_spend["abandoned"], False)
+ assert_equal(double_spend["confirmations"], 1)
+ assert "trusted" not in double_spend.keys() # "trusted" only returned if tx has 0 or negative confirmations.
+
+ # Test the walletconflicts field of each.
+ for tx in wallet_conflicts:
+ assert_equal(double_spend["walletconflicts"], [tx["txid"]])
+ assert_equal(tx["walletconflicts"], [double_spend["txid"]])
+
# Verify that B and C's 10 BTC outputs are available for spending again because AB1 is now conflicted
newbalance = self.nodes[0].getbalance()
assert_equal(newbalance, balance + Decimal("20"))
diff --git a/test/functional/wallet_descriptor.py b/test/functional/wallet_descriptor.py
index 17a4c79da3..4ec44a8a6c 100755
--- a/test/functional/wallet_descriptor.py
+++ b/test/functional/wallet_descriptor.py
@@ -84,7 +84,7 @@ class WalletDescriptorTest(BitcoinTestFramework):
send_wrpc = self.nodes[0].get_wallet_rpc("desc1")
# Generate some coins
- self.generatetoaddress(send_wrpc, COINBASE_MATURITY + 1, send_wrpc.getnewaddress())
+ self.generatetoaddress(self.nodes[0], COINBASE_MATURITY + 1, send_wrpc.getnewaddress())
# Make transactions
self.log.info("Test sending and receiving")
diff --git a/test/functional/wallet_importdescriptors.py b/test/functional/wallet_importdescriptors.py
index d86c3737fe..c8f9664885 100755
--- a/test/functional/wallet_importdescriptors.py
+++ b/test/functional/wallet_importdescriptors.py
@@ -74,7 +74,7 @@ class ImportDescriptorsTest(BitcoinTestFramework):
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 0)
self.log.info('Mining coins')
- self.generatetoaddress(w0, COINBASE_MATURITY + 1, w0.getnewaddress())
+ self.generatetoaddress(self.nodes[0], COINBASE_MATURITY + 1, w0.getnewaddress())
# RPC importdescriptors -----------------------------------------------
@@ -405,7 +405,7 @@ class ImportDescriptorsTest(BitcoinTestFramework):
solvable=True,
ismine=True)
txid = w0.sendtoaddress(address, 49.99995540)
- self.generatetoaddress(w0, 6, w0.getnewaddress())
+ self.generatetoaddress(self.nodes[0], 6, w0.getnewaddress())
self.sync_blocks()
tx = wpriv.createrawtransaction([{"txid": txid, "vout": 0}], {w0.getnewaddress(): 49.999})
signed_tx = wpriv.signrawtransactionwithwallet(tx)
diff --git a/test/functional/wallet_keypool.py b/test/functional/wallet_keypool.py
index c714993234..79235646b0 100755
--- a/test/functional/wallet_keypool.py
+++ b/test/functional/wallet_keypool.py
@@ -138,6 +138,20 @@ class KeyPoolTest(BitcoinTestFramework):
assert_equal(wi['keypoolsize_hd_internal'], 100)
assert_equal(wi['keypoolsize'], 100)
+ if not self.options.descriptors:
+ # Check that newkeypool entirely flushes the keypool
+ start_keypath = nodes[0].getaddressinfo(nodes[0].getnewaddress())['hdkeypath']
+ start_change_keypath = nodes[0].getaddressinfo(nodes[0].getrawchangeaddress())['hdkeypath']
+ # flush keypool and get new addresses
+ nodes[0].newkeypool()
+ end_keypath = nodes[0].getaddressinfo(nodes[0].getnewaddress())['hdkeypath']
+ end_change_keypath = nodes[0].getaddressinfo(nodes[0].getrawchangeaddress())['hdkeypath']
+ # The new keypath index should be 100 more than the old one
+ new_index = int(start_keypath.rsplit('/', 1)[1][:-1]) + 100
+ new_change_index = int(start_change_keypath.rsplit('/', 1)[1][:-1]) + 100
+ assert_equal(end_keypath, "m/0'/0'/" + str(new_index) + "'")
+ assert_equal(end_change_keypath, "m/0'/1'/" + str(new_change_index) + "'")
+
# create a blank wallet
nodes[0].createwallet(wallet_name='w2', blank=True, disable_private_keys=True)
w2 = nodes[0].get_wallet_rpc('w2')
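The keypath arithmetic in the new newkeypool check extracts the index from the final derivation component and strips the trailing hardened marker. A minimal sketch with a hypothetical hdkeypath value:

    start_keypath = "m/0'/0'/42'"  # hypothetical value from getaddressinfo
    new_index = int(start_keypath.rsplit('/', 1)[1][:-1]) + 100
    assert new_index == 142
    assert "m/0'/0'/" + str(new_index) + "'" == "m/0'/0'/142'"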
diff --git a/test/functional/wallet_multisig_descriptor_psbt.py b/test/functional/wallet_multisig_descriptor_psbt.py
new file mode 100755
index 0000000000..ed855d2525
--- /dev/null
+++ b/test/functional/wallet_multisig_descriptor_psbt.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test a basic M-of-N multisig setup between multiple people using descriptor wallets and PSBTs, as well as a signing flow.
+
+This is meant to be documentation as much as functional tests, so it is kept as simple and readable as possible.
+"""
+
+from test_framework.address import base58_to_byte
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_approx,
+ assert_equal,
+)
+
+
+class WalletMultisigDescriptorPSBTTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 3
+ self.setup_clean_chain = True
+ self.wallet_names = []
+ self.extra_args = [["-keypool=100"]] * self.num_nodes
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_no_wallet()
+ self.skip_if_no_sqlite()
+
+ @staticmethod
+ def _get_xpub(wallet):
+ """Extract the wallet's xpubs using `listdescriptors` and pick the one from the `pkh` descriptor since it's least likely to be accidentally reused (legacy addresses)."""
+ descriptor = next(filter(lambda d: d["desc"].startswith("pkh"), wallet.listdescriptors()["descriptors"]))
+ return descriptor["desc"].split("]")[-1].split("/")[0]
+
+ @staticmethod
+ def _check_psbt(psbt, to, value, multisig):
+ """Helper function for any of the N participants to check the psbt with decodepsbt and verify it is OK before signing."""
+ tx = multisig.decodepsbt(psbt)["tx"]
+ amount = 0
+ for vout in tx["vout"]:
+ address = vout["scriptPubKey"]["address"]
+ assert_equal(multisig.getaddressinfo(address)["ischange"], address != to)
+ if address == to:
+ amount += vout["value"]
+ assert_approx(amount, float(value), vspan=0.001)
+
+ def participants_create_multisigs(self, xpubs):
+ """The multisig is created by importing the following descriptors. The resulting wallet is watch-only and every participant can do this."""
+ # some simple validation
+ assert_equal(len(xpubs), self.N)
+ # a sanity check/assertion; this will throw if the base58 checksum of any of the provided xpubs is invalid
+ for xpub in xpubs:
+ base58_to_byte(xpub)
+
+ for i, node in enumerate(self.nodes):
+ node.createwallet(wallet_name=f"{self.name}_{i}", blank=True, descriptors=True, disable_private_keys=True)
+ multisig = node.get_wallet_rpc(f"{self.name}_{i}")
+ external = multisig.getdescriptorinfo(f"wsh(sortedmulti({self.M},{f'/0/*,'.join(xpubs)}/0/*))")
+ internal = multisig.getdescriptorinfo(f"wsh(sortedmulti({self.M},{f'/1/*,'.join(xpubs)}/1/*))")
+ result = multisig.importdescriptors([
+ { # receiving addresses (internal: False)
+ "desc": external["descriptor"],
+ "active": True,
+ "internal": False,
+ "timestamp": "now",
+ },
+ { # change addresses (internal: True)
+ "desc": internal["descriptor"],
+ "active": True,
+ "internal": True,
+ "timestamp": "now",
+ },
+ ])
+ assert all(r["success"] for r in result)
+ yield multisig
+
+ def run_test(self):
+ self.M = 2
+ self.N = self.num_nodes
+ self.name = f"{self.M}_of_{self.N}_multisig"
+ self.log.info(f"Testing {self.name}...")
+
+ participants = {
+ # Every participant generates an xpub. The most straightforward way is to create a new descriptor wallet.
+ # This wallet will be the participant's `signer` for the resulting multisig. Avoid reusing this wallet for any other purpose (for privacy reasons).
+ "signers": [node.get_wallet_rpc(node.createwallet(wallet_name=f"participant_{self.nodes.index(node)}", descriptors=True)["name"]) for node in self.nodes],
+ # After participants generate and exchange their xpubs they will each create their own watch-only multisig.
+ # Note: these multisigs are all the same; this just highlights that each participant can independently verify everything on their own node.
+ "multisigs": []
+ }
+
+ self.log.info("Generate and exchange xpubs...")
+ xpubs = [self._get_xpub(signer) for signer in participants["signers"]]
+
+ self.log.info("Every participant imports the following descriptors to create the watch-only multisig...")
+ participants["multisigs"] = list(self.participants_create_multisigs(xpubs))
+
+ self.log.info("Check that every participant's multisig generates the same addresses...")
+ for _ in range(10): # we check that the first 10 generated addresses are the same for all participants' multisigs
+ receive_addresses = [multisig.getnewaddress() for multisig in participants["multisigs"]]
+ assert all(address == receive_addresses[0] for address in receive_addresses)
+ change_addresses = [multisig.getrawchangeaddress() for multisig in participants["multisigs"]]
+ assert all(address == change_addresses[0] for address in change_addresses)
+
+ self.log.info("Get a mature utxo to send to the multisig...")
+ coordinator_wallet = participants["signers"][0]
+ self.generatetoaddress(self.nodes[0], 101, coordinator_wallet.getnewaddress())
+
+ deposit_amount = 6.15
+ multisig_receiving_address = participants["multisigs"][0].getnewaddress()
+ self.log.info("Send funds to the resulting multisig receiving address...")
+ coordinator_wallet.sendtoaddress(multisig_receiving_address, deposit_amount)
+ self.generate(self.nodes[0], 1)
+ self.sync_all()
+ for participant in participants["multisigs"]:
+ assert_approx(participant.getbalance(), deposit_amount, vspan=0.001)
+
+ self.log.info("Send a transaction from the multisig!")
+ to = participants["signers"][self.N - 1].getnewaddress()
+ value = 1
+ self.log.info("First, make a sending transaction, created using `walletcreatefundedpsbt` (anyone can initiate this)...")
+ psbt = participants["multisigs"][0].walletcreatefundedpsbt(inputs=[], outputs={to: value}, options={"feeRate": 0.00010})
+
+ psbts = []
+ self.log.info("Now at least M users check the psbt with decodepsbt and (if OK) signs it with walletprocesspsbt...")
+ for m in range(self.M):
+ signers_multisig = participants["multisigs"][m]
+ self._check_psbt(psbt["psbt"], to, value, signers_multisig)
+ signing_wallet = participants["signers"][m]
+ partially_signed_psbt = signing_wallet.walletprocesspsbt(psbt["psbt"])
+ psbts.append(partially_signed_psbt["psbt"])
+
+ self.log.info("Finally, collect the signed PSBTs with combinepsbt, finalizepsbt, then broadcast the resulting transaction...")
+ combined = coordinator_wallet.combinepsbt(psbts)
+ finalized = coordinator_wallet.finalizepsbt(combined)
+ coordinator_wallet.sendrawtransaction(finalized["hex"])
+
+ self.log.info("Check that balances are correct after the transaction has been included in a block.")
+ self.generate(self.nodes[0], 1)
+ self.sync_all()
+ assert_approx(participants["multisigs"][0].getbalance(), deposit_amount - value, vspan=0.001)
+ assert_equal(participants["signers"][self.N - 1].getbalance(), value)
+
+ self.log.info("Send another transaction from the multisig, this time with a daisy chained signing flow (one after another in series)!")
+ psbt = participants["multisigs"][0].walletcreatefundedpsbt(inputs=[], outputs={to: value}, options={"feeRate": 0.00010})
+ for m in range(self.M):
+ signers_multisig = participants["multisigs"][m]
+ self._check_psbt(psbt["psbt"], to, value, signers_multisig)
+ signing_wallet = participants["signers"][m]
+ psbt = signing_wallet.walletprocesspsbt(psbt["psbt"])
+ assert_equal(psbt["complete"], m == self.M - 1)
+ finalized = coordinator_wallet.finalizepsbt(psbt["psbt"])
+ coordinator_wallet.sendrawtransaction(finalized["hex"])
+
+ self.log.info("Check that balances are correct after the transaction has been included in a block.")
+ self.generate(self.nodes[0], 1)
+ self.sync_all()
+ assert_approx(participants["multisigs"][0].getbalance(), deposit_amount - (value * 2), vspan=0.001)
+ assert_equal(participants["signers"][self.N - 1].getbalance(), value * 2)
+
+
+if __name__ == "__main__":
+ WalletMultisigDescriptorPSBTTest().main()
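For readers treating the test above as documentation: the external (receive) and internal (change) descriptors are plain string templates around the participants' xpubs, which importdescriptors then activates. A self-contained sketch of the string assembly with placeholder xpubs (xpubA/xpubB/xpubC are not real keys; a real setup would pass the getdescriptorinfo-checksummed form to importdescriptors):

    M = 2
    xpubs = ["xpubA", "xpubB", "xpubC"]
    external = f"wsh(sortedmulti({M},{f'/0/*,'.join(xpubs)}/0/*))"
    internal = f"wsh(sortedmulti({M},{f'/1/*,'.join(xpubs)}/1/*))"
    assert external == "wsh(sortedmulti(2,xpubA/0/*,xpubB/0/*,xpubC/0/*))"
    assert internal == "wsh(sortedmulti(2,xpubA/1/*,xpubB/1/*,xpubC/1/*))"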
diff --git a/test/functional/wallet_send.py b/test/functional/wallet_send.py
index 7b23235945..c9daeabeb9 100755
--- a/test/functional/wallet_send.py
+++ b/test/functional/wallet_send.py
@@ -502,7 +502,8 @@ class WalletSendTest(BitcoinTestFramework):
self.nodes[0].sendtoaddress(addr, 10)
self.nodes[0].sendtoaddress(ext_wallet.getnewaddress(), 10)
- self.nodes[0].generate(6)
+ self.generate(self.nodes[0], 6)
+ self.sync_all()
ext_utxo = ext_fund.listunspent(addresses=[addr])[0]
# An external input without solving data should result in an error
diff --git a/test/functional/wallet_transactiontime_rescan.py b/test/functional/wallet_transactiontime_rescan.py
index 78859e6131..afa5139da7 100755
--- a/test/functional/wallet_transactiontime_rescan.py
+++ b/test/functional/wallet_transactiontime_rescan.py
@@ -63,7 +63,7 @@ class TransactionTimeRescanTest(BitcoinTestFramework):
# generate some btc to create transactions and check blockcount
initial_mine = COINBASE_MATURITY + 1
- minernode.generatetoaddress(initial_mine, m1)
+ self.generatetoaddress(minernode, initial_mine, m1)
assert_equal(minernode.getblockcount(), initial_mine + 200)
# synchronize nodes and time
@@ -76,7 +76,7 @@ class TransactionTimeRescanTest(BitcoinTestFramework):
miner_wallet.sendtoaddress(wo1, 10)
# generate blocks and check blockcount
- minernode.generatetoaddress(COINBASE_MATURITY, m1)
+ self.generatetoaddress(minernode, COINBASE_MATURITY, m1)
assert_equal(minernode.getblockcount(), initial_mine + 300)
# synchronize nodes and time
@@ -89,7 +89,7 @@ class TransactionTimeRescanTest(BitcoinTestFramework):
miner_wallet.sendtoaddress(wo2, 5)
# generate blocks and check blockcount
- minernode.generatetoaddress(COINBASE_MATURITY, m1)
+ self.generatetoaddress(minernode, COINBASE_MATURITY, m1)
assert_equal(minernode.getblockcount(), initial_mine + 400)
# synchronize nodes and time
@@ -102,7 +102,7 @@ class TransactionTimeRescanTest(BitcoinTestFramework):
miner_wallet.sendtoaddress(wo3, 1)
# generate more blocks and check blockcount
- minernode.generatetoaddress(COINBASE_MATURITY, m1)
+ self.generatetoaddress(minernode, COINBASE_MATURITY, m1)
assert_equal(minernode.getblockcount(), initial_mine + 500)
self.log.info('Check user\'s final balance and transaction count')
diff --git a/test/functional/wallet_upgradewallet.py b/test/functional/wallet_upgradewallet.py
index ed98db55c9..5800880830 100755
--- a/test/functional/wallet_upgradewallet.py
+++ b/test/functional/wallet_upgradewallet.py
@@ -234,18 +234,13 @@ class UpgradeWalletTest(BitcoinTestFramework):
assert_equal(1, hd_chain_version)
seed_id = bytearray(seed_id)
seed_id.reverse()
- old_kvs = new_kvs
- # First 2 keys should still be non-HD
- for i in range(0, 2):
- info = wallet.getaddressinfo(wallet.getnewaddress())
- assert 'hdkeypath' not in info
- assert 'hdseedid' not in info
- # Next key should be HD
+
+ # New keys (including change) should be HD (the two old keys have been flushed)
info = wallet.getaddressinfo(wallet.getnewaddress())
assert_equal(seed_id.hex(), info['hdseedid'])
assert_equal('m/0\'/0\'/0\'', info['hdkeypath'])
prev_seed_id = info['hdseedid']
- # Change key should be the same keypool
+ # Change key should be HD and from the same keypool
info = wallet.getaddressinfo(wallet.getrawchangeaddress())
assert_equal(prev_seed_id, info['hdseedid'])
assert_equal('m/0\'/0\'/1\'', info['hdkeypath'])
@@ -291,14 +286,7 @@ class UpgradeWalletTest(BitcoinTestFramework):
hd_chain_version, external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', hd_chain)
assert_equal(2, hd_chain_version)
assert_equal(2, internal_counter)
- # Drain the keypool by fetching one external key and one change key. Should still be the same keypool
- info = wallet.getaddressinfo(wallet.getnewaddress())
- assert 'hdseedid' not in info
- assert 'hdkeypath' not in info
- info = wallet.getaddressinfo(wallet.getrawchangeaddress())
- assert 'hdseedid' not in info
- assert 'hdkeypath' not in info
- # The next addresses are HD and should be on different HD chains
+ # The next addresses are HD and should be on different HD chains (the one remaining key in each pool should have been flushed)
info = wallet.getaddressinfo(wallet.getnewaddress())
ext_id = info['hdseedid']
assert_equal('m/0\'/0\'/0\'', info['hdkeypath'])
diff --git a/test/lint/lint-locale-dependence.sh b/test/lint/lint-locale-dependence.sh
index 3015c4f9b9..b119cffec8 100755
--- a/test/lint/lint-locale-dependence.sh
+++ b/test/lint/lint-locale-dependence.sh
@@ -38,13 +38,10 @@ export LC_ALL=C
# https://stackoverflow.com/a/34878283 for more details.
# TODO: Reduce KNOWN_VIOLATIONS by replacing uses of locale dependent stoul/strtol with locale
-# independent ToIntegral<T>(...).
+# independent ToIntegral<T>(...) or the ParseInt*() functions.
# TODO: Reduce KNOWN_VIOLATIONS by replacing uses of locale dependent snprintf with strprintf.
KNOWN_VIOLATIONS=(
- "src/bitcoin-tx.cpp.*stoul"
- "src/dbwrapper.cpp.*stoul"
"src/dbwrapper.cpp:.*vsnprintf"
- "src/rest.cpp:.*strtol"
"src/test/dbwrapper_tests.cpp:.*snprintf"
"src/test/fuzz/locale.cpp"
"src/test/fuzz/string.cpp"
diff --git a/test/lint/lint-python.sh b/test/lint/lint-python.sh
index c448fa6f9a..3d22407fd1 100755
--- a/test/lint/lint-python.sh
+++ b/test/lint/lint-python.sh
@@ -102,7 +102,7 @@ if ! PYTHONWARNINGS="ignore" flake8 --ignore=B,C,E,F,I,N,W --select=$(IFS=","; e
EXIT_CODE=1
fi
-if ! mypy --ignore-missing-imports --show-error-codes $(git ls-files "test/functional/*.py" "contrib/devtools/*.py"); then
+if ! mypy --show-error-codes $(git ls-files "test/functional/*.py" "contrib/devtools/*.py"); then
EXIT_CODE=1
fi
diff --git a/test/util/data/bitcoin-util-test.json b/test/util/data/bitcoin-util-test.json
index a648c0287a..cca5732aa1 100644
--- a/test/util/data/bitcoin-util-test.json
+++ b/test/util/data/bitcoin-util-test.json
@@ -295,6 +295,12 @@
"description": "Create a new transaction with a single output script (OP_DROP) in a P2SH, wrapped in a P2SH (output as json)"
},
{ "exec": "./bitcoin-tx",
+ "args": ["-create", "outscript=0:999999999999999999999999999999"],
+ "return_code": 1,
+ "error_txt": "error: script parse error: decimal numeric value only allowed in the range -0xFFFFFFFF...0xFFFFFFFF",
+ "description": "Try to parse an output script with a decimal number above the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
"args": ["-create", "outscript=0:9999999999"],
"return_code": 1,
"error_txt": "error: script parse error: decimal numeric value only allowed in the range -0xFFFFFFFF...0xFFFFFFFF",
@@ -512,6 +518,30 @@
{ "exec": "./bitcoin-tx",
"args":
["-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:11aa"],
+ "return_code": 1,
+ "error_txt": "error: invalid TX sequence id '11aa'",
+ "description": "Try to parse a sequence number outside the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:-1"],
+ "return_code": 1,
+ "error_txt": "error: invalid TX sequence id '-1'",
+ "description": "Try to parse a sequence number outside the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:4294967296"],
+ "return_code": 1,
+ "error_txt": "error: invalid TX sequence id '4294967296'",
+ "description": "Try to parse a sequence number outside the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create",
"in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:4294967293",
"outaddr=0.18:13tuJJDR2RgArmgfv6JScSdreahzgc4T6o"],
"output_cmp": "txcreatedata_seq0.hex",
@@ -519,6 +549,14 @@
},
{ "exec": "./bitcoin-tx",
"args":
+ ["-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0: 4294967293 ",
+ "outaddr=0.18:13tuJJDR2RgArmgfv6JScSdreahzgc4T6o"],
+ "output_cmp": "txcreatedata_seq0.hex",
+ "description": "Creates a new transaction with one input with sequence number (+whitespace) and one address output"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
["-json",
"-create",
"in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:4294967293",
@@ -542,14 +580,26 @@
"description": "Adds a new input with sequence number to a transaction (output in json)"
},
{ "exec": "./bitcoin-tx",
+ "args": ["-create", "outmultisig=1:-2:3:02a5:021:02df", "nversion=1"],
+ "return_code": 1,
+ "error_txt": "error: invalid multisig required number '-2'",
+ "description": "Try to parse a multisig number outside the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outmultisig=1:2:3a:02a5:021:02df", "nversion=1"],
+ "return_code": 1,
+ "error_txt": "error: invalid multisig total number '3a'",
+ "description": "Try to parse a multisig number outside the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
"args": ["-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485", "nversion=1"],
"output_cmp": "txcreatemultisig1.hex",
"description": "Creates a new transaction with a single 2-of-3 multisig output"
},
{ "exec": "./bitcoin-tx",
- "args": ["-json", "-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485", "nversion=1"],
+ "args": ["-json", "-create", "outmultisig=1: 2 : 3 :02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485", "nversion=1"],
"output_cmp": "txcreatemultisig1.json",
- "description": "Creates a new transaction with a single 2-of-3 multisig output (output in json)"
+ "description": "Creates a new transaction with a single 2-of-3 multisig output (with whitespace, output in json)"
},
{ "exec": "./bitcoin-tx",
"args": ["-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:S", "nversion=1"],