-rw-r--r--  .cirrus.yml | 5
-rw-r--r--  Makefile.am | 15
-rwxr-xr-x  ci/lint/04_install.sh | 3
-rwxr-xr-x  ci/test/00_setup_env_android.sh | 2
-rwxr-xr-x  ci/test/06_script_a.sh | 2
-rwxr-xr-x  ci/test/06_script_b.sh | 2
-rw-r--r--  configure.ac | 74
-rw-r--r--  contrib/devtools/pixie.py | 323
-rwxr-xr-x  contrib/devtools/security-check.py | 183
-rwxr-xr-x  contrib/devtools/symbol-check.py | 163
-rwxr-xr-x  contrib/devtools/test-security-check.py | 12
-rwxr-xr-x  contrib/devtools/test-symbol-check.py | 15
-rwxr-xr-x  contrib/guix/libexec/build.sh | 6
-rw-r--r--  contrib/tracing/README.md | 7
-rwxr-xr-x  contrib/tracing/connectblock_benchmark.bt | 22
-rw-r--r--  depends/packages/expat.mk | 2
-rw-r--r--  depends/packages/fontconfig.mk | 2
-rw-r--r--  depends/packages/freetype.mk | 2
-rw-r--r--  depends/packages/libXau.mk | 2
-rw-r--r--  depends/packages/qt.mk | 1
-rw-r--r--  doc/descriptors.md | 41
-rw-r--r--  doc/psbt.md | 3
-rw-r--r--  doc/release-notes-22539.md | 8
-rw-r--r--  doc/release-notes-23093.md | 11
-rw-r--r--  doc/release-notes.md | 14
-rw-r--r--  doc/tracing.md | 9
-rw-r--r--  src/Makefile.am | 33
-rw-r--r--  src/Makefile.qt.include | 14
-rw-r--r--  src/Makefile.test.include | 22
-rw-r--r--  src/Makefile.univalue.include | 6
-rw-r--r--  src/addrdb.cpp | 20
-rw-r--r--  src/bench/rpc_blockchain.cpp | 4
-rw-r--r--  src/bitcoin-cli.cpp | 2
-rw-r--r--  src/bitcoin-tx.cpp | 19
-rw-r--r--  src/chainparams.cpp | 28
-rw-r--r--  src/core_io.h | 11
-rw-r--r--  src/core_read.cpp | 53
-rw-r--r--  src/core_write.cpp | 31
-rw-r--r--  src/dbwrapper.cpp | 18
-rw-r--r--  src/flatfile.cpp | 4
-rw-r--r--  src/fs.cpp | 8
-rw-r--r--  src/fs.h | 132
-rw-r--r--  src/i2p.cpp | 2
-rw-r--r--  src/init.cpp | 26
-rw-r--r--  src/init/common.cpp | 14
-rw-r--r--  src/interfaces/chain.h | 2
-rw-r--r--  src/ipc/process.cpp | 4
-rw-r--r--  src/logging.cpp | 1
-rw-r--r--  src/logging.h | 1
-rw-r--r--  src/node/blockstorage.cpp | 19
-rw-r--r--  src/node/interfaces.cpp | 2
-rw-r--r--  src/policy/fees.cpp | 4
-rw-r--r--  src/policy/policy.cpp | 7
-rw-r--r--  src/qt/bitcoingui.cpp | 2
-rw-r--r--  src/qt/guiutil.cpp | 4
-rw-r--r--  src/qt/intro.cpp | 2
-rw-r--r--  src/rest.cpp | 20
-rw-r--r--  src/rpc/blockchain.cpp | 61
-rw-r--r--  src/rpc/blockchain.h | 3
-rw-r--r--  src/rpc/request.cpp | 8
-rw-r--r--  src/rpc/server.cpp | 2
-rw-r--r--  src/test/fs_tests.cpp | 27
-rw-r--r--  src/test/fuzz/banman.cpp | 4
-rw-r--r--  src/test/script_parse_tests.cpp | 2
-rw-r--r--  src/test/settings_tests.cpp | 6
-rw-r--r--  src/test/util/chainstate.h | 2
-rw-r--r--  src/test/util/setup_common.cpp | 4
-rw-r--r--  src/test/util_tests.cpp | 14
-rw-r--r--  src/test/validation_chainstate_tests.cpp | 23
-rw-r--r--  src/torcontrol.cpp | 8
-rw-r--r--  src/univalue/.cirrus.yml | 44
-rw-r--r--  src/univalue/.travis.yml | 51
-rw-r--r--  src/univalue/Makefile.am | 90
-rw-r--r--  src/univalue/build-aux/m4/ax_cxx_compile_stdcxx.m4 | 962
-rw-r--r--  src/univalue/configure.ac | 9
-rw-r--r--  src/univalue/gen/gen.cpp | 4
-rw-r--r--  src/univalue/include/univalue.h | 4
-rw-r--r--  src/univalue/lib/univalue.cpp | 16
-rw-r--r--  src/univalue/lib/univalue_escapes.h | 442
-rw-r--r--  src/univalue/lib/univalue_get.cpp | 7
-rw-r--r--  src/univalue/lib/univalue_read.cpp | 6
-rw-r--r--  src/univalue/lib/univalue_utffilter.h | 2
-rw-r--r--  src/univalue/lib/univalue_write.cpp | 7
-rw-r--r--  src/univalue/sources.mk | 95
-rw-r--r--  src/univalue/test/object.cpp | 2
-rw-r--r--  src/univalue/test/unitester.cpp | 4
-rw-r--r--  src/util/asmap.cpp | 4
-rw-r--r--  src/util/settings.cpp | 12
-rw-r--r--  src/util/strencodings.h | 4
-rw-r--r--  src/util/system.cpp | 28
-rw-r--r--  src/validation.cpp | 25
-rw-r--r--  src/wallet/bdb.cpp | 38
-rw-r--r--  src/wallet/bdb.h | 4
-rw-r--r--  src/wallet/db.cpp | 15
-rw-r--r--  src/wallet/dump.cpp | 10
-rw-r--r--  src/wallet/interfaces.cpp | 4
-rw-r--r--  src/wallet/load.cpp | 16
-rw-r--r--  src/wallet/rpcdump.cpp | 8
-rw-r--r--  src/wallet/rpcwallet.cpp | 38
-rw-r--r--  src/wallet/scriptpubkeyman.cpp | 2
-rw-r--r--  src/wallet/sqlite.cpp | 4
-rw-r--r--  src/wallet/test/db_tests.cpp | 8
-rw-r--r--  src/wallet/test/init_test_fixture.cpp | 8
-rw-r--r--  src/wallet/test/init_tests.cpp | 8
-rw-r--r--  src/wallet/test/wallet_tests.cpp | 2
-rw-r--r--  src/wallet/wallet.cpp | 6
-rw-r--r--  src/wallet/walletdb.cpp | 16
-rw-r--r--  src/wallet/wallettool.cpp | 2
-rw-r--r--  src/wallet/walletutil.cpp | 2
-rw-r--r--  test/README.md | 15
-rwxr-xr-x  test/functional/combine_logs.py | 2
-rw-r--r--  test/functional/data/__init__.py | 0
-rwxr-xr-x  test/functional/interface_rest.py | 16
-rwxr-xr-x  test/functional/mempool_package_limits.py | 2
-rwxr-xr-x  test/functional/mining_prioritisetransaction.py | 2
-rwxr-xr-x  test/functional/rpc_blockchain.py | 66
-rwxr-xr-x  test/functional/rpc_misc.py | 2
-rwxr-xr-x  test/functional/test_framework/test_framework.py | 8
-rwxr-xr-x  test/functional/test_framework/test_node.py | 16
-rw-r--r--  test/functional/test_framework/wallet.py | 4
-rwxr-xr-x  test/functional/test_runner.py | 3
-rwxr-xr-x  test/functional/wallet_abandonconflict.py | 8
-rwxr-xr-x  test/functional/wallet_descriptor.py | 2
-rwxr-xr-x  test/functional/wallet_importdescriptors.py | 4
-rwxr-xr-x  test/functional/wallet_keypool.py | 14
-rwxr-xr-x  test/functional/wallet_multisig_descriptor_psbt.py | 163
-rwxr-xr-x  test/functional/wallet_transactiontime_rescan.py | 8
-rwxr-xr-x  test/functional/wallet_upgradewallet.py | 20
-rwxr-xr-x  test/lint/lint-locale-dependence.sh | 4
-rwxr-xr-x  test/lint/lint-python.sh | 2
-rw-r--r--  test/util/data/bitcoin-util-test.json | 54
131 files changed, 2593 insertions(+), 1439 deletions(-)
diff --git a/.cirrus.yml b/.cirrus.yml
index 5491f2c04b..44aaf005f0 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -79,14 +79,13 @@ task:
<< : *FILTER_TEMPLATE
windows_container:
cpu: 4
- memory: 16G
+ memory: 8G
image: cirrusci/windowsservercore:visualstudio2019
timeout_in: 120m
env:
PATH: 'C:\jom;C:\Python39;C:\Python39\Scripts;C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\MSBuild\Current\Bin;%PATH%'
PYTHONUTF8: 1
- VCPKG_TAG: '75522bb1f2e7d863078bcd06322348f053a9e33f'
- VCPKG_FEATURE_FLAGS: 'manifests'
+ VCPKG_TAG: '2021.05.12'
QT_DOWNLOAD_URL: 'https://download.qt.io/official_releases/qt/5.12/5.12.11/single/qt-everywhere-src-5.12.11.zip'
QT_LOCAL_PATH: 'C:\qt-everywhere-src-5.12.11.zip'
QT_SOURCE_DIR: 'C:\qt-everywhere-src-5.12.11'
diff --git a/Makefile.am b/Makefile.am
index ce66331910..af63cf0cbb 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -58,8 +58,7 @@ DIST_SHARE = \
BIN_CHECKS=$(top_srcdir)/contrib/devtools/symbol-check.py \
$(top_srcdir)/contrib/devtools/security-check.py \
- $(top_srcdir)/contrib/devtools/utils.py \
- $(top_srcdir)/contrib/devtools/pixie.py
+ $(top_srcdir)/contrib/devtools/utils.py
WINDOWS_PACKAGING = $(top_srcdir)/share/pixmaps/bitcoin.ico \
$(top_srcdir)/share/pixmaps/nsis-header.bmp \
@@ -367,14 +366,14 @@ clean-local: clean-docs
test-security-check:
if TARGET_DARWIN
- $(AM_V_at) CC='$(CC)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_MACHO
- $(AM_V_at) CC='$(CC)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_MACHO
+ $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_MACHO
+ $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_MACHO
endif
if TARGET_WINDOWS
- $(AM_V_at) CC='$(CC)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_PE
- $(AM_V_at) CC='$(CC)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_PE
+ $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_PE
+ $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_PE
endif
if TARGET_LINUX
- $(AM_V_at) CC='$(CC)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_ELF
- $(AM_V_at) CC='$(CC)' CPPFILT='$(CPPFILT)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_ELF
+ $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_ELF
+ $(AM_V_at) CC='$(CC)' CFLAGS='$(CFLAGS)' CPPFLAGS='$(CPPFLAGS)' LDFLAGS='$(LDFLAGS)' $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_ELF
endif
diff --git a/ci/lint/04_install.sh b/ci/lint/04_install.sh
index 5587618f2d..991234a436 100755
--- a/ci/lint/04_install.sh
+++ b/ci/lint/04_install.sh
@@ -13,7 +13,8 @@ update-alternatives --install /usr/bin/clang-format-diff clang-format-diff $(whi
${CI_RETRY_EXE} pip3 install codespell==2.0.0
${CI_RETRY_EXE} pip3 install flake8==3.8.3
-${CI_RETRY_EXE} pip3 install mypy==0.781
+${CI_RETRY_EXE} pip3 install mypy==0.910
+${CI_RETRY_EXE} pip3 install pyzmq==22.3.0
${CI_RETRY_EXE} pip3 install vulture==2.3
SHELLCHECK_VERSION=v0.7.2
diff --git a/ci/test/00_setup_env_android.sh b/ci/test/00_setup_env_android.sh
index 4ef3ae1ceb..2f9d1f2a9f 100755
--- a/ci/test/00_setup_env_android.sh
+++ b/ci/test/00_setup_env_android.sh
@@ -22,4 +22,4 @@ export ANDROID_HOME="${DEPENDS_DIR}/SDKs/android"
export ANDROID_NDK_HOME="${ANDROID_HOME}/ndk/${ANDROID_NDK_VERSION}"
export DEP_OPTS="ANDROID_SDK=${ANDROID_HOME} ANDROID_NDK=${ANDROID_NDK_HOME} ANDROID_API_LEVEL=${ANDROID_API_LEVEL} ANDROID_TOOLCHAIN_BIN=${ANDROID_NDK_HOME}/toolchains/llvm/prebuilt/linux-x86_64/bin/"
-export BITCOIN_CONFIG="--disable-ccache"
+export BITCOIN_CONFIG="--disable-ccache --disable-tests --enable-gui-tests --disable-bench --disable-fuzz-binary --without-utils --without-libs --without-daemon"
diff --git a/ci/test/06_script_a.sh b/ci/test/06_script_a.sh
index a42cd6cee1..b1d83883d1 100755
--- a/ci/test/06_script_a.sh
+++ b/ci/test/06_script_a.sh
@@ -10,7 +10,7 @@ if [ -n "$ANDROID_TOOLS_URL" ]; then
DOCKER_EXEC make distclean || true
DOCKER_EXEC ./autogen.sh
DOCKER_EXEC ./configure $BITCOIN_CONFIG --prefix=$DEPENDS_DIR/aarch64-linux-android || ( (DOCKER_EXEC cat config.log) && false)
- DOCKER_EXEC "cd src/qt && make $MAKEJOBS && ANDROID_HOME=${ANDROID_HOME} ANDROID_NDK_HOME=${ANDROID_NDK_HOME} make apk"
+ DOCKER_EXEC "make $MAKEJOBS && cd src/qt && ANDROID_HOME=${ANDROID_HOME} ANDROID_NDK_HOME=${ANDROID_NDK_HOME} make apk"
exit 0
fi
diff --git a/ci/test/06_script_b.sh b/ci/test/06_script_b.sh
index 194b14beab..311a43755a 100755
--- a/ci/test/06_script_b.sh
+++ b/ci/test/06_script_b.sh
@@ -9,14 +9,12 @@ export LC_ALL=C.UTF-8
if [[ $HOST = *-mingw32 ]]; then
# Generate all binaries, so that they can be wrapped
DOCKER_EXEC make $MAKEJOBS -C src/secp256k1 VERBOSE=1
- DOCKER_EXEC make $MAKEJOBS -C src/univalue VERBOSE=1
DOCKER_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-wine.sh"
fi
if [ -n "$QEMU_USER_CMD" ]; then
# Generate all binaries, so that they can be wrapped
DOCKER_EXEC make $MAKEJOBS -C src/secp256k1 VERBOSE=1
- DOCKER_EXEC make $MAKEJOBS -C src/univalue VERBOSE=1
DOCKER_EXEC "${BASE_ROOT_DIR}/ci/test/wrap-qemu.sh"
fi
diff --git a/configure.ac b/configure.ac
index 5a6f21fe42..06e0f3f25a 100644
--- a/configure.ac
+++ b/configure.ac
@@ -113,7 +113,6 @@ AC_PATH_PROG([GIT], [git])
AC_PATH_PROG(CCACHE,ccache)
AC_PATH_PROG(XGETTEXT,xgettext)
AC_PATH_PROG(HEXDUMP,hexdump)
-AC_PATH_TOOL(CPPFILT, c++filt)
AC_PATH_TOOL(OBJCOPY, objcopy)
AC_PATH_PROG(DOXYGEN, doxygen)
AM_CONDITIONAL([HAVE_DOXYGEN], [test -n "$DOXYGEN"])
@@ -262,12 +261,6 @@ if test "x$use_asm" = xyes; then
AC_DEFINE(USE_ASM, 1, [Define this symbol to build in assembly routines])
fi
-AC_ARG_WITH([system-univalue],
- [AS_HELP_STRING([--with-system-univalue],
- [Build with system UniValue (default is no)])],
- [system_univalue=$withval],
- [system_univalue=no]
-)
AC_ARG_ENABLE([zmq],
[AS_HELP_STRING([--disable-zmq],
[disable ZMQ notifications])],
@@ -321,7 +314,7 @@ AC_ARG_ENABLE([gprof],
dnl Turn warnings into errors
AC_ARG_ENABLE([werror],
[AS_HELP_STRING([--enable-werror],
- [Treat certain compiler warnings as errors (default is no)])],
+ [Treat compiler warnings as errors (default is no)])],
[enable_werror=$enableval],
[enable_werror=no])
@@ -409,34 +402,13 @@ if test "x$enable_werror" = "xyes"; then
if test "x$CXXFLAG_WERROR" = "x"; then
AC_MSG_ERROR("enable-werror set but -Werror is not usable")
fi
- AX_CHECK_COMPILE_FLAG([-Werror=gnu],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=gnu"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=vla],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=vla"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=shadow-field],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=shadow-field"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=switch],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=switch"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=thread-safety],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=thread-safety"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=range-loop-analysis],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=range-loop-analysis"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=unused-variable],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=unused-variable"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=date-time],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=date-time"],,[[$CXXFLAG_WERROR]])
+ ERROR_CXXFLAGS=$CXXFLAG_WERROR
dnl -Wreturn-type is broken in GCC for MinGW-w64.
dnl https://sourceforge.net/p/mingw-w64/bugs/306/
- AX_CHECK_COMPILE_FLAG([-Werror=return-type], [ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=return-type"], [], [$CXXFLAG_WERROR],
+ AX_CHECK_COMPILE_FLAG([-Werror=return-type], [], [ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Wno-error=return-type"], [$CXXFLAG_WERROR],
[AC_LANG_SOURCE([[#include <cassert>
int f(){ assert(false); }]])])
-
- AX_CHECK_COMPILE_FLAG([-Werror=conditional-uninitialized],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=conditional-uninitialized"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=sign-compare],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=sign-compare"],,[[$CXXFLAG_WERROR]])
- dnl -Wsuggest-override is broken with GCC before 9.2
- dnl https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78010
- AX_CHECK_COMPILE_FLAG([-Werror=suggest-override],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=suggest-override"],,[[$CXXFLAG_WERROR]],
- [AC_LANG_SOURCE([[struct A { virtual void f(); }; struct B : A { void f() final; };]])])
- AX_CHECK_COMPILE_FLAG([-Werror=unreachable-code-loop-increment],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=unreachable-code-loop-increment"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Werror=mismatched-tags], [ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=mismatched-tags"], [], [$CXXFLAG_WERROR])
- AX_CHECK_COMPILE_FLAG([-Werror=implicit-fallthrough], [ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=implicit-fallthrough"], [], [$CXXFLAG_WERROR])
-
- if test x$suppress_external_warnings != xno ; then
- AX_CHECK_COMPILE_FLAG([-Werror=documentation],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=documentation"],,[[$CXXFLAG_WERROR]])
- fi
fi
if test "x$CXXFLAGS_overridden" = "xno"; then
@@ -447,19 +419,18 @@ if test "x$CXXFLAGS_overridden" = "xno"; then
AX_CHECK_COMPILE_FLAG([-Wformat -Wformat-security],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wformat -Wformat-security"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wvla],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wvla"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wshadow-field],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wshadow-field"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Wswitch],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wswitch"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wthread-safety],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wthread-safety"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Wrange-loop-analysis],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wrange-loop-analysis"],,[[$CXXFLAG_WERROR]])
+ AX_CHECK_COMPILE_FLAG([-Wloop-analysis],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wloop-analysis"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wredundant-decls],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wredundant-decls"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Wunused-variable],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunused-variable"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wunused-member-function],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunused-member-function"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wdate-time],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wdate-time"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wconditional-uninitialized],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wconditional-uninitialized"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Wsign-compare],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wsign-compare"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wduplicated-branches],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wduplicated-branches"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wduplicated-cond],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wduplicated-cond"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wlogical-op],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wlogical-op"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Woverloaded-virtual],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Woverloaded-virtual"],,[[$CXXFLAG_WERROR]])
+ dnl -Wsuggest-override is broken with GCC before 9.2
+ dnl https://gcc.gnu.org/bugzilla/show_bug.cgi?id=78010
AX_CHECK_COMPILE_FLAG([-Wsuggest-override],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wsuggest-override"],,[[$CXXFLAG_WERROR]],
[AC_LANG_SOURCE([[struct A { virtual void f(); }; struct B : A { void f() final; };]])])
AX_CHECK_COMPILE_FLAG([-Wunreachable-code-loop-increment],[WARN_CXXFLAGS="$WARN_CXXFLAGS -Wunreachable-code-loop-increment"],,[[$CXXFLAG_WERROR]])
@@ -474,7 +445,6 @@ if test "x$CXXFLAGS_overridden" = "xno"; then
dnl set the -Wno-foo case if it works.
AX_CHECK_COMPILE_FLAG([-Wunused-parameter],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-unused-parameter"],,[[$CXXFLAG_WERROR]])
AX_CHECK_COMPILE_FLAG([-Wself-assign],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-self-assign"],,[[$CXXFLAG_WERROR]])
- AX_CHECK_COMPILE_FLAG([-Wunused-local-typedef],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-unused-local-typedef"],,[[$CXXFLAG_WERROR]])
if test x$suppress_external_warnings != xyes ; then
AX_CHECK_COMPILE_FLAG([-Wdeprecated-copy],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-deprecated-copy"],,[[$CXXFLAG_WERROR]])
fi
@@ -1546,34 +1516,6 @@ if test "x$use_zmq" = xyes; then
esac
fi
-dnl univalue check
-
-need_bundled_univalue=yes
-if test x$build_bitcoin_wallet$build_bitcoin_cli$build_bitcoin_tx$build_bitcoin_util$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench = xnononononononono; then
- need_bundled_univalue=no
-else
- if test x$system_univalue != xno; then
- PKG_CHECK_MODULES([UNIVALUE], [libunivalue >= 1.0.4], [found_univalue=yes], [found_univalue=no])
- if test x$found_univalue = xyes; then
- system_univalue=yes
- need_bundled_univalue=no
- elif test x$system_univalue = xyes; then
- AC_MSG_ERROR([univalue not found])
- else
- system_univalue=no
- fi
- fi
-
- if test x$need_bundled_univalue = xyes; then
- UNIVALUE_CFLAGS='-I$(srcdir)/univalue/include'
- UNIVALUE_LIBS='univalue/libunivalue.la'
- fi
-fi
-
-AM_CONDITIONAL([EMBEDDED_UNIVALUE],[test x$need_bundled_univalue = xyes])
-AC_SUBST(UNIVALUE_CFLAGS)
-AC_SUBST(UNIVALUE_LIBS)
-
dnl libmultiprocess library check
libmultiprocess_found=no
@@ -1936,10 +1878,6 @@ PKGCONFIG_LIBDIR_TEMP="$PKG_CONFIG_LIBDIR"
unset PKG_CONFIG_LIBDIR
PKG_CONFIG_LIBDIR="$PKGCONFIG_LIBDIR_TEMP"
-if test x$need_bundled_univalue = xyes; then
- AC_CONFIG_SUBDIRS([src/univalue])
-fi
-
ac_configure_args="${ac_configure_args} --disable-shared --with-pic --enable-benchmark=no --enable-module-recovery --enable-module-schnorrsig --enable-experimental"
AC_CONFIG_SUBDIRS([src/secp256k1])
diff --git a/contrib/devtools/pixie.py b/contrib/devtools/pixie.py
deleted file mode 100644
index 64660968ad..0000000000
--- a/contrib/devtools/pixie.py
+++ /dev/null
@@ -1,323 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2020 Wladimir J. van der Laan
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-'''
-Compact, self-contained ELF implementation for bitcoin-core security checks.
-'''
-import struct
-import types
-from typing import Dict, List, Optional, Union, Tuple
-
-# you can find all these values in elf.h
-EI_NIDENT = 16
-
-# Byte indices in e_ident
-EI_CLASS = 4 # ELFCLASSxx
-EI_DATA = 5 # ELFDATAxxxx
-
-ELFCLASS32 = 1 # 32-bit
-ELFCLASS64 = 2 # 64-bit
-
-ELFDATA2LSB = 1 # little endian
-ELFDATA2MSB = 2 # big endian
-
-# relevant values for e_machine
-EM_386 = 3
-EM_PPC64 = 21
-EM_ARM = 40
-EM_AARCH64 = 183
-EM_X86_64 = 62
-EM_RISCV = 243
-
-# relevant values for e_type
-ET_DYN = 3
-
-# relevant values for sh_type
-SHT_PROGBITS = 1
-SHT_STRTAB = 3
-SHT_DYNAMIC = 6
-SHT_DYNSYM = 11
-SHT_GNU_verneed = 0x6ffffffe
-SHT_GNU_versym = 0x6fffffff
-
-# relevant values for p_type
-PT_LOAD = 1
-PT_GNU_STACK = 0x6474e551
-PT_GNU_RELRO = 0x6474e552
-
-# relevant values for p_flags
-PF_X = (1 << 0)
-PF_W = (1 << 1)
-PF_R = (1 << 2)
-
-# relevant values for d_tag
-DT_NEEDED = 1
-DT_FLAGS = 30
-
-# relevant values of `d_un.d_val' in the DT_FLAGS entry
-DF_BIND_NOW = 0x00000008
-
-# relevant d_tags with string payload
-STRING_TAGS = {DT_NEEDED}
-
-# rrlevant values for ST_BIND subfield of st_info (symbol binding)
-STB_LOCAL = 0
-
-class ELFRecord(types.SimpleNamespace):
- '''Unified parsing for ELF records.'''
- def __init__(self, data: bytes, offset: int, eh: 'ELFHeader', total_size: Optional[int]) -> None:
- hdr_struct = self.STRUCT[eh.ei_class][0][eh.ei_data]
- if total_size is not None and hdr_struct.size > total_size:
- raise ValueError(f'{self.__class__.__name__} header size too small ({total_size} < {hdr_struct.size})')
- for field, value in zip(self.STRUCT[eh.ei_class][1], hdr_struct.unpack(data[offset:offset + hdr_struct.size])):
- setattr(self, field, value)
-
-def BiStruct(chars: str) -> Dict[int, struct.Struct]:
- '''Compile a struct parser for both endians.'''
- return {
- ELFDATA2LSB: struct.Struct('<' + chars),
- ELFDATA2MSB: struct.Struct('>' + chars),
- }
-
-class ELFHeader(ELFRecord):
- FIELDS = ['e_type', 'e_machine', 'e_version', 'e_entry', 'e_phoff', 'e_shoff', 'e_flags', 'e_ehsize', 'e_phentsize', 'e_phnum', 'e_shentsize', 'e_shnum', 'e_shstrndx']
- STRUCT = {
- ELFCLASS32: (BiStruct('HHIIIIIHHHHHH'), FIELDS),
- ELFCLASS64: (BiStruct('HHIQQQIHHHHHH'), FIELDS),
- }
-
- def __init__(self, data: bytes, offset: int) -> None:
- self.e_ident = data[offset:offset + EI_NIDENT]
- if self.e_ident[0:4] != b'\x7fELF':
- raise ValueError('invalid ELF magic')
- self.ei_class = self.e_ident[EI_CLASS]
- self.ei_data = self.e_ident[EI_DATA]
-
- super().__init__(data, offset + EI_NIDENT, self, None)
-
- def __repr__(self) -> str:
- return f'Header(e_ident={self.e_ident!r}, e_type={self.e_type}, e_machine={self.e_machine}, e_version={self.e_version}, e_entry={self.e_entry}, e_phoff={self.e_phoff}, e_shoff={self.e_shoff}, e_flags={self.e_flags}, e_ehsize={self.e_ehsize}, e_phentsize={self.e_phentsize}, e_phnum={self.e_phnum}, e_shentsize={self.e_shentsize}, e_shnum={self.e_shnum}, e_shstrndx={self.e_shstrndx})'
-
-class Section(ELFRecord):
- name: Optional[bytes] = None
- FIELDS = ['sh_name', 'sh_type', 'sh_flags', 'sh_addr', 'sh_offset', 'sh_size', 'sh_link', 'sh_info', 'sh_addralign', 'sh_entsize']
- STRUCT = {
- ELFCLASS32: (BiStruct('IIIIIIIIII'), FIELDS),
- ELFCLASS64: (BiStruct('IIQQQQIIQQ'), FIELDS),
- }
-
- def __init__(self, data: bytes, offset: int, eh: ELFHeader) -> None:
- super().__init__(data, offset, eh, eh.e_shentsize)
- self._data = data
-
- def __repr__(self) -> str:
- return f'Section(sh_name={self.sh_name}({self.name!r}), sh_type=0x{self.sh_type:x}, sh_flags={self.sh_flags}, sh_addr=0x{self.sh_addr:x}, sh_offset=0x{self.sh_offset:x}, sh_size={self.sh_size}, sh_link={self.sh_link}, sh_info={self.sh_info}, sh_addralign={self.sh_addralign}, sh_entsize={self.sh_entsize})'
-
- def contents(self) -> bytes:
- '''Return section contents.'''
- return self._data[self.sh_offset:self.sh_offset + self.sh_size]
-
-class ProgramHeader(ELFRecord):
- STRUCT = {
- # different ELF classes have the same fields, but in a different order to optimize space versus alignment
- ELFCLASS32: (BiStruct('IIIIIIII'), ['p_type', 'p_offset', 'p_vaddr', 'p_paddr', 'p_filesz', 'p_memsz', 'p_flags', 'p_align']),
- ELFCLASS64: (BiStruct('IIQQQQQQ'), ['p_type', 'p_flags', 'p_offset', 'p_vaddr', 'p_paddr', 'p_filesz', 'p_memsz', 'p_align']),
- }
-
- def __init__(self, data: bytes, offset: int, eh: ELFHeader) -> None:
- super().__init__(data, offset, eh, eh.e_phentsize)
-
- def __repr__(self) -> str:
- return f'ProgramHeader(p_type={self.p_type}, p_offset={self.p_offset}, p_vaddr={self.p_vaddr}, p_paddr={self.p_paddr}, p_filesz={self.p_filesz}, p_memsz={self.p_memsz}, p_flags={self.p_flags}, p_align={self.p_align})'
-
-class Symbol(ELFRecord):
- STRUCT = {
- # different ELF classes have the same fields, but in a different order to optimize space versus alignment
- ELFCLASS32: (BiStruct('IIIBBH'), ['st_name', 'st_value', 'st_size', 'st_info', 'st_other', 'st_shndx']),
- ELFCLASS64: (BiStruct('IBBHQQ'), ['st_name', 'st_info', 'st_other', 'st_shndx', 'st_value', 'st_size']),
- }
-
- def __init__(self, data: bytes, offset: int, eh: ELFHeader, symtab: Section, strings: bytes, version: Optional[bytes]) -> None:
- super().__init__(data, offset, eh, symtab.sh_entsize)
- self.name = _lookup_string(strings, self.st_name)
- self.version = version
-
- def __repr__(self) -> str:
- return f'Symbol(st_name={self.st_name}({self.name!r}), st_value={self.st_value}, st_size={self.st_size}, st_info={self.st_info}, st_other={self.st_other}, st_shndx={self.st_shndx}, version={self.version!r})'
-
- @property
- def is_import(self) -> bool:
- '''Returns whether the symbol is an imported symbol.'''
- return self.st_bind != STB_LOCAL and self.st_shndx == 0
-
- @property
- def is_export(self) -> bool:
- '''Returns whether the symbol is an exported symbol.'''
- return self.st_bind != STB_LOCAL and self.st_shndx != 0
-
- @property
- def st_bind(self) -> int:
- '''Returns STB_*.'''
- return self.st_info >> 4
-
-class Verneed(ELFRecord):
- DEF = (BiStruct('HHIII'), ['vn_version', 'vn_cnt', 'vn_file', 'vn_aux', 'vn_next'])
- STRUCT = { ELFCLASS32: DEF, ELFCLASS64: DEF }
-
- def __init__(self, data: bytes, offset: int, eh: ELFHeader) -> None:
- super().__init__(data, offset, eh, None)
-
- def __repr__(self) -> str:
- return f'Verneed(vn_version={self.vn_version}, vn_cnt={self.vn_cnt}, vn_file={self.vn_file}, vn_aux={self.vn_aux}, vn_next={self.vn_next})'
-
-class Vernaux(ELFRecord):
- DEF = (BiStruct('IHHII'), ['vna_hash', 'vna_flags', 'vna_other', 'vna_name', 'vna_next'])
- STRUCT = { ELFCLASS32: DEF, ELFCLASS64: DEF }
-
- def __init__(self, data: bytes, offset: int, eh: ELFHeader, strings: bytes) -> None:
- super().__init__(data, offset, eh, None)
- self.name = _lookup_string(strings, self.vna_name)
-
- def __repr__(self) -> str:
- return f'Veraux(vna_hash={self.vna_hash}, vna_flags={self.vna_flags}, vna_other={self.vna_other}, vna_name={self.vna_name}({self.name!r}), vna_next={self.vna_next})'
-
-class DynTag(ELFRecord):
- STRUCT = {
- ELFCLASS32: (BiStruct('II'), ['d_tag', 'd_val']),
- ELFCLASS64: (BiStruct('QQ'), ['d_tag', 'd_val']),
- }
-
- def __init__(self, data: bytes, offset: int, eh: ELFHeader, section: Section) -> None:
- super().__init__(data, offset, eh, section.sh_entsize)
-
- def __repr__(self) -> str:
- return f'DynTag(d_tag={self.d_tag}, d_val={self.d_val})'
-
-def _lookup_string(data: bytes, index: int) -> bytes:
- '''Look up string by offset in ELF string table.'''
- endx = data.find(b'\x00', index)
- assert endx != -1
- return data[index:endx]
-
-VERSYM_S = BiStruct('H') # .gnu_version section has a single 16-bit integer per symbol in the linked section
-def _parse_symbol_table(section: Section, strings: bytes, eh: ELFHeader, versym: bytes, verneed: Dict[int, bytes]) -> List[Symbol]:
- '''Parse symbol table, return a list of symbols.'''
- data = section.contents()
- symbols = []
- versym_iter = (verneed.get(v[0]) for v in VERSYM_S[eh.ei_data].iter_unpack(versym))
- for ofs, version in zip(range(0, len(data), section.sh_entsize), versym_iter):
- symbols.append(Symbol(data, ofs, eh, section, strings, version))
- return symbols
-
-def _parse_verneed(section: Section, strings: bytes, eh: ELFHeader) -> Dict[int, bytes]:
- '''Parse .gnu.version_r section, return a dictionary of {versym: 'GLIBC_...'}.'''
- data = section.contents()
- ofs = 0
- result = {}
- while True:
- verneed = Verneed(data, ofs, eh)
- aofs = ofs + verneed.vn_aux
- while True:
- vernaux = Vernaux(data, aofs, eh, strings)
- result[vernaux.vna_other] = vernaux.name
- if not vernaux.vna_next:
- break
- aofs += vernaux.vna_next
-
- if not verneed.vn_next:
- break
- ofs += verneed.vn_next
-
- return result
-
-def _parse_dyn_tags(section: Section, strings: bytes, eh: ELFHeader) -> List[Tuple[int, Union[bytes, int]]]:
- '''Parse dynamic tags. Return array of tuples.'''
- data = section.contents()
- ofs = 0
- result = []
- for ofs in range(0, len(data), section.sh_entsize):
- tag = DynTag(data, ofs, eh, section)
- val = _lookup_string(strings, tag.d_val) if tag.d_tag in STRING_TAGS else tag.d_val
- result.append((tag.d_tag, val))
-
- return result
-
-class ELFFile:
- sections: List[Section]
- program_headers: List[ProgramHeader]
- dyn_symbols: List[Symbol]
- dyn_tags: List[Tuple[int, Union[bytes, int]]]
-
- def __init__(self, data: bytes) -> None:
- self.data = data
- self.hdr = ELFHeader(self.data, 0)
- self._load_sections()
- self._load_program_headers()
- self._load_dyn_symbols()
- self._load_dyn_tags()
- self._section_to_segment_mapping()
-
- def _load_sections(self) -> None:
- self.sections = []
- for idx in range(self.hdr.e_shnum):
- offset = self.hdr.e_shoff + idx * self.hdr.e_shentsize
- self.sections.append(Section(self.data, offset, self.hdr))
-
- shstr = self.sections[self.hdr.e_shstrndx].contents()
- for section in self.sections:
- section.name = _lookup_string(shstr, section.sh_name)
-
- def _load_program_headers(self) -> None:
- self.program_headers = []
- for idx in range(self.hdr.e_phnum):
- offset = self.hdr.e_phoff + idx * self.hdr.e_phentsize
- self.program_headers.append(ProgramHeader(self.data, offset, self.hdr))
-
- def _load_dyn_symbols(self) -> None:
- # first, load 'verneed' section
- verneed = None
- for section in self.sections:
- if section.sh_type == SHT_GNU_verneed:
- strtab = self.sections[section.sh_link].contents() # associated string table
- assert verneed is None # only one section of this kind please
- verneed = _parse_verneed(section, strtab, self.hdr)
- assert verneed is not None
-
- # then, correlate GNU versym sections with dynamic symbol sections
- versym = {}
- for section in self.sections:
- if section.sh_type == SHT_GNU_versym:
- versym[section.sh_link] = section
-
- # finally, load dynsym sections
- self.dyn_symbols = []
- for idx, section in enumerate(self.sections):
- if section.sh_type == SHT_DYNSYM: # find dynamic symbol tables
- strtab_data = self.sections[section.sh_link].contents() # associated string table
- versym_data = versym[idx].contents() # associated symbol version table
- self.dyn_symbols += _parse_symbol_table(section, strtab_data, self.hdr, versym_data, verneed)
-
- def _load_dyn_tags(self) -> None:
- self.dyn_tags = []
- for idx, section in enumerate(self.sections):
- if section.sh_type == SHT_DYNAMIC: # find dynamic tag tables
- strtab = self.sections[section.sh_link].contents() # associated string table
- self.dyn_tags += _parse_dyn_tags(section, strtab, self.hdr)
-
- def _section_to_segment_mapping(self) -> None:
- for ph in self.program_headers:
- ph.sections = []
- for section in self.sections:
- if ph.p_vaddr <= section.sh_addr < (ph.p_vaddr + ph.p_memsz):
- ph.sections.append(section)
-
- def query_dyn_tags(self, tag_in: int) -> List[Union[int, bytes]]:
- '''Return the values of all dyn tags with the specified tag.'''
- return [val for (tag, val) in self.dyn_tags if tag == tag_in]
-
-
-def load(filename: str) -> ELFFile:
- with open(filename, 'rb') as f:
- data = f.read()
- return ELFFile(data)
diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py
index 0b59d8eada..ef421aebb1 100755
--- a/contrib/devtools/security-check.py
+++ b/contrib/devtools/security-check.py
@@ -8,192 +8,155 @@ Exit status will be 0 if successful, and the program will be silent.
Otherwise the exit status will be 1 and it will log which executables failed which checks.
'''
import sys
-from typing import List, Optional
+from typing import List
-import lief
-import pixie
+import lief #type:ignore
-def check_ELF_PIE(executable) -> bool:
- '''
- Check for position independent executable (PIE), allowing for address space randomization.
- '''
- elf = pixie.load(executable)
- return elf.hdr.e_type == pixie.ET_DYN
-
-def check_ELF_NX(executable) -> bool:
- '''
- Check that no sections are writable and executable (including the stack)
- '''
- elf = pixie.load(executable)
- have_wx = False
- have_gnu_stack = False
- for ph in elf.program_headers:
- if ph.p_type == pixie.PT_GNU_STACK:
- have_gnu_stack = True
- if (ph.p_flags & pixie.PF_W) != 0 and (ph.p_flags & pixie.PF_X) != 0: # section is both writable and executable
- have_wx = True
- return have_gnu_stack and not have_wx
-
-def check_ELF_RELRO(executable) -> bool:
+def check_ELF_RELRO(binary) -> bool:
'''
Check for read-only relocations.
GNU_RELRO program header must exist
Dynamic section must have BIND_NOW flag
'''
- elf = pixie.load(executable)
have_gnu_relro = False
- for ph in elf.program_headers:
+ for segment in binary.segments:
# Note: not checking p_flags == PF_R: here as linkers set the permission differently
# This does not affect security: the permission flags of the GNU_RELRO program
# header are ignored, the PT_LOAD header determines the effective permissions.
# However, the dynamic linker need to write to this area so these are RW.
# Glibc itself takes care of mprotecting this area R after relocations are finished.
# See also https://marc.info/?l=binutils&m=1498883354122353
- if ph.p_type == pixie.PT_GNU_RELRO:
+ if segment.type == lief.ELF.SEGMENT_TYPES.GNU_RELRO:
have_gnu_relro = True
have_bindnow = False
- for flags in elf.query_dyn_tags(pixie.DT_FLAGS):
- assert isinstance(flags, int)
- if flags & pixie.DF_BIND_NOW:
+ try:
+ flags = binary.get(lief.ELF.DYNAMIC_TAGS.FLAGS)
+ if flags.value & lief.ELF.DYNAMIC_FLAGS.BIND_NOW:
have_bindnow = True
+ except:
+ have_bindnow = False
return have_gnu_relro and have_bindnow
-def check_ELF_Canary(executable) -> bool:
+def check_ELF_Canary(binary) -> bool:
'''
Check for use of stack canary
'''
- elf = pixie.load(executable)
- ok = False
- for symbol in elf.dyn_symbols:
- if symbol.name == b'__stack_chk_fail':
- ok = True
- return ok
+ return binary.has_symbol('__stack_chk_fail')
-def check_ELF_separate_code(executable):
+def check_ELF_separate_code(binary):
'''
Check that sections are appropriately separated in virtual memory,
based on their permissions. This checks for missing -Wl,-z,separate-code
and potentially other problems.
'''
- elf = pixie.load(executable)
- R = pixie.PF_R
- W = pixie.PF_W
- E = pixie.PF_X
+ R = lief.ELF.SEGMENT_FLAGS.R
+ W = lief.ELF.SEGMENT_FLAGS.W
+ E = lief.ELF.SEGMENT_FLAGS.X
EXPECTED_FLAGS = {
# Read + execute
- b'.init': R | E,
- b'.plt': R | E,
- b'.plt.got': R | E,
- b'.plt.sec': R | E,
- b'.text': R | E,
- b'.fini': R | E,
+ '.init': R | E,
+ '.plt': R | E,
+ '.plt.got': R | E,
+ '.plt.sec': R | E,
+ '.text': R | E,
+ '.fini': R | E,
# Read-only data
- b'.interp': R,
- b'.note.gnu.property': R,
- b'.note.gnu.build-id': R,
- b'.note.ABI-tag': R,
- b'.gnu.hash': R,
- b'.dynsym': R,
- b'.dynstr': R,
- b'.gnu.version': R,
- b'.gnu.version_r': R,
- b'.rela.dyn': R,
- b'.rela.plt': R,
- b'.rodata': R,
- b'.eh_frame_hdr': R,
- b'.eh_frame': R,
- b'.qtmetadata': R,
- b'.gcc_except_table': R,
- b'.stapsdt.base': R,
+ '.interp': R,
+ '.note.gnu.property': R,
+ '.note.gnu.build-id': R,
+ '.note.ABI-tag': R,
+ '.gnu.hash': R,
+ '.dynsym': R,
+ '.dynstr': R,
+ '.gnu.version': R,
+ '.gnu.version_r': R,
+ '.rela.dyn': R,
+ '.rela.plt': R,
+ '.rodata': R,
+ '.eh_frame_hdr': R,
+ '.eh_frame': R,
+ '.qtmetadata': R,
+ '.gcc_except_table': R,
+ '.stapsdt.base': R,
# Writable data
- b'.init_array': R | W,
- b'.fini_array': R | W,
- b'.dynamic': R | W,
- b'.got': R | W,
- b'.data': R | W,
- b'.bss': R | W,
+ '.init_array': R | W,
+ '.fini_array': R | W,
+ '.dynamic': R | W,
+ '.got': R | W,
+ '.data': R | W,
+ '.bss': R | W,
}
- if elf.hdr.e_machine == pixie.EM_PPC64:
+ if binary.header.machine_type == lief.ELF.ARCH.PPC64:
# .plt is RW on ppc64 even with separate-code
- EXPECTED_FLAGS[b'.plt'] = R | W
+ EXPECTED_FLAGS['.plt'] = R | W
# For all LOAD program headers get mapping to the list of sections,
# and for each section, remember the flags of the associated program header.
flags_per_section = {}
- for ph in elf.program_headers:
- if ph.p_type == pixie.PT_LOAD:
- for section in ph.sections:
+ for segment in binary.segments:
+ if segment.type == lief.ELF.SEGMENT_TYPES.LOAD:
+ for section in segment.sections:
assert(section.name not in flags_per_section)
- flags_per_section[section.name] = ph.p_flags
+ flags_per_section[section.name] = segment.flags
# Spot-check ELF LOAD program header flags per section
# If these sections exist, check them against the expected R/W/E flags
for (section, flags) in flags_per_section.items():
if section in EXPECTED_FLAGS:
- if EXPECTED_FLAGS[section] != flags:
+ if int(EXPECTED_FLAGS[section]) != int(flags):
return False
return True
-def check_PE_DYNAMIC_BASE(executable) -> bool:
+def check_PE_DYNAMIC_BASE(binary) -> bool:
'''PIE: DllCharacteristics bit 0x40 signifies dynamicbase (ASLR)'''
- binary = lief.parse(executable)
return lief.PE.DLL_CHARACTERISTICS.DYNAMIC_BASE in binary.optional_header.dll_characteristics_lists
# Must support high-entropy 64-bit address space layout randomization
# in addition to DYNAMIC_BASE to have secure ASLR.
-def check_PE_HIGH_ENTROPY_VA(executable) -> bool:
+def check_PE_HIGH_ENTROPY_VA(binary) -> bool:
'''PIE: DllCharacteristics bit 0x20 signifies high-entropy ASLR'''
- binary = lief.parse(executable)
return lief.PE.DLL_CHARACTERISTICS.HIGH_ENTROPY_VA in binary.optional_header.dll_characteristics_lists
-def check_PE_RELOC_SECTION(executable) -> bool:
+def check_PE_RELOC_SECTION(binary) -> bool:
'''Check for a reloc section. This is required for functional ASLR.'''
- binary = lief.parse(executable)
return binary.has_relocations
-def check_MACHO_NOUNDEFS(executable) -> bool:
+def check_MACHO_NOUNDEFS(binary) -> bool:
'''
Check for no undefined references.
'''
- binary = lief.parse(executable)
return binary.header.has(lief.MachO.HEADER_FLAGS.NOUNDEFS)
-def check_MACHO_LAZY_BINDINGS(executable) -> bool:
+def check_MACHO_LAZY_BINDINGS(binary) -> bool:
'''
Check for no lazy bindings.
We don't use or check for MH_BINDATLOAD. See #18295.
'''
- binary = lief.parse(executable)
return binary.dyld_info.lazy_bind == (0,0)
-def check_MACHO_Canary(executable) -> bool:
+def check_MACHO_Canary(binary) -> bool:
'''
Check for use of stack canary
'''
- binary = lief.parse(executable)
return binary.has_symbol('___stack_chk_fail')
-def check_PIE(executable) -> bool:
+def check_PIE(binary) -> bool:
'''
Check for position independent executable (PIE),
allowing for address space randomization.
'''
- binary = lief.parse(executable)
return binary.is_pie
-def check_NX(executable) -> bool:
+def check_NX(binary) -> bool:
'''
Check for no stack execution
'''
- binary = lief.parse(executable)
return binary.has_nx
-def check_control_flow(executable) -> bool:
+def check_control_flow(binary) -> bool:
'''
Check for control flow instrumentation
'''
- binary = lief.parse(executable)
-
content = binary.get_content_from_virtual_address(binary.entrypoint, 4, lief.Binary.VA_TYPES.AUTO)
if content == [243, 15, 30, 250]: # endbr64
@@ -203,8 +166,8 @@ def check_control_flow(executable) -> bool:
CHECKS = {
'ELF': [
- ('PIE', check_ELF_PIE),
- ('NX', check_ELF_NX),
+ ('PIE', check_PIE),
+ ('NX', check_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary),
('separate_code', check_ELF_separate_code),
@@ -226,30 +189,20 @@ CHECKS = {
]
}
-def identify_executable(executable) -> Optional[str]:
- with open(filename, 'rb') as f:
- magic = f.read(4)
- if magic.startswith(b'MZ'):
- return 'PE'
- elif magic.startswith(b'\x7fELF'):
- return 'ELF'
- elif magic.startswith(b'\xcf\xfa'):
- return 'MACHO'
- return None
-
if __name__ == '__main__':
retval: int = 0
for filename in sys.argv[1:]:
try:
- etype = identify_executable(filename)
- if etype is None:
- print(f'{filename}: unknown format')
+ binary = lief.parse(filename)
+ etype = binary.format.name
+ if etype == lief.EXE_FORMATS.UNKNOWN:
+ print(f'{filename}: unknown executable format')
retval = 1
continue
failed: List[str] = []
for (name, func) in CHECKS[etype]:
- if not func(filename):
+ if not func(binary):
failed.append(name)
if failed:
print(f'{filename}: failed {" ".join(failed)}')
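
For context on what the reworked check_ELF_RELRO() above asserts: "full RELRO" needs both a GNU_RELRO program header (produced by linking with -Wl,-z,relro) and the BIND_NOW bit in the DT_FLAGS dynamic entry (produced by -Wl,-z,now). A minimal stand-alone sketch of the same test, using only the LIEF Python calls already present in the script; this is an illustration, not code from the change:

    import sys
    import lief  # type: ignore

    binary = lief.parse(sys.argv[1])  # expects an ELF binary, e.g. src/bitcoind
    # GNU_RELRO program header comes from linking with -Wl,-z,relro
    have_relro = any(seg.type == lief.ELF.SEGMENT_TYPES.GNU_RELRO for seg in binary.segments)
    # BIND_NOW in the DT_FLAGS dynamic entry comes from -Wl,-z,now
    try:
        flags = binary.get(lief.ELF.DYNAMIC_TAGS.FLAGS)
        have_bindnow = bool(flags.value & lief.ELF.DYNAMIC_FLAGS.BIND_NOW)
    except Exception:
        have_bindnow = False
    print('full RELRO:', have_relro and have_bindnow)
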
diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py
index 98cab1b7fc..136a9b70c1 100755
--- a/contrib/devtools/symbol-check.py
+++ b/contrib/devtools/symbol-check.py
@@ -10,14 +10,14 @@ Example usage:
find ../path/to/binaries -type f -executable | xargs python3 contrib/devtools/symbol-check.py
'''
-import subprocess
import sys
-from typing import List, Optional
+from typing import List, Dict
-import lief
-import pixie
+import lief #type:ignore
-from utils import determine_wellknown_cmd
+# temporary constant, to be replaced with lief.ELF.ARCH.RISCV
+# https://github.com/lief-project/LIEF/pull/562
+LIEF_ELF_ARCH_RISCV = lief.ELF.ARCH(243)
# Debian 8 (Jessie) EOL: 2020. https://wiki.debian.org/DebianReleases#Production_Releases
#
@@ -43,12 +43,12 @@ from utils import determine_wellknown_cmd
MAX_VERSIONS = {
'GCC': (4,8,0),
'GLIBC': {
- pixie.EM_386: (2,17),
- pixie.EM_X86_64: (2,17),
- pixie.EM_ARM: (2,17),
- pixie.EM_AARCH64:(2,17),
- pixie.EM_PPC64: (2,17),
- pixie.EM_RISCV: (2,27),
+ lief.ELF.ARCH.i386: (2,17),
+ lief.ELF.ARCH.x86_64: (2,17),
+ lief.ELF.ARCH.ARM: (2,17),
+ lief.ELF.ARCH.AARCH64:(2,17),
+ lief.ELF.ARCH.PPC64: (2,17),
+ LIEF_ELF_ARCH_RISCV: (2,27),
},
'LIBATOMIC': (1,0),
'V': (0,5,0), # xkb (bitcoin-qt only)
@@ -58,10 +58,35 @@ MAX_VERSIONS = {
# Ignore symbols that are exported as part of every executable
IGNORE_EXPORTS = {
-'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__', '__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr',
+'_edata', '_end', '__end__', '_init', '__bss_start', '__bss_start__', '_bss_end__',
+'__bss_end__', '_fini', '_IO_stdin_used', 'stdin', 'stdout', 'stderr',
'environ', '_environ', '__environ',
}
+# Expected linker-loader names can be found here:
+# https://sourceware.org/glibc/wiki/ABIList?action=recall&rev=16
+ELF_INTERPRETER_NAMES: Dict[lief.ELF.ARCH, Dict[lief.ENDIANNESS, str]] = {
+ lief.ELF.ARCH.i386: {
+ lief.ENDIANNESS.LITTLE: "/lib/ld-linux.so.2",
+ },
+ lief.ELF.ARCH.x86_64: {
+ lief.ENDIANNESS.LITTLE: "/lib64/ld-linux-x86-64.so.2",
+ },
+ lief.ELF.ARCH.ARM: {
+ lief.ENDIANNESS.LITTLE: "/lib/ld-linux-armhf.so.3",
+ },
+ lief.ELF.ARCH.AARCH64: {
+ lief.ENDIANNESS.LITTLE: "/lib/ld-linux-aarch64.so.1",
+ },
+ lief.ELF.ARCH.PPC64: {
+ lief.ENDIANNESS.BIG: "/lib64/ld64.so.1",
+ lief.ENDIANNESS.LITTLE: "/lib64/ld64.so.2",
+ },
+ LIEF_ELF_ARCH_RISCV: {
+ lief.ENDIANNESS.LITTLE: "/lib/ld-linux-riscv64-lp64d.so.1",
+ },
+}
+
# Allowed NEEDED libraries
ELF_ALLOWED_LIBRARIES = {
# bitcoind and bitcoin-qt
@@ -133,31 +158,8 @@ PE_ALLOWED_LIBRARIES = {
'WTSAPI32.dll',
}
-class CPPFilt(object):
- '''
- Demangle C++ symbol names.
-
- Use a pipe to the 'c++filt' command.
- '''
- def __init__(self):
- self.proc = subprocess.Popen(determine_wellknown_cmd('CPPFILT', 'c++filt'), stdin=subprocess.PIPE, stdout=subprocess.PIPE, universal_newlines=True)
-
- def __call__(self, mangled):
- self.proc.stdin.write(mangled + '\n')
- self.proc.stdin.flush()
- return self.proc.stdout.readline().rstrip()
-
- def close(self):
- self.proc.stdin.close()
- self.proc.stdout.close()
- self.proc.wait()
-
def check_version(max_versions, version, arch) -> bool:
- if '_' in version:
- (lib, _, ver) = version.rpartition('_')
- else:
- lib = version
- ver = '0'
+ (lib, _, ver) = version.rpartition('_')
ver = tuple([int(x) for x in ver.split('.')])
if not lib in max_versions:
return False
@@ -166,48 +168,45 @@ def check_version(max_versions, version, arch) -> bool:
else:
return ver <= max_versions[lib][arch]
-def check_imported_symbols(filename) -> bool:
- elf = pixie.load(filename)
- cppfilt = CPPFilt()
+def check_imported_symbols(binary) -> bool:
ok: bool = True
- for symbol in elf.dyn_symbols:
- if not symbol.is_import:
+ for symbol in binary.imported_symbols:
+ if not symbol.imported:
continue
- sym = symbol.name.decode()
- version = symbol.version.decode() if symbol.version is not None else None
- if version and not check_version(MAX_VERSIONS, version, elf.hdr.e_machine):
- print('{}: symbol {} from unsupported version {}'.format(filename, cppfilt(sym), version))
- ok = False
+
+ version = symbol.symbol_version if symbol.has_version else None
+
+ if version:
+ aux_version = version.symbol_version_auxiliary.name if version.has_auxiliary_version else None
+ if aux_version and not check_version(MAX_VERSIONS, aux_version, binary.header.machine_type):
+ print(f'{filename}: symbol {symbol.name} from unsupported version {version}')
+ ok = False
return ok
-def check_exported_symbols(filename) -> bool:
- elf = pixie.load(filename)
- cppfilt = CPPFilt()
+def check_exported_symbols(binary) -> bool:
ok: bool = True
- for symbol in elf.dyn_symbols:
- if not symbol.is_export:
+
+ for symbol in binary.dynamic_symbols:
+ if not symbol.exported:
continue
- sym = symbol.name.decode()
- if elf.hdr.e_machine == pixie.EM_RISCV or sym in IGNORE_EXPORTS:
+ name = symbol.name
+ if binary.header.machine_type == LIEF_ELF_ARCH_RISCV or name in IGNORE_EXPORTS:
continue
- print('{}: export of symbol {} not allowed'.format(filename, cppfilt(sym)))
+ print(f'{binary.name}: export of symbol {name} not allowed!')
ok = False
return ok
-def check_ELF_libraries(filename) -> bool:
+def check_ELF_libraries(binary) -> bool:
ok: bool = True
- elf = pixie.load(filename)
- for library_name in elf.query_dyn_tags(pixie.DT_NEEDED):
- assert(isinstance(library_name, bytes))
- if library_name.decode() not in ELF_ALLOWED_LIBRARIES:
- print('{}: NEEDED library {} is not allowed'.format(filename, library_name.decode()))
+ for library in binary.libraries:
+ if library not in ELF_ALLOWED_LIBRARIES:
+ print(f'{filename}: {library} is not in ALLOWED_LIBRARIES!')
ok = False
return ok
-def check_MACHO_libraries(filename) -> bool:
+def check_MACHO_libraries(binary) -> bool:
ok: bool = True
- binary = lief.parse(filename)
for dylib in binary.libraries:
split = dylib.name.split('/')
if split[-1] not in MACHO_ALLOWED_LIBRARIES:
@@ -215,40 +214,42 @@ def check_MACHO_libraries(filename) -> bool:
ok = False
return ok
-def check_MACHO_min_os(filename) -> bool:
- binary = lief.parse(filename)
+def check_MACHO_min_os(binary) -> bool:
if binary.build_version.minos == [10,15,0]:
return True
return False
-def check_MACHO_sdk(filename) -> bool:
- binary = lief.parse(filename)
+def check_MACHO_sdk(binary) -> bool:
if binary.build_version.sdk == [10, 15, 6]:
return True
return False
-def check_PE_libraries(filename) -> bool:
+def check_PE_libraries(binary) -> bool:
ok: bool = True
- binary = lief.parse(filename)
for dylib in binary.libraries:
if dylib not in PE_ALLOWED_LIBRARIES:
print(f'{dylib} is not in ALLOWED_LIBRARIES!')
ok = False
return ok
-def check_PE_subsystem_version(filename) -> bool:
- binary = lief.parse(filename)
+def check_PE_subsystem_version(binary) -> bool:
major: int = binary.optional_header.major_subsystem_version
minor: int = binary.optional_header.minor_subsystem_version
if major == 6 and minor == 1:
return True
return False
+def check_ELF_interpreter(binary) -> bool:
+ expected_interpreter = ELF_INTERPRETER_NAMES[binary.header.machine_type][binary.abstract.header.endianness]
+
+ return binary.concrete.interpreter == expected_interpreter
+
CHECKS = {
'ELF': [
('IMPORTED_SYMBOLS', check_imported_symbols),
('EXPORTED_SYMBOLS', check_exported_symbols),
- ('LIBRARY_DEPENDENCIES', check_ELF_libraries)
+ ('LIBRARY_DEPENDENCIES', check_ELF_libraries),
+ ('INTERPRETER_NAME', check_ELF_interpreter),
],
'MACHO': [
('DYNAMIC_LIBRARIES', check_MACHO_libraries),
@@ -261,30 +262,20 @@ CHECKS = {
]
}
-def identify_executable(executable) -> Optional[str]:
- with open(filename, 'rb') as f:
- magic = f.read(4)
- if magic.startswith(b'MZ'):
- return 'PE'
- elif magic.startswith(b'\x7fELF'):
- return 'ELF'
- elif magic.startswith(b'\xcf\xfa'):
- return 'MACHO'
- return None
-
if __name__ == '__main__':
retval: int = 0
for filename in sys.argv[1:]:
try:
- etype = identify_executable(filename)
- if etype is None:
- print(f'{filename}: unknown format')
+ binary = lief.parse(filename)
+ etype = binary.format.name
+ if etype == lief.EXE_FORMATS.UNKNOWN:
+ print(f'{filename}: unknown executable format')
retval = 1
continue
failed: List[str] = []
for (name, func) in CHECKS[etype]:
- if not func(filename):
+ if not func(binary):
failed.append(name)
if failed:
print(f'{filename}: failed {" ".join(failed)}')
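
The simplified check_version() above now assumes every versioned import looks like LIB_x.y (for example GLIBC_2.17 or LIBATOMIC_1.0): it splits at the last underscore and compares the numeric part against the MAX_VERSIONS entry for the binary's architecture. A small worked illustration in plain Python, using the GLIBC_2.24 case exercised by the updated test below:

    MAX_GLIBC_X86_64 = (2, 17)             # MAX_VERSIONS['GLIBC'] entry for x86_64

    version = 'GLIBC_2.24'                 # version tag on an imported symbol such as nextup
    lib, _, ver = version.rpartition('_')                # ('GLIBC', '_', '2.24')
    ver_tuple = tuple(int(x) for x in ver.split('.'))    # (2, 24)
    print(ver_tuple <= MAX_GLIBC_X86_64)                 # False -> flagged as unsupported

nextup requires GLIBC_2.24, so it is rejected against the 2.17 ceiling; that is the failure mode the test-symbol-check.py expectation below exercises.
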
diff --git a/contrib/devtools/test-security-check.py b/contrib/devtools/test-security-check.py
index 14058e2cc8..0af7cdf5e6 100755
--- a/contrib/devtools/test-security-check.py
+++ b/contrib/devtools/test-security-check.py
@@ -7,6 +7,7 @@ Test script for security-check.py
'''
import os
import subprocess
+from typing import List
import unittest
from utils import determine_wellknown_cmd
@@ -27,7 +28,16 @@ def clean_files(source, executable):
os.remove(executable)
def call_security_check(cc, source, executable, options):
- subprocess.run([*cc,source,'-o',executable] + options, check=True)
+ # This should behave the same as AC_TRY_LINK, so arrange well-known flags
+ # in the same order as autoconf would.
+ #
+ # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for
+ # reference.
+ env_flags: List[str] = []
+ for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']:
+ env_flags += filter(None, os.environ.get(var, '').split(' '))
+
+ subprocess.run([*cc,source,'-o',executable] + env_flags + options, check=True)
p = subprocess.run(['./contrib/devtools/security-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True)
return (p.returncode, p.stdout.rstrip())
diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py
index 651589c11b..5246375fe3 100755
--- a/contrib/devtools/test-symbol-check.py
+++ b/contrib/devtools/test-symbol-check.py
@@ -13,7 +13,16 @@ import unittest
from utils import determine_wellknown_cmd
def call_symbol_check(cc: List[str], source, executable, options):
- subprocess.run([*cc,source,'-o',executable] + options, check=True)
+ # This should behave the same as AC_TRY_LINK, so arrange well-known flags
+ # in the same order as autoconf would.
+ #
+ # See the definitions for ac_link in autoconf's lib/autoconf/c.m4 file for
+ # reference.
+ env_flags: List[str] = []
+ for var in ['CFLAGS', 'CPPFLAGS', 'LDFLAGS']:
+ env_flags += filter(None, os.environ.get(var, '').split(' '))
+
+ subprocess.run([*cc,source,'-o',executable] + env_flags + options, check=True)
p = subprocess.run(['./contrib/devtools/symbol-check.py',executable], stdout=subprocess.PIPE, universal_newlines=True)
os.remove(source)
os.remove(executable)
@@ -51,7 +60,7 @@ class TestSymbolChecks(unittest.TestCase):
''')
self.assertEqual(call_symbol_check(cc, source, executable, ['-lm']),
- (1, executable + ': symbol nextup from unsupported version GLIBC_2.24\n' +
+ (1, executable + ': symbol nextup from unsupported version GLIBC_2.24(3)\n' +
executable + ': failed IMPORTED_SYMBOLS'))
# -lutil is part of the libc6 package so a safe bet that it's installed
@@ -70,7 +79,7 @@ class TestSymbolChecks(unittest.TestCase):
''')
self.assertEqual(call_symbol_check(cc, source, executable, ['-lutil']),
- (1, executable + ': NEEDED library libutil.so.1 is not allowed\n' +
+ (1, executable + ': libutil.so.1 is not in ALLOWED_LIBRARIES!\n' +
executable + ': failed LIBRARY_DEPENDENCIES'))
# finally, check a simple conforming binary
diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh
index 93526f8c45..e009f97c60 100755
--- a/contrib/guix/libexec/build.sh
+++ b/contrib/guix/libexec/build.sh
@@ -169,8 +169,8 @@ case "$HOST" in
arm-linux-gnueabihf) echo /lib/ld-linux-armhf.so.3 ;;
aarch64-linux-gnu) echo /lib/ld-linux-aarch64.so.1 ;;
riscv64-linux-gnu) echo /lib/ld-linux-riscv64-lp64d.so.1 ;;
- powerpc64-linux-gnu) echo /lib/ld64.so.1;;
- powerpc64le-linux-gnu) echo /lib/ld64.so.2;;
+ powerpc64-linux-gnu) echo /lib64/ld64.so.1;;
+ powerpc64le-linux-gnu) echo /lib64/ld64.so.2;;
*) exit 1 ;;
esac
)
@@ -297,7 +297,7 @@ mkdir -p "$DISTSRC"
${HOST_CXXFLAGS:+CXXFLAGS="${HOST_CXXFLAGS}"} \
${HOST_LDFLAGS:+LDFLAGS="${HOST_LDFLAGS}"}
- sed -i.old 's/-lstdc++ //g' config.status libtool src/univalue/config.status src/univalue/libtool
+ sed -i.old 's/-lstdc++ //g' config.status libtool
# Build Bitcoin Core
make --jobs="$JOBS" ${V:+V=1}
diff --git a/contrib/tracing/README.md b/contrib/tracing/README.md
index 047354cda1..1f93474fa0 100644
--- a/contrib/tracing/README.md
+++ b/contrib/tracing/README.md
@@ -176,17 +176,12 @@ third acts as a duration threshold in milliseconds. When the `ConnectBlock()`
function takes longer than the threshold, information about the block, is
printed. For more details, see the header comment in the script.
-By default, `bpftrace` limits strings to 64 bytes due to the limited stack size
-in the kernel VM. Block hashes as zero-terminated hex strings are 65 bytes which
-exceed the string limit. The string size limit can be set to 65 bytes with the
-environment variable `BPFTRACE_STRLEN`.
-
The following command can be used to benchmark, for example, `ConnectBlock()`
between height 20000 and 38000 on SigNet while logging all blocks that take
longer than 25ms to connect.
```
-$ BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt 20000 38000 25
+$ bpftrace contrib/tracing/connectblock_benchmark.bt 20000 38000 25
```
In a different terminal, starting Bitcoin Core in SigNet mode and with
diff --git a/contrib/tracing/connectblock_benchmark.bt b/contrib/tracing/connectblock_benchmark.bt
index d268eff7f8..6e7a98ef07 100755
--- a/contrib/tracing/connectblock_benchmark.bt
+++ b/contrib/tracing/connectblock_benchmark.bt
@@ -4,11 +4,8 @@
USAGE:
- BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt <start height> <end height> <logging threshold in ms>
+ bpftrace contrib/tracing/connectblock_benchmark.bt <start height> <end height> <logging threshold in ms>
- - The environment variable BPFTRACE_STRLEN needs to be set to 65 chars as
- strings are limited to 64 chars by default. Hex strings with Bitcoin block
- hashes are 64 hex chars + 1 null-termination char.
- <start height> sets the height at which the benchmark should start. Setting
the start height to 0 starts the benchmark immediately, even before the
first block is connected.
@@ -23,7 +20,7 @@
EXAMPLES:
- BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt 300000 680000 1000
+ bpftrace contrib/tracing/connectblock_benchmark.bt 300000 680000 1000
When run together 'bitcoind -reindex', this benchmarks the time it takes to
connect the blocks between height 300.000 and 680.000 (inclusive) and prints
@@ -31,7 +28,7 @@
histogram with block connection times when the benchmark is finished.
- BPFTRACE_STRLEN=65 bpftrace contrib/tracing/connectblock_benchmark.bt 0 0 500
+ bpftrace contrib/tracing/connectblock_benchmark.bt 0 0 500
When running together 'bitcoind', all newly connected blocks that
take longer than 500ms to connect are logged. A histogram with block
@@ -107,14 +104,23 @@ usdt:./src/bitcoind:validation:block_connected /arg1 >= $1 && (arg1 <= $2 || $2
*/
usdt:./src/bitcoind:validation:block_connected / (uint64) arg5 / 1000> $3 /
{
- $hash_str = str(arg0);
+ $hash = arg0;
$height = (int32) arg1;
$transactions = (uint64) arg2;
$inputs = (int32) arg3;
$sigops = (int64) arg4;
$duration = (int64) arg5;
- printf("Block %d (%s) %4d tx %5d ins %5d sigops took %4d ms\n", $height, $hash_str, $transactions, $inputs, $sigops, (uint64) $duration / 1000);
+
+ printf("Block %d (", $height);
+ /* Prints each byte of the block hash as hex in big-endian (the block-explorer format) */
+ $p = $hash + 31;
+ unroll(32) {
+ $b = *(uint8*)$p;
+ printf("%02x", $b);
+ $p -= 1;
+ }
+ printf(") %4d tx %5d ins %5d sigops took %4d ms\n", $transactions, $inputs, $sigops, (uint64) $duration / 1000);
}
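
The unrolled loop above prints the 32 hash bytes from the last byte to the first because the tracepoint exposes the hash in Bitcoin's internal (little-endian) byte order, while log output and block explorers show it reversed; reading the raw bytes this way is also why the BPFTRACE_STRLEN=65 workaround could be dropped from the README and the usage text. A quick way to check the equivalence, sketched in Python with the well-known mainnet genesis block hash (illustration only):

    # 32 hash bytes as they sit in memory (internal, little-endian order)
    internal = bytes.fromhex('6fe28c0ab6f1b372c1a6a246ae63f74f931e8365e15a089c68d6190000000000')
    # printing them last-to-first yields the familiar display (block-explorer) form
    print(''.join(f'{b:02x}' for b in reversed(internal)))
    # -> 000000000019d6689c085ae165831e934ff763ae46a2a6c172b3f1b60a8ce26f
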
diff --git a/depends/packages/expat.mk b/depends/packages/expat.mk
index 902fe43be2..41c1114be0 100644
--- a/depends/packages/expat.mk
+++ b/depends/packages/expat.mk
@@ -23,5 +23,5 @@ define $(package)_stage_cmds
endef
define $(package)_postprocess_cmds
- rm lib/*.la
+ rm -rf share lib/*.la
endef
diff --git a/depends/packages/fontconfig.mk b/depends/packages/fontconfig.mk
index 0d5f94f380..22b5022f06 100644
--- a/depends/packages/fontconfig.mk
+++ b/depends/packages/fontconfig.mk
@@ -29,5 +29,5 @@ define $(package)_stage_cmds
endef
define $(package)_postprocess_cmds
- rm lib/*.la
+ rm -rf var lib/*.la
endef
diff --git a/depends/packages/freetype.mk b/depends/packages/freetype.mk
index a1584608e1..aebc8a5f3b 100644
--- a/depends/packages/freetype.mk
+++ b/depends/packages/freetype.mk
@@ -23,5 +23,5 @@ define $(package)_stage_cmds
endef
define $(package)_postprocess_cmds
- rm lib/*.la
+ rm -rf share/man lib/*.la
endef
diff --git a/depends/packages/libXau.mk b/depends/packages/libXau.mk
index 4c55c2df04..24e0e9d325 100644
--- a/depends/packages/libXau.mk
+++ b/depends/packages/libXau.mk
@@ -30,5 +30,5 @@ define $(package)_stage_cmds
endef
define $(package)_postprocess_cmds
- rm lib/*.la
+ rm -rf share lib/*.la
endef
diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk
index 9004b064d6..12e0494ad4 100644
--- a/depends/packages/qt.mk
+++ b/depends/packages/qt.mk
@@ -248,7 +248,6 @@ endef
define $(package)_config_cmds
export PKG_CONFIG_SYSROOT_DIR=/ && \
export PKG_CONFIG_LIBDIR=$(host_prefix)/lib/pkgconfig && \
- export PKG_CONFIG_PATH=$(host_prefix)/share/pkgconfig && \
cd qtbase && \
./configure -top-level $($(package)_config_opts)
endef
diff --git a/doc/descriptors.md b/doc/descriptors.md
index 3bbb626a42..57a0f99d70 100644
--- a/doc/descriptors.md
+++ b/doc/descriptors.md
@@ -139,6 +139,47 @@ Key order does not matter for `sortedmulti()`. `sortedmulti()` behaves in the sa
as `multi()` does but the keys are reordered in the resulting script such that they
are lexicographically ordered as described in BIP67.
+#### Basic multisig example
+
+For a good example of a basic M-of-N multisig between multiple participants using descriptor
+wallets and PSBTs, as well as a signing flow, see [this functional test](/test/functional/wallet_multisig_descriptor_psbt.py).
+
+Disclaimers: It is important to note that this example serves as a quick-start and is kept basic for readability. A downside of the approach
+outlined here is that each participant must maintain (and back up) two separate wallets: a signer and the corresponding multisig.
+It should also be noted that privacy best practices are not followed by default here - participants should take care to only use the signer to sign
+transactions related to the multisig. Lastly, it is not recommended to use anything other than a Bitcoin Core descriptor wallet to serve as your
+signer(s). Other wallets, whether hardware or software, likely impose additional checks and safeguards to prevent users from signing transactions that
+could lead to loss of funds or are deemed security hazards. Conforming to various 3rd-party checks and verifications is not in the scope of this example.
+
+The basic steps are:
+
+ 1. Every participant generates an xpub. The most straightforward way is to create a new descriptor wallet which we will refer to as
+ the participant's signer wallet. Avoid reusing this wallet for any purpose other than signing transactions from the
+ corresponding multisig we are about to create. Hint: extract the wallet's xpubs using `listdescriptors` and pick the one from the
+ `pkh` descriptor since it's least likely to be accidentally reused (legacy addresses)
+ 2. Create a watch-only descriptor wallet (blank, private keys disabled). Now the multisig is created by importing the two descriptors:
+ `wsh(sortedmulti(<M>,XPUB1/0/*,XPUB2/0/*,…,XPUBN/0/*))` and `wsh(sortedmulti(<M>,XPUB1/1/*,XPUB2/1/*,…,XPUBN/1/*))`
+ (one descriptor w/ `0` for receiving addresses and another w/ `1` for change). Every participant does this
+ 3. A receiving address is generated for the multisig. As a check to ensure step 2 was done correctly, every participant
+ should verify they get the same addresses
+ 4. Funds are sent to the resulting address
+ 5. A sending transaction from the multisig is created using `walletcreatefundedpsbt` (anyone can initiate this). It is simple to do
+ this in the GUI by going to the `Send` tab in the multisig wallet and creating an unsigned transaction (PSBT)
+ 6. At least `M` participants check the PSBT with their multisig using `decodepsbt` to verify the transaction is OK before signing it.
+ 7. (If OK) the participant signs the PSBT with their signer wallet using `walletprocesspsbt`. It is simple to do this in the GUI by
+ loading the PSBT from file and signing it
+ 8. The signed PSBTs are collected with `combinepsbt`, finalized w/ `finalizepsbt`, and then the resulting transaction is broadcast
+ to the network. Note that any wallet (e.g. one of the signers or the multisig) is capable of doing this.
+ 9. Check that balances are correct after the transaction has been included in a block
+
+You may prefer a daisy-chained signing flow where each participant signs the PSBT one after another until
+the PSBT has been signed `M` times and is "complete." For the most part, the steps above remain the same, except steps 6 and 7
+change slightly from signing the original PSBT in parallel to signing it in series. `combinepsbt` is not necessary with
+this signing flow and the last (`M`th) signer can just broadcast the PSBT after signing. Note that a parallel signing flow may be
+preferable in cases where there are more signers. This signing flow is also included in the test / Python example.
+[The test](/test/functional/wallet_multisig_descriptor_psbt.py) is meant to be documentation as much as it is a functional test, so
+it is kept as simple and readable as possible.
+
### BIP32 derived keys and chains
Most modern wallet software and hardware uses keys that are derived using
diff --git a/doc/psbt.md b/doc/psbt.md
index c411b31d5d..0f31cb8eba 100644
--- a/doc/psbt.md
+++ b/doc/psbt.md
@@ -92,6 +92,9 @@ hardware implementations will typically implement multiple roles simultaneously.
#### Multisig with multiple Bitcoin Core instances
+For a quick start see [Basic M-of-N multisig example using descriptor wallets and PSBTs](./descriptors.md#basic-multisig-example).
+If you are using legacy wallets, feel free to continue with the example provided here.
+
Alice, Bob, and Carol want to create a 2-of-3 multisig address. They're all using
Bitcoin Core. We assume their wallets only contain the multisig funds. In case
they also have a personal wallet, this can be accomplished through the
diff --git a/doc/release-notes-22539.md b/doc/release-notes-22539.md
deleted file mode 100644
index 9f56071451..0000000000
--- a/doc/release-notes-22539.md
+++ /dev/null
@@ -1,8 +0,0 @@
-Notable changes
-===============
-
-P2P and network changes
------------------------
-
-- Fee estimation now takes the feerate of replacement (RBF) transactions into
- account.
diff --git a/doc/release-notes-23093.md b/doc/release-notes-23093.md
new file mode 100644
index 0000000000..68fbaec53c
--- /dev/null
+++ b/doc/release-notes-23093.md
@@ -0,0 +1,11 @@
+Notable changes
+===============
+
+Updated RPCs
+------------
+
+- `upgradewallet` will now automatically flush the keypool if upgrading
+from a non-HD wallet to an HD wallet, to immediately start using the
+newly-generated HD keys.
+- A new RPC, `newkeypool`, has been added, which will flush (entirely
+clear and refill) the keypool.
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 81e79dd3a9..b460cd3eb2 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -61,6 +61,12 @@ P2P and network changes
They will become eligible for address gossip after sending an ADDR, ADDRV2,
or GETADDR message. (#21528)
+Fee estimation changes
+----------------------
+
+- Fee estimation now takes the feerate of replacement (RBF) transactions into
+ account. (#22539)
+
Rescan startup parameter removed
--------------------------------
@@ -76,6 +82,14 @@ Updated RPCs
`gettransaction verbose=true` and REST endpoints `/rest/tx`, `/rest/getutxos`,
`/rest/block` no longer return the `addresses` and `reqSigs` fields, which
were previously deprecated in 22.0. (#22650)
+- The `getblock` RPC command now supports verbosity level 3, which includes `prevout`
+ information for each transaction input. The existing `/rest/block/` REST endpoint is modified
+ to contain this information too. Every `vin` field will contain an additional `prevout`
+ subfield describing the spent output. `prevout` contains the following keys:
+ - `generated` - true if the spent output was created by a coinbase transaction.
+ - `height`
+ - `value`
+ - `scriptPubKey`
- `listunspent` now includes `ancestorcount`, `ancestorsize`, and
`ancestorfees` for each transaction output that is still in the mempool.
diff --git a/doc/tracing.md b/doc/tracing.md
index 87fc9603fe..57104c43a0 100644
--- a/doc/tracing.md
+++ b/doc/tracing.md
@@ -101,19 +101,12 @@ Is called *after* a block is connected to the chain. Can, for example, be used
to benchmark block connections together with `-reindex`.
Arguments passed:
-1. Block Header Hash as `pointer to C-style String` (64 characters)
+1. Block Header Hash as `pointer to unsigned chars` (i.e. 32 bytes in little-endian)
2. Block Height as `int32`
3. Transactions in the Block as `uint64`
4. Inputs spent in the Block as `int32`
5. SigOps in the Block (excluding coinbase SigOps) as `uint64`
6. Time it took to connect the Block in microseconds (µs) as `uint64`
-7. Block Header Hash as `pointer to unsigned chars` (i.e. 32 bytes in little-endian)
-
-Note: The 7th argument can't be accessed by bpftrace and is purposefully chosen
-to be the block header hash as bytes. See [bpftrace argument limit] for more
-details.
-
-[bpftrace argument limit]: #bpftrace-argument-limit
## Adding tracepoints to Bitcoin Core
diff --git a/src/Makefile.am b/src/Makefile.am
index 12fdc9ad75..68c81c1a12 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -6,7 +6,7 @@
print-%: FORCE
@echo '$*'='$($*)'
-DIST_SUBDIRS = secp256k1 univalue
+DIST_SUBDIRS = secp256k1
AM_LDFLAGS = $(LIBTOOL_LDFLAGS) $(HARDENED_LDFLAGS) $(GPROF_LDFLAGS) $(SANITIZER_LDFLAGS)
AM_CXXFLAGS = $(DEBUG_CXXFLAGS) $(HARDENED_CXXFLAGS) $(WARN_CXXFLAGS) $(NOWARN_CXXFLAGS) $(ERROR_CXXFLAGS) $(GPROF_CXXFLAGS) $(SANITIZER_CXXFLAGS)
@@ -15,18 +15,7 @@ AM_LIBTOOLFLAGS = --preserve-dup-deps
PTHREAD_FLAGS = $(PTHREAD_CFLAGS) $(PTHREAD_LIBS)
EXTRA_LIBRARIES =
-if EMBEDDED_UNIVALUE
-LIBUNIVALUE = univalue/libunivalue.la
-
-$(LIBUNIVALUE): $(wildcard univalue/lib/*) $(wildcard univalue/include/*)
- $(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C $(@D) $(@F)
-else
-LIBUNIVALUE = $(UNIVALUE_LIBS)
-endif
-
-BITCOIN_INCLUDES=-I$(builddir) -I$(srcdir)/secp256k1/include $(BDB_CPPFLAGS) $(BOOST_CPPFLAGS) $(LEVELDB_CPPFLAGS)
-
-BITCOIN_INCLUDES += $(UNIVALUE_CFLAGS)
+BITCOIN_INCLUDES=-I$(builddir) -I$(srcdir)/secp256k1/include -I$(srcdir)/$(UNIVALUE_INCLUDE_DIR_INT) $(BDB_CPPFLAGS) $(BOOST_CPPFLAGS) $(LEVELDB_CPPFLAGS)
LIBBITCOIN_SERVER=libbitcoin_server.a
LIBBITCOIN_COMMON=libbitcoin_common.a
@@ -80,6 +69,7 @@ EXTRA_LIBRARIES += \
$(LIBBITCOIN_ZMQ)
lib_LTLIBRARIES = $(LIBBITCOINCONSENSUS)
+noinst_LTLIBRARIES =
bin_PROGRAMS =
noinst_PROGRAMS =
@@ -797,7 +787,6 @@ $(top_srcdir)/$(subdir)/config/bitcoin-config.h.in: $(am__configure_deps)
clean-local:
-$(MAKE) -C secp256k1 clean
- -$(MAKE) -C univalue clean
-rm -f leveldb/*/*.gcda leveldb/*/*.gcno leveldb/helpers/memenv/*.gcda leveldb/helpers/memenv/*.gcno
-rm -f config.h
-rm -rf test/__pycache__
@@ -808,20 +797,8 @@ clean-local:
$(AM_V_GEN) $(WINDRES) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(CPPFLAGS) -DWINDRES_PREPROC -i $< -o $@
check-symbols: $(bin_PROGRAMS)
-if TARGET_DARWIN
- @echo "Checking macOS dynamic libraries..."
+ @echo "Running symbol and dynamic library checks..."
$(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
-endif
-
-if TARGET_WINDOWS
- @echo "Checking Windows dynamic libraries..."
- $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
-endif
-
-if TARGET_LINUX
- @echo "Checking glibc back compat..."
- $(AM_V_at) CPPFILT='$(CPPFILT)' $(PYTHON) $(top_srcdir)/contrib/devtools/symbol-check.py $(bin_PROGRAMS)
-endif
check-security: $(bin_PROGRAMS)
if HARDEN
@@ -887,3 +864,5 @@ endif
if ENABLE_QT_TESTS
include Makefile.qttest.include
endif
+
+include Makefile.univalue.include
diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include
index f4b0b3adbe..1e3d75a8d8 100644
--- a/src/Makefile.qt.include
+++ b/src/Makefile.qt.include
@@ -168,10 +168,10 @@ BITCOIN_QT_H = \
qt/walletview.h \
qt/winshutdownmonitor.h
-RES_FONTS = \
+QT_RES_FONTS = \
qt/res/fonts/RobotoMono-Bold.ttf
-RES_ICONS = \
+QT_RES_ICONS = \
qt/res/icons/add.png \
qt/res/icons/address-book.png \
qt/res/icons/bitcoin.ico \
@@ -287,9 +287,9 @@ if ENABLE_WALLET
BITCOIN_QT_CPP += $(BITCOIN_QT_WALLET_CPP)
endif # ENABLE_WALLET
-RES_ANIMATION = $(wildcard $(srcdir)/qt/res/animation/spinner-*.png)
+QT_RES_ANIMATION = $(wildcard $(srcdir)/qt/res/animation/spinner-*.png)
-BITCOIN_RC = qt/res/bitcoin-qt-res.rc
+BITCOIN_QT_RC = qt/res/bitcoin-qt-res.rc
BITCOIN_QT_INCLUDES = -DQT_NO_KEYWORDS -DQT_USE_QSTRINGBUILDER
@@ -299,7 +299,7 @@ qt_libbitcoinqt_a_CXXFLAGS = $(AM_CXXFLAGS) $(QT_PIE_FLAGS)
qt_libbitcoinqt_a_OBJCXXFLAGS = $(AM_OBJCXXFLAGS) $(QT_PIE_FLAGS)
qt_libbitcoinqt_a_SOURCES = $(BITCOIN_QT_CPP) $(BITCOIN_QT_H) $(QT_FORMS_UI) \
- $(QT_QRC) $(QT_QRC_LOCALE) $(QT_TS) $(RES_FONTS) $(RES_ICONS) $(RES_ANIMATION)
+ $(QT_QRC) $(QT_QRC_LOCALE) $(QT_TS) $(QT_RES_FONTS) $(QT_RES_ICONS) $(QT_RES_ANIMATION)
if TARGET_DARWIN
qt_libbitcoinqt_a_SOURCES += $(BITCOIN_MM)
endif
@@ -321,7 +321,7 @@ bitcoin_qt_cxxflags = $(AM_CXXFLAGS) $(QT_PIE_FLAGS)
bitcoin_qt_sources = qt/main.cpp
if TARGET_WINDOWS
- bitcoin_qt_sources += $(BITCOIN_RC)
+ bitcoin_qt_sources += $(BITCOIN_QT_RC)
endif
bitcoin_qt_ldadd = qt/libbitcoinqt.a $(LIBBITCOIN_SERVER)
if ENABLE_WALLET
@@ -371,7 +371,7 @@ $(QT_QRC_LOCALE_CPP): $(QT_QRC_LOCALE) $(QT_QM)
$(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(RCC) -name bitcoin_locale --format-version 1 $(@D)/temp_$(<F) > $@
@rm $(@D)/temp_$(<F)
-$(QT_QRC_CPP): $(QT_QRC) $(QT_FORMS_H) $(RES_FONTS) $(RES_ICONS) $(RES_ANIMATION)
+$(QT_QRC_CPP): $(QT_QRC) $(QT_FORMS_H) $(QT_RES_FONTS) $(QT_RES_ICONS) $(QT_RES_ANIMATION)
@test -f $(RCC)
$(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(RCC) -name bitcoin --format-version 1 $< > $@
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index d70793ffa9..27f9382631 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -350,8 +350,26 @@ if ENABLE_BENCH
endif
endif
$(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C secp256k1 check
-if EMBEDDED_UNIVALUE
- $(AM_V_at)$(MAKE) $(AM_MAKEFLAGS) -C univalue check
+
+if !ENABLE_FUZZ
+UNIVALUE_TESTS = univalue/test/object univalue/test/unitester univalue/test/no_nul
+noinst_PROGRAMS += $(UNIVALUE_TESTS)
+TESTS += $(UNIVALUE_TESTS)
+
+univalue_test_unitester_SOURCES = $(UNIVALUE_TEST_UNITESTER_INT)
+univalue_test_unitester_LDADD = $(LIBUNIVALUE)
+univalue_test_unitester_CPPFLAGS = -I$(srcdir)/$(UNIVALUE_INCLUDE_DIR_INT) -DJSON_TEST_SRC=\"$(srcdir)/$(UNIVALUE_TEST_DATA_DIR_INT)\"
+univalue_test_unitester_LDFLAGS = -static $(LIBTOOL_APP_LDFLAGS)
+
+univalue_test_no_nul_SOURCES = $(UNIVALUE_TEST_NO_NUL_INT)
+univalue_test_no_nul_LDADD = $(LIBUNIVALUE)
+univalue_test_no_nul_CPPFLAGS = -I$(srcdir)/$(UNIVALUE_INCLUDE_DIR_INT)
+univalue_test_no_nul_LDFLAGS = -static $(LIBTOOL_APP_LDFLAGS)
+
+univalue_test_object_SOURCES = $(UNIVALUE_TEST_OBJECT_INT)
+univalue_test_object_LDADD = $(LIBUNIVALUE)
+univalue_test_object_CPPFLAGS = -I$(srcdir)/$(UNIVALUE_INCLUDE_DIR_INT)
+univalue_test_object_LDFLAGS = -static $(LIBTOOL_APP_LDFLAGS)
endif
%.cpp.test: %.cpp
diff --git a/src/Makefile.univalue.include b/src/Makefile.univalue.include
new file mode 100644
index 0000000000..3644e36368
--- /dev/null
+++ b/src/Makefile.univalue.include
@@ -0,0 +1,6 @@
+include univalue/sources.mk
+
+LIBUNIVALUE = libunivalue.la
+noinst_LTLIBRARIES += $(LIBUNIVALUE)
+libunivalue_la_SOURCES = $(UNIVALUE_LIB_SOURCES_INT) $(UNIVALUE_DIST_HEADERS_INT) $(UNIVALUE_LIB_HEADERS_INT) $(UNIVALUE_TEST_FILES_INT)
+libunivalue_la_CPPFLAGS = $(AM_CPPFLAGS) -I$(srcdir)/$(UNIVALUE_INCLUDE_DIR_INT)
diff --git a/src/addrdb.cpp b/src/addrdb.cpp
index 50fd09101e..bdb1fc6b2b 100644
--- a/src/addrdb.cpp
+++ b/src/addrdb.cpp
@@ -58,7 +58,7 @@ bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data
if (fileout.IsNull()) {
fileout.fclose();
remove(pathTmp);
- return error("%s: Failed to open file %s", __func__, pathTmp.string());
+ return error("%s: Failed to open file %s", __func__, fs::PathToString(pathTmp));
}
// Serialize
@@ -70,7 +70,7 @@ bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data
if (!FileCommit(fileout.Get())) {
fileout.fclose();
remove(pathTmp);
- return error("%s: Failed to flush file %s", __func__, pathTmp.string());
+ return error("%s: Failed to flush file %s", __func__, fs::PathToString(pathTmp));
}
fileout.fclose();
@@ -122,8 +122,8 @@ void DeserializeFileDB(const fs::path& path, Data& data, int version)
} // namespace
CBanDB::CBanDB(fs::path ban_list_path)
- : m_banlist_dat(ban_list_path.string() + ".dat"),
- m_banlist_json(ban_list_path.string() + ".json")
+ : m_banlist_dat(ban_list_path + ".dat"),
+ m_banlist_json(ban_list_path + ".json")
{
}
@@ -143,7 +143,7 @@ bool CBanDB::Write(const banmap_t& banSet)
bool CBanDB::Read(banmap_t& banSet)
{
if (fs::exists(m_banlist_dat)) {
- LogPrintf("banlist.dat ignored because it can only be read by " PACKAGE_NAME " version 22.x. Remove %s to silence this warning.\n", m_banlist_dat);
+ LogPrintf("banlist.dat ignored because it can only be read by " PACKAGE_NAME " version 22.x. Remove %s to silence this warning.\n", fs::quoted(fs::PathToString(m_banlist_dat)));
}
// If the JSON banlist does not exist, then recreate it
if (!fs::exists(m_banlist_json)) {
@@ -155,7 +155,7 @@ bool CBanDB::Read(banmap_t& banSet)
if (!util::ReadSettings(m_banlist_json, settings, errors)) {
for (const auto& err : errors) {
- LogPrintf("Cannot load banlist %s: %s\n", m_banlist_json.string(), err);
+ LogPrintf("Cannot load banlist %s: %s\n", fs::PathToString(m_banlist_json), err);
}
return false;
}
@@ -163,7 +163,7 @@ bool CBanDB::Read(banmap_t& banSet)
try {
BanMapFromJson(settings[JSON_KEY], banSet);
} catch (const std::runtime_error& e) {
- LogPrintf("Cannot parse banlist %s: %s\n", m_banlist_json.string(), e.what());
+ LogPrintf("Cannot parse banlist %s: %s\n", fs::PathToString(m_banlist_json), e.what());
return false;
}
@@ -194,12 +194,12 @@ std::optional<bilingual_str> LoadAddrman(const std::vector<bool>& asmap, const A
} catch (const DbNotFoundError&) {
// Addrman can be in an inconsistent state after failure, reset it
addrman = std::make_unique<AddrMan>(asmap, /* deterministic */ false, /* consistency_check_ratio */ check_addrman);
- LogPrintf("Creating peers.dat because the file was not found (%s)\n", path_addr);
+ LogPrintf("Creating peers.dat because the file was not found (%s)\n", fs::quoted(fs::PathToString(path_addr)));
DumpPeerAddresses(args, *addrman);
} catch (const std::exception& e) {
addrman = nullptr;
return strprintf(_("Invalid or corrupt peers.dat (%s). If you believe this is a bug, please report it to %s. As a workaround, you can move the file (%s) out of the way (rename, move, or delete) to have a new one created on the next start."),
- e.what(), PACKAGE_BUGREPORT, path_addr);
+ e.what(), PACKAGE_BUGREPORT, fs::quoted(fs::PathToString(path_addr)));
}
return std::nullopt;
}
@@ -215,7 +215,7 @@ std::vector<CAddress> ReadAnchors(const fs::path& anchors_db_path)
std::vector<CAddress> anchors;
try {
DeserializeFileDB(anchors_db_path, anchors, CLIENT_VERSION | ADDRV2_FORMAT);
- LogPrintf("Loaded %i addresses from %s\n", anchors.size(), anchors_db_path.filename());
+ LogPrintf("Loaded %i addresses from %s\n", anchors.size(), fs::quoted(fs::PathToString(anchors_db_path.filename())));
} catch (const std::exception&) {
anchors.clear();
}
diff --git a/src/bench/rpc_blockchain.cpp b/src/bench/rpc_blockchain.cpp
index c8886a4c23..3bef64f720 100644
--- a/src/bench/rpc_blockchain.cpp
+++ b/src/bench/rpc_blockchain.cpp
@@ -40,7 +40,7 @@ static void BlockToJsonVerbose(benchmark::Bench& bench)
{
TestBlockAndIndex data;
bench.run([&] {
- auto univalue = blockToJSON(data.block, &data.blockindex, &data.blockindex, /*verbose*/ true);
+ auto univalue = blockToJSON(data.block, &data.blockindex, &data.blockindex, TxVerbosity::SHOW_DETAILS_AND_PREVOUT);
ankerl::nanobench::doNotOptimizeAway(univalue);
});
}
@@ -50,7 +50,7 @@ BENCHMARK(BlockToJsonVerbose);
static void BlockToJsonVerboseWrite(benchmark::Bench& bench)
{
TestBlockAndIndex data;
- auto univalue = blockToJSON(data.block, &data.blockindex, &data.blockindex, /*verbose*/ true);
+ auto univalue = blockToJSON(data.block, &data.blockindex, &data.blockindex, TxVerbosity::SHOW_DETAILS_AND_PREVOUT);
bench.run([&] {
auto str = univalue.write();
ankerl::nanobench::doNotOptimizeAway(str);
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index 3c22ee0f67..43e986a765 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -786,7 +786,7 @@ static UniValue CallRPC(BaseRequestHandler* rh, const std::string& strMethod, co
if (failedToGetAuthCookie) {
throw std::runtime_error(strprintf(
"Could not locate RPC credentials. No authentication cookie could be found, and RPC password is not set. See -rpcpassword and -stdinrpcpass. Configuration file: (%s)",
- GetConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME)).string()));
+ fs::PathToString(GetConfigFile(gArgs.GetArg("-conf", BITCOIN_CONF_FILENAME)))));
} else {
throw std::runtime_error("Authorization failed: Incorrect rpcuser or rpcpassword");
}
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index fc3bc6aa71..eb97cfc6f6 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -235,6 +235,16 @@ static void MutateTxRBFOptIn(CMutableTransaction& tx, const std::string& strInId
}
}
+template <typename T>
+static T TrimAndParse(const std::string& int_str, const std::string& err)
+{
+ const auto parsed{ToIntegral<T>(TrimString(int_str))};
+ if (!parsed.has_value()) {
+ throw std::runtime_error(err + " '" + int_str + "'");
+ }
+ return parsed.value();
+}
+
static void MutateTxAddInput(CMutableTransaction& tx, const std::string& strInput)
{
std::vector<std::string> vStrInputParts;
@@ -261,8 +271,9 @@ static void MutateTxAddInput(CMutableTransaction& tx, const std::string& strInpu
// extract the optional sequence number
uint32_t nSequenceIn = CTxIn::SEQUENCE_FINAL;
- if (vStrInputParts.size() > 2)
- nSequenceIn = std::stoul(vStrInputParts[2]);
+ if (vStrInputParts.size() > 2) {
+ nSequenceIn = TrimAndParse<uint32_t>(vStrInputParts.at(2), "invalid TX sequence id");
+ }
// append to transaction input list
CTxIn txin(txid, vout, CScript(), nSequenceIn);
@@ -352,10 +363,10 @@ static void MutateTxAddOutMultiSig(CMutableTransaction& tx, const std::string& s
CAmount value = ExtractAndValidateValue(vStrInputParts[0]);
// Extract REQUIRED
- uint32_t required = stoul(vStrInputParts[1]);
+ const uint32_t required{TrimAndParse<uint32_t>(vStrInputParts.at(1), "invalid multisig required number")};
// Extract NUMKEYS
- uint32_t numkeys = stoul(vStrInputParts[2]);
+ const uint32_t numkeys{TrimAndParse<uint32_t>(vStrInputParts.at(2), "invalid multisig total number")};
// Validate there are the correct number of pubkeys
if (vStrInputParts.size() < numkeys + 3)
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index b155745794..2e823c1211 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -118,15 +118,15 @@ public:
// This is fine at runtime as we'll fall back to using them as an addrfetch if they don't support the
// service bits we want, but we should get them updated to support all service bits wanted by any
// release ASAP to avoid it where possible.
- vSeeds.emplace_back("seed.bitcoin.sipa.be"); // Pieter Wuille, only supports x1, x5, x9, and xd
- vSeeds.emplace_back("dnsseed.bluematt.me"); // Matt Corallo, only supports x9
- vSeeds.emplace_back("dnsseed.bitcoin.dashjr.org"); // Luke Dashjr
- vSeeds.emplace_back("seed.bitcoinstats.com"); // Christian Decker, supports x1 - xf
- vSeeds.emplace_back("seed.bitcoin.jonasschnelli.ch"); // Jonas Schnelli, only supports x1, x5, x9, and xd
- vSeeds.emplace_back("seed.btc.petertodd.org"); // Peter Todd, only supports x1, x5, x9, and xd
- vSeeds.emplace_back("seed.bitcoin.sprovoost.nl"); // Sjors Provoost
- vSeeds.emplace_back("dnsseed.emzy.de"); // Stephan Oeste
- vSeeds.emplace_back("seed.bitcoin.wiz.biz"); // Jason Maurice
+ vSeeds.emplace_back("seed.bitcoin.sipa.be."); // Pieter Wuille, only supports x1, x5, x9, and xd
+ vSeeds.emplace_back("dnsseed.bluematt.me."); // Matt Corallo, only supports x9
+ vSeeds.emplace_back("dnsseed.bitcoin.dashjr.org."); // Luke Dashjr
+ vSeeds.emplace_back("seed.bitcoinstats.com."); // Christian Decker, supports x1 - xf
+ vSeeds.emplace_back("seed.bitcoin.jonasschnelli.ch."); // Jonas Schnelli, only supports x1, x5, x9, and xd
+ vSeeds.emplace_back("seed.btc.petertodd.org."); // Peter Todd, only supports x1, x5, x9, and xd
+ vSeeds.emplace_back("seed.bitcoin.sprovoost.nl."); // Sjors Provoost
+ vSeeds.emplace_back("dnsseed.emzy.de."); // Stephan Oeste
+ vSeeds.emplace_back("seed.bitcoin.wiz.biz."); // Jason Maurice
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,0);
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,5);
@@ -230,10 +230,10 @@ public:
vFixedSeeds.clear();
vSeeds.clear();
// nodes with support for servicebits filtering should be at the top
- vSeeds.emplace_back("testnet-seed.bitcoin.jonasschnelli.ch");
- vSeeds.emplace_back("seed.tbtc.petertodd.org");
- vSeeds.emplace_back("seed.testnet.bitcoin.sprovoost.nl");
- vSeeds.emplace_back("testnet-seed.bluematt.me"); // Just a static list of stable node(s), only supports x9
+ vSeeds.emplace_back("testnet-seed.bitcoin.jonasschnelli.ch.");
+ vSeeds.emplace_back("seed.tbtc.petertodd.org.");
+ vSeeds.emplace_back("seed.testnet.bitcoin.sprovoost.nl.");
+ vSeeds.emplace_back("testnet-seed.bluematt.me."); // Just a static list of stable node(s), only supports x9
base58Prefixes[PUBKEY_ADDRESS] = std::vector<unsigned char>(1,111);
base58Prefixes[SCRIPT_ADDRESS] = std::vector<unsigned char>(1,196);
@@ -280,7 +280,7 @@ public:
if (!args.IsArgSet("-signetchallenge")) {
bin = ParseHex("512103ad5e0edad18cb1f0fc0d28a3d4f1f3e445640337489abb10404f2d1e086be430210359ef5021964fe22d6f8e05b2463c9540ce96883fe3b278760f048f5189f2e6c452ae");
- vSeeds.emplace_back("seed.signet.bitcoin.sprovoost.nl");
+ vSeeds.emplace_back("seed.signet.bitcoin.sprovoost.nl.");
// Hardcoded nodes can be removed once there are more DNS seeds
vSeeds.emplace_back("178.128.221.177");
diff --git a/src/core_io.h b/src/core_io.h
index be93a17efe..4d7199ab12 100644
--- a/src/core_io.h
+++ b/src/core_io.h
@@ -20,6 +20,15 @@ class uint256;
class UniValue;
class CTxUndo;
+/**
+ * Verbosity level for a block's transactions
+ */
+enum class TxVerbosity {
+ SHOW_TXID, //!< Only TXID for each block's transaction
+ SHOW_DETAILS, //!< Include TXID, inputs, outputs, and other common block's transaction information
+ SHOW_DETAILS_AND_PREVOUT //!< The same as previous option with information about prevouts if available
+};
+
// core_read.cpp
CScript ParseScript(const std::string& s);
std::string ScriptToAsmStr(const CScript& script, const bool fAttemptSighashDecode = false);
@@ -46,6 +55,6 @@ std::string EncodeHexTx(const CTransaction& tx, const int serializeFlags = 0);
std::string SighashToStr(unsigned char sighash_type);
void ScriptPubKeyToUniv(const CScript& scriptPubKey, UniValue& out, bool include_hex, bool include_address = true);
void ScriptToUniv(const CScript& script, UniValue& out);
-void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry, bool include_hex = true, int serialize_flags = 0, const CTxUndo* txundo = nullptr);
+void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry, bool include_hex = true, int serialize_flags = 0, const CTxUndo* txundo = nullptr, TxVerbosity verbosity = TxVerbosity::SHOW_DETAILS);
#endif // BITCOIN_CORE_IO_H
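For context on how the new `TxVerbosity` parameter is meant to be used, here is a minimal sketch (the wrapper function below is hypothetical and only illustrates the added default argument): when undo data is available, passing `SHOW_DETAILS_AND_PREVOUT` makes `TxToUniv()` attach a `prevout` object to every input.

```
// Hypothetical caller, for illustration only.
#include <core_io.h>
#include <univalue.h>

UniValue TxWithPrevouts(const CTransaction& tx, const uint256& block_hash, const CTxUndo* undo)
{
    UniValue entry(UniValue::VOBJ);
    // With undo data and SHOW_DETAILS_AND_PREVOUT, each "vin" entry gains a
    // "prevout" object with generated/height/value/scriptPubKey fields.
    TxToUniv(tx, block_hash, entry, /*include_hex=*/true, /*serialize_flags=*/0, undo,
             TxVerbosity::SHOW_DETAILS_AND_PREVOUT);
    return entry;
}
```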
diff --git a/src/core_read.cpp b/src/core_read.cpp
index 320811b9e9..2149b428d2 100644
--- a/src/core_read.cpp
+++ b/src/core_read.cpp
@@ -26,20 +26,20 @@ opcodetype ParseOpCode(const std::string& s)
{
static std::map<std::string, opcodetype> mapOpNames;
- if (mapOpNames.empty())
- {
- for (unsigned int op = 0; op <= MAX_OPCODE; op++)
- {
+ if (mapOpNames.empty()) {
+ for (unsigned int op = 0; op <= MAX_OPCODE; op++) {
// Allow OP_RESERVED to get into mapOpNames
- if (op < OP_NOP && op != OP_RESERVED)
+ if (op < OP_NOP && op != OP_RESERVED) {
continue;
+ }
std::string strName = GetOpName(static_cast<opcodetype>(op));
- if (strName == "OP_UNKNOWN")
+ if (strName == "OP_UNKNOWN") {
continue;
+ }
mapOpNames[strName] = static_cast<opcodetype>(op);
// Convenience: OP_ADD and just ADD are both recognized:
- if (strName.compare(0, 3, "OP_") == 0) { // strName starts with "OP_"
+ if (strName.compare(0, 3, "OP_") == 0) { // strName starts with "OP_"
mapOpNames[strName.substr(3)] = static_cast<opcodetype>(op);
}
}
@@ -59,44 +59,35 @@ CScript ParseScript(const std::string& s)
std::vector<std::string> words;
boost::algorithm::split(words, s, boost::algorithm::is_any_of(" \t\n"), boost::algorithm::token_compress_on);
- for (std::vector<std::string>::const_iterator w = words.begin(); w != words.end(); ++w)
- {
- if (w->empty())
- {
+ for (const std::string& w : words) {
+ if (w.empty()) {
// Empty string, ignore. (boost::split given '' will return one word)
- }
- else if (std::all_of(w->begin(), w->end(), ::IsDigit) ||
- (w->front() == '-' && w->size() > 1 && std::all_of(w->begin()+1, w->end(), ::IsDigit)))
+ } else if (std::all_of(w.begin(), w.end(), ::IsDigit) ||
+ (w.front() == '-' && w.size() > 1 && std::all_of(w.begin() + 1, w.end(), ::IsDigit)))
{
// Number
- int64_t n = LocaleIndependentAtoi<int64_t>(*w);
+ const auto num{ToIntegral<int64_t>(w)};
- //limit the range of numbers ParseScript accepts in decimal
- //since numbers outside -0xFFFFFFFF...0xFFFFFFFF are illegal in scripts
- if (n > int64_t{0xffffffff} || n < -1 * int64_t{0xffffffff}) {
+ // limit the range of numbers ParseScript accepts in decimal
+ // since numbers outside -0xFFFFFFFF...0xFFFFFFFF are illegal in scripts
+ if (!num.has_value() || num > int64_t{0xffffffff} || num < -1 * int64_t{0xffffffff}) {
throw std::runtime_error("script parse error: decimal numeric value only allowed in the "
"range -0xFFFFFFFF...0xFFFFFFFF");
}
- result << n;
- }
- else if (w->substr(0,2) == "0x" && w->size() > 2 && IsHex(std::string(w->begin()+2, w->end())))
- {
+ result << num.value();
+ } else if (w.substr(0, 2) == "0x" && w.size() > 2 && IsHex(std::string(w.begin() + 2, w.end()))) {
// Raw hex data, inserted NOT pushed onto stack:
- std::vector<unsigned char> raw = ParseHex(std::string(w->begin()+2, w->end()));
+ std::vector<unsigned char> raw = ParseHex(std::string(w.begin() + 2, w.end()));
result.insert(result.end(), raw.begin(), raw.end());
- }
- else if (w->size() >= 2 && w->front() == '\'' && w->back() == '\'')
- {
+ } else if (w.size() >= 2 && w.front() == '\'' && w.back() == '\'') {
// Single-quoted string, pushed as data. NOTE: this is poor-man's
// parsing, spaces/tabs/newlines in single-quoted strings won't work.
- std::vector<unsigned char> value(w->begin()+1, w->end()-1);
+ std::vector<unsigned char> value(w.begin() + 1, w.end() - 1);
result << value;
- }
- else
- {
+ } else {
// opcode, e.g. OP_ADD or ADD:
- result << ParseOpCode(*w);
+ result << ParseOpCode(w);
}
}
diff --git a/src/core_write.cpp b/src/core_write.cpp
index 6b13e4c586..468694b011 100644
--- a/src/core_write.cpp
+++ b/src/core_write.cpp
@@ -163,7 +163,7 @@ void ScriptPubKeyToUniv(const CScript& scriptPubKey, UniValue& out, bool include
out.pushKV("type", GetTxnOutputType(type));
}
-void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry, bool include_hex, int serialize_flags, const CTxUndo* txundo)
+void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry, bool include_hex, int serialize_flags, const CTxUndo* txundo, TxVerbosity verbosity)
{
entry.pushKV("txid", tx.GetHash().GetHex());
entry.pushKV("hash", tx.GetWitnessHash().GetHex());
@@ -179,7 +179,7 @@ void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry,
// If available, use Undo data to calculate the fee. Note that txundo == nullptr
// for coinbase transactions and for transactions where undo data is unavailable.
- const bool calculate_fee = txundo != nullptr;
+ const bool have_undo = txundo != nullptr;
CAmount amt_total_in = 0;
CAmount amt_total_out = 0;
@@ -203,9 +203,28 @@ void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry,
}
in.pushKV("txinwitness", txinwitness);
}
- if (calculate_fee) {
- const CTxOut& prev_txout = txundo->vprevout[i].out;
+ if (have_undo) {
+ const Coin& prev_coin = txundo->vprevout[i];
+ const CTxOut& prev_txout = prev_coin.out;
+
amt_total_in += prev_txout.nValue;
+ switch (verbosity) {
+ case TxVerbosity::SHOW_TXID:
+ case TxVerbosity::SHOW_DETAILS:
+ break;
+
+ case TxVerbosity::SHOW_DETAILS_AND_PREVOUT:
+ UniValue o_script_pub_key(UniValue::VOBJ);
+ ScriptPubKeyToUniv(prev_txout.scriptPubKey, o_script_pub_key, /* includeHex */ true);
+
+ UniValue p(UniValue::VOBJ);
+ p.pushKV("generated", bool(prev_coin.fCoinBase));
+ p.pushKV("height", uint64_t(prev_coin.nHeight));
+ p.pushKV("value", ValueFromAmount(prev_txout.nValue));
+ p.pushKV("scriptPubKey", o_script_pub_key);
+ in.pushKV("prevout", p);
+ break;
+ }
}
in.pushKV("sequence", (int64_t)txin.nSequence);
vin.push_back(in);
@@ -226,13 +245,13 @@ void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry,
out.pushKV("scriptPubKey", o);
vout.push_back(out);
- if (calculate_fee) {
+ if (have_undo) {
amt_total_out += txout.nValue;
}
}
entry.pushKV("vout", vout);
- if (calculate_fee) {
+ if (have_undo) {
const CAmount fee = amt_total_in - amt_total_out;
CHECK_NONFATAL(MoneyRange(fee));
entry.pushKV("fee", ValueFromAmount(fee));
diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp
index bcaf746167..2fdc54464a 100644
--- a/src/dbwrapper.cpp
+++ b/src/dbwrapper.cpp
@@ -115,7 +115,7 @@ static leveldb::Options GetOptions(size_t nCacheSize)
}
CDBWrapper::CDBWrapper(const fs::path& path, size_t nCacheSize, bool fMemory, bool fWipe, bool obfuscate)
- : m_name{path.stem().string()}
+ : m_name{fs::PathToString(path.stem())}
{
penv = nullptr;
readoptions.verify_checksums = true;
@@ -129,21 +129,21 @@ CDBWrapper::CDBWrapper(const fs::path& path, size_t nCacheSize, bool fMemory, bo
options.env = penv;
} else {
if (fWipe) {
- LogPrintf("Wiping LevelDB in %s\n", path.string());
- leveldb::Status result = leveldb::DestroyDB(path.string(), options);
+ LogPrintf("Wiping LevelDB in %s\n", fs::PathToString(path));
+ leveldb::Status result = leveldb::DestroyDB(fs::PathToString(path), options);
dbwrapper_private::HandleError(result);
}
TryCreateDirectories(path);
- LogPrintf("Opening LevelDB in %s\n", path.string());
+ LogPrintf("Opening LevelDB in %s\n", fs::PathToString(path));
}
- leveldb::Status status = leveldb::DB::Open(options, path.string(), &pdb);
+ leveldb::Status status = leveldb::DB::Open(options, fs::PathToString(path), &pdb);
dbwrapper_private::HandleError(status);
LogPrintf("Opened LevelDB successfully\n");
if (gArgs.GetBoolArg("-forcecompactdb", false)) {
- LogPrintf("Starting database compaction of %s\n", path.string());
+ LogPrintf("Starting database compaction of %s\n", fs::PathToString(path));
pdb->CompactRange(nullptr, nullptr);
- LogPrintf("Finished database compaction of %s\n", path.string());
+ LogPrintf("Finished database compaction of %s\n", fs::PathToString(path));
}
// The base-case obfuscation key, which is a noop.
@@ -160,10 +160,10 @@ CDBWrapper::CDBWrapper(const fs::path& path, size_t nCacheSize, bool fMemory, bo
Write(OBFUSCATE_KEY_KEY, new_key);
obfuscate_key = new_key;
- LogPrintf("Wrote new obfuscate key for %s: %s\n", path.string(), HexStr(obfuscate_key));
+ LogPrintf("Wrote new obfuscate key for %s: %s\n", fs::PathToString(path), HexStr(obfuscate_key));
}
- LogPrintf("Using obfuscation key for %s: %s\n", path.string(), HexStr(obfuscate_key));
+ LogPrintf("Using obfuscation key for %s: %s\n", fs::PathToString(path), HexStr(obfuscate_key));
}
CDBWrapper::~CDBWrapper()
diff --git a/src/flatfile.cpp b/src/flatfile.cpp
index 151f1a38f1..929808c7fa 100644
--- a/src/flatfile.cpp
+++ b/src/flatfile.cpp
@@ -41,11 +41,11 @@ FILE* FlatFileSeq::Open(const FlatFilePos& pos, bool read_only)
if (!file && !read_only)
file = fsbridge::fopen(path, "wb+");
if (!file) {
- LogPrintf("Unable to open file %s\n", path.string());
+ LogPrintf("Unable to open file %s\n", fs::PathToString(path));
return nullptr;
}
if (pos.nPos && fseek(file, pos.nPos, SEEK_SET)) {
- LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, path.string());
+ LogPrintf("Unable to seek to position %u of %s\n", pos.nPos, fs::PathToString(path));
fclose(file);
return nullptr;
}
diff --git a/src/fs.cpp b/src/fs.cpp
index b9b3c46d8d..8cae7f32c6 100644
--- a/src/fs.cpp
+++ b/src/fs.cpp
@@ -24,7 +24,7 @@ namespace fsbridge {
FILE *fopen(const fs::path& p, const char *mode)
{
#ifndef WIN32
- return ::fopen(p.string().c_str(), mode);
+ return ::fopen(p.c_str(), mode);
#else
std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>,wchar_t> utf8_cvt;
return ::_wfopen(p.wstring().c_str(), utf8_cvt.from_bytes(mode).c_str());
@@ -46,7 +46,7 @@ static std::string GetErrorReason()
FileLock::FileLock(const fs::path& file)
{
- fd = open(file.string().c_str(), O_RDWR);
+ fd = open(file.c_str(), O_RDWR);
if (fd == -1) {
reason = GetErrorReason();
}
@@ -249,9 +249,9 @@ void ofstream::close()
#else // __GLIBCXX__
#if BOOST_VERSION >= 107700
-static_assert(sizeof(*BOOST_FILESYSTEM_C_STR(fs::path())) == sizeof(wchar_t),
+static_assert(sizeof(*BOOST_FILESYSTEM_C_STR(boost::filesystem::path())) == sizeof(wchar_t),
#else
-static_assert(sizeof(*fs::path().BOOST_FILESYSTEM_C_STR) == sizeof(wchar_t),
+static_assert(sizeof(*boost::filesystem::path().BOOST_FILESYSTEM_C_STR) == sizeof(wchar_t),
#endif // BOOST_VERSION >= 107700
"Warning: This build is using boost::filesystem ofstream and ifstream "
"implementations which will fail to open paths containing multibyte "
diff --git a/src/fs.h b/src/fs.h
index d77b90be66..4a0bf39e95 100644
--- a/src/fs.h
+++ b/src/fs.h
@@ -13,9 +13,132 @@
#include <boost/filesystem.hpp>
#include <boost/filesystem/fstream.hpp>
+#include <tinyformat.h>
/** Filesystem operations and types */
-namespace fs = boost::filesystem;
+namespace fs {
+
+using namespace boost::filesystem;
+
+/**
+ * Path class wrapper to prepare application code for transition from
+ * boost::filesystem library to std::filesystem implementation. The main
+ * purpose of the class is to define fs::path::u8string() and fs::u8path()
+ * functions not present in boost. It also blocks calls to the
+ * fs::path(std::string) implicit constructor and the fs::path::string()
+ * method, which worked well in the boost::filesystem implementation, but have
+ * unsafe and unpredictable behavior on Windows in the std::filesystem
+ * implementation (see implementation note in \ref PathToString for details).
+ */
+class path : public boost::filesystem::path
+{
+public:
+ using boost::filesystem::path::path;
+
+ // Allow path objects arguments for compatibility.
+ path(boost::filesystem::path path) : boost::filesystem::path::path(std::move(path)) {}
+ path& operator=(boost::filesystem::path path) { boost::filesystem::path::operator=(std::move(path)); return *this; }
+ path& operator/=(boost::filesystem::path path) { boost::filesystem::path::operator/=(std::move(path)); return *this; }
+
+ // Allow literal string arguments, which are safe as long as the literals are ASCII.
+ path(const char* c) : boost::filesystem::path(c) {}
+ path& operator=(const char* c) { boost::filesystem::path::operator=(c); return *this; }
+ path& operator/=(const char* c) { boost::filesystem::path::operator/=(c); return *this; }
+ path& append(const char* c) { boost::filesystem::path::append(c); return *this; }
+
+ // Disallow std::string arguments to avoid locale-dependent decoding on windows.
+ path(std::string) = delete;
+ path& operator=(std::string) = delete;
+ path& operator/=(std::string) = delete;
+ path& append(std::string) = delete;
+
+ // Disallow std::string conversion method to avoid locale-dependent encoding on windows.
+ std::string string() const = delete;
+
+ // Define UTF-8 string conversion method not present in boost::filesystem but present in std::filesystem.
+ std::string u8string() const { return boost::filesystem::path::string(); }
+};
+
+// Define UTF-8 string conversion function not present in boost::filesystem but present in std::filesystem.
+static inline path u8path(const std::string& string)
+{
+ return boost::filesystem::path(string);
+}
+
+// Disallow implicit std::string conversion for system_complete to avoid
+// locale-dependent encoding on windows.
+static inline path system_complete(const path& p)
+{
+ return boost::filesystem::system_complete(p);
+}
+
+// Disallow implicit std::string conversion for exists to avoid
+// locale-dependent encoding on windows.
+static inline bool exists(const path& p)
+{
+ return boost::filesystem::exists(p);
+}
+
+// Allow explicit quoted stream I/O.
+static inline auto quoted(const std::string& s)
+{
+ return boost::io::quoted(s, '&');
+}
+
+// Allow safe path append operations.
+static inline path operator+(path p1, path p2)
+{
+ p1 += std::move(p2);
+ return p1;
+}
+
+/**
+ * Convert path object to byte string. On POSIX, paths natively are byte
+ * strings so this is trivial. On Windows, paths natively are Unicode, so an
+ * encoding step is necessary.
+ *
+ * The inverse of \ref PathToString is \ref PathFromString. The strings
+ * returned and parsed by these functions can be used to call POSIX APIs, and
+ * for roundtrip conversion, logging, and debugging. But they are not
+ * guaranteed to be valid UTF-8, and are generally meant to be used internally,
+ * not externally. When communicating with external programs and libraries that
+ * require UTF-8, fs::path::u8string() and fs::u8path() methods can be used.
+ * For other applications, if support for non UTF-8 paths is required, or if
+ * higher-level JSON or XML or URI or C-style escapes are preferred, it may
+ * also be appropriate to use different path encoding functions.
+ *
+ * Implementation note: On Windows, the std::filesystem::path(string)
+ * constructor and std::filesystem::path::string() method are not safe to use
+ * here, because these methods encode the path using C++'s narrow multibyte
+ * encoding, which on Windows corresponds to the current "code page", which is
+ * unpredictable and typically not able to represent all valid paths. So
+ * std::filesystem::path::u8string() and std::filesystem::u8path() functions
+ * are used instead on Windows. On POSIX, u8string/u8path functions are not
+ * safe to use because paths are not always valid UTF-8, so the plain string
+ * methods, which do not transform the path, are used there.
+ */
+static inline std::string PathToString(const path& path)
+{
+#ifdef WIN32
+ return path.u8string();
+#else
+ static_assert(std::is_same<path::string_type, std::string>::value, "PathToString not implemented on this platform");
+ return path.boost::filesystem::path::string();
+#endif
+}
+
+/**
+ * Convert byte string to path object. Inverse of \ref PathToString.
+ */
+static inline path PathFromString(const std::string& string)
+{
+#ifdef WIN32
+ return u8path(string);
+#else
+ return boost::filesystem::path(string);
+#endif
+}
+} // namespace fs
/** Bridge operations to C stdio */
namespace fsbridge {
@@ -103,4 +226,11 @@ namespace fsbridge {
#endif // WIN32 && __GLIBCXX__
};
+// Disallow path operator<< formatting in tinyformat to avoid locale-dependent
+// encoding on windows.
+namespace tinyformat {
+template<> inline void formatValue(std::ostream&, const char*, const char*, int, const boost::filesystem::path&) = delete;
+template<> inline void formatValue(std::ostream&, const char*, const char*, int, const fs::path&) = delete;
+} // namespace tinyformat
+
#endif // BITCOIN_FS_H
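To make the intended round trip concrete, here is a minimal sketch (illustrative only, not part of the diff) of how the new helpers pair up: `fs::PathToString()` produces a byte string that is safe for logging and string formatting, and `fs::PathFromString()` converts it back on the same platform.

```
// Illustration only: round-tripping a path through the new helpers.
#include <fs.h>

#include <cassert>
#include <string>

void RoundTripExample(const fs::path& datadir)
{
    const std::string as_string{fs::PathToString(datadir)}; // safe for logs and strprintf
    const fs::path restored{fs::PathFromString(as_string)}; // inverse conversion
    assert(restored == datadir); // holds when converting back on the same platform
}
```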
diff --git a/src/i2p.cpp b/src/i2p.cpp
index 5e7e42fb77..35ac8731f2 100644
--- a/src/i2p.cpp
+++ b/src/i2p.cpp
@@ -328,7 +328,7 @@ void Session::GenerateAndSavePrivateKey(const Sock& sock)
if (!WriteBinaryFile(m_private_key_file,
std::string(m_private_key.begin(), m_private_key.end()))) {
throw std::runtime_error(
- strprintf("Cannot save I2P private key to %s", m_private_key_file));
+ strprintf("Cannot save I2P private key to %s", fs::quoted(fs::PathToString(m_private_key_file))));
}
}
diff --git a/src/init.cpp b/src/init.cpp
index 4495ded64d..164b7bb55d 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -113,7 +113,7 @@ static const char* BITCOIN_PID_FILENAME = "bitcoind.pid";
static fs::path GetPidFile(const ArgsManager& args)
{
- return AbsPathForConfigVal(fs::path(args.GetArg("-pid", BITCOIN_PID_FILENAME)));
+ return AbsPathForConfigVal(fs::PathFromString(args.GetArg("-pid", BITCOIN_PID_FILENAME)));
}
[[nodiscard]] static bool CreatePidFile(const ArgsManager& args)
@@ -127,7 +127,7 @@ static fs::path GetPidFile(const ArgsManager& args)
#endif
return true;
} else {
- return InitError(strprintf(_("Unable to create the PID file '%s': %s"), GetPidFile(args).string(), std::strerror(errno)));
+ return InitError(strprintf(_("Unable to create the PID file '%s': %s"), fs::PathToString(GetPidFile(args)), std::strerror(errno)));
}
}
@@ -1062,10 +1062,10 @@ static bool LockDataDirectory(bool probeOnly)
// Make sure only a single Bitcoin process is using the data directory.
fs::path datadir = gArgs.GetDataDirNet();
if (!DirIsWritable(datadir)) {
- return InitError(strprintf(_("Cannot write to data directory '%s'; check permissions."), datadir.string()));
+ return InitError(strprintf(_("Cannot write to data directory '%s'; check permissions."), fs::PathToString(datadir)));
}
if (!LockDirectory(datadir, ".lock", probeOnly)) {
- return InitError(strprintf(_("Cannot obtain a lock on data directory %s. %s is probably already running."), datadir.string(), PACKAGE_NAME));
+ return InitError(strprintf(_("Cannot obtain a lock on data directory %s. %s is probably already running."), fs::PathToString(datadir), PACKAGE_NAME));
}
return true;
}
@@ -1126,12 +1126,12 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
LogPrintf("Using at most %i automatic connections (%i file descriptors available)\n", nMaxConnections, nFD);
// Warn about relative -datadir path.
- if (args.IsArgSet("-datadir") && !fs::path(args.GetArg("-datadir", "")).is_absolute()) {
+ if (args.IsArgSet("-datadir") && !fs::PathFromString(args.GetArg("-datadir", "")).is_absolute()) {
LogPrintf("Warning: relative datadir option '%s' specified, which will be interpreted relative to the " /* Continued */
"current working directory '%s'. This is fragile, because if bitcoin is started in the future "
"from a different location, it will be unable to locate the current data files. There could "
"also be data loss if bitcoin is started while in a temporary directory.\n",
- args.GetArg("-datadir", ""), fs::current_path().string());
+ args.GetArg("-datadir", ""), fs::PathToString(fs::current_path()));
}
InitSignatureCache();
@@ -1215,20 +1215,20 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// Read asmap file if configured
std::vector<bool> asmap;
if (args.IsArgSet("-asmap")) {
- fs::path asmap_path = fs::path(args.GetArg("-asmap", ""));
+ fs::path asmap_path = fs::PathFromString(args.GetArg("-asmap", ""));
if (asmap_path.empty()) {
- asmap_path = DEFAULT_ASMAP_FILENAME;
+ asmap_path = fs::PathFromString(DEFAULT_ASMAP_FILENAME);
}
if (!asmap_path.is_absolute()) {
asmap_path = gArgs.GetDataDirNet() / asmap_path;
}
if (!fs::exists(asmap_path)) {
- InitError(strprintf(_("Could not find asmap file %s"), asmap_path));
+ InitError(strprintf(_("Could not find asmap file %s"), fs::quoted(fs::PathToString(asmap_path))));
return false;
}
asmap = DecodeAsmap(asmap_path);
if (asmap.size() == 0) {
- InitError(strprintf(_("Could not parse asmap file %s"), asmap_path));
+ InitError(strprintf(_("Could not parse asmap file %s"), fs::quoted(fs::PathToString(asmap_path))));
return false;
}
const uint256 asmap_version = SerializeHash(asmap);
@@ -1653,11 +1653,11 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// ********************************************************* Step 11: import blocks
if (!CheckDiskSpace(gArgs.GetDataDirNet())) {
- InitError(strprintf(_("Error: Disk space is low for %s"), gArgs.GetDataDirNet()));
+ InitError(strprintf(_("Error: Disk space is low for %s"), fs::quoted(fs::PathToString(gArgs.GetDataDirNet()))));
return false;
}
if (!CheckDiskSpace(gArgs.GetBlocksDirPath())) {
- InitError(strprintf(_("Error: Disk space is low for %s"), gArgs.GetBlocksDirPath()));
+ InitError(strprintf(_("Error: Disk space is low for %s"), fs::quoted(fs::PathToString(gArgs.GetBlocksDirPath()))));
return false;
}
@@ -1685,7 +1685,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
std::vector<fs::path> vImportFiles;
for (const std::string& strFile : args.GetArgs("-loadblock")) {
- vImportFiles.push_back(strFile);
+ vImportFiles.push_back(fs::PathFromString(strFile));
}
chainman.m_load_block = std::thread(&util::TraceThread, "loadblk", [=, &chainman, &args] {
diff --git a/src/init/common.cpp b/src/init/common.cpp
index 5c1f469081..8f9e0ebc87 100644
--- a/src/init/common.cpp
+++ b/src/init/common.cpp
@@ -81,7 +81,7 @@ void AddLoggingArgs(ArgsManager& argsman)
void SetLoggingOptions(const ArgsManager& args)
{
LogInstance().m_print_to_file = !args.IsArgNegated("-debuglogfile");
- LogInstance().m_file_path = AbsPathForConfigVal(args.GetArg("-debuglogfile", DEFAULT_DEBUGLOGFILE));
+ LogInstance().m_file_path = AbsPathForConfigVal(fs::PathFromString(args.GetArg("-debuglogfile", DEFAULT_DEBUGLOGFILE)));
LogInstance().m_print_to_console = args.GetBoolArg("-printtoconsole", !args.GetBoolArg("-daemon", false));
LogInstance().m_log_timestamps = args.GetBoolArg("-logtimestamps", DEFAULT_LOGTIMESTAMPS);
LogInstance().m_log_time_micros = args.GetBoolArg("-logtimemicros", DEFAULT_LOGTIMEMICROS);
@@ -128,24 +128,24 @@ bool StartLogging(const ArgsManager& args)
}
if (!LogInstance().StartLogging()) {
return InitError(strprintf(Untranslated("Could not open debug log file %s"),
- LogInstance().m_file_path.string()));
+ fs::PathToString(LogInstance().m_file_path)));
}
if (!LogInstance().m_log_timestamps)
LogPrintf("Startup time: %s\n", FormatISO8601DateTime(GetTime()));
- LogPrintf("Default data directory %s\n", GetDefaultDataDir().string());
- LogPrintf("Using data directory %s\n", gArgs.GetDataDirNet().string());
+ LogPrintf("Default data directory %s\n", fs::PathToString(GetDefaultDataDir()));
+ LogPrintf("Using data directory %s\n", fs::PathToString(gArgs.GetDataDirNet()));
// Only log conf file usage message if conf file actually exists.
fs::path config_file_path = GetConfigFile(args.GetArg("-conf", BITCOIN_CONF_FILENAME));
if (fs::exists(config_file_path)) {
- LogPrintf("Config file: %s\n", config_file_path.string());
+ LogPrintf("Config file: %s\n", fs::PathToString(config_file_path));
} else if (args.IsArgSet("-conf")) {
// Warn if no conf file exists at path provided by user
- InitWarning(strprintf(_("The specified config file %s does not exist"), config_file_path.string()));
+ InitWarning(strprintf(_("The specified config file %s does not exist"), fs::PathToString(config_file_path)));
} else {
// Not categorizing as "Warning" because it's the default behavior
- LogPrintf("Config file: %s (not found, skipping)\n", config_file_path.string());
+ LogPrintf("Config file: %s (not found, skipping)\n", fs::PathToString(config_file_path));
}
// Log the config arguments to debug.log
diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h
index 9a97cad1f8..d4ceb517dd 100644
--- a/src/interfaces/chain.h
+++ b/src/interfaces/chain.h
@@ -289,7 +289,7 @@ public:
virtual void requestMempoolTransactions(Notifications& notifications) = 0;
//! Check if Taproot has activated
- virtual bool isTaprootActive() const = 0;
+ virtual bool isTaprootActive() = 0;
};
//! Interface to let node manage chain clients (wallets, or maybe tools for
diff --git a/src/ipc/process.cpp b/src/ipc/process.cpp
index 43ed1f1bae..9036b80c45 100644
--- a/src/ipc/process.cpp
+++ b/src/ipc/process.cpp
@@ -30,8 +30,8 @@ public:
return mp::SpawnProcess(pid, [&](int fd) {
fs::path path = argv0_path;
path.remove_filename();
- path.append(new_exe_name);
- return std::vector<std::string>{path.string(), "-ipcfd", strprintf("%i", fd)};
+ path /= fs::PathFromString(new_exe_name);
+ return std::vector<std::string>{fs::PathToString(path), "-ipcfd", strprintf("%i", fd)};
});
}
int waitSpawned(int pid) override { return mp::WaitProcess(pid); }
diff --git a/src/logging.cpp b/src/logging.cpp
index a352e106e5..1efce21bdb 100644
--- a/src/logging.cpp
+++ b/src/logging.cpp
@@ -161,6 +161,7 @@ const CLogCategoryDesc LogCategories[] =
{BCLog::IPC, "ipc"},
{BCLog::LOCK, "lock"},
{BCLog::UTIL, "util"},
+ {BCLog::BLOCKSTORE, "blockstorage"},
{BCLog::ALL, "1"},
{BCLog::ALL, "all"},
};
diff --git a/src/logging.h b/src/logging.h
index 02e64a7c48..f46104364c 100644
--- a/src/logging.h
+++ b/src/logging.h
@@ -61,6 +61,7 @@ namespace BCLog {
IPC = (1 << 23),
LOCK = (1 << 24),
UTIL = (1 << 25),
+ BLOCKSTORE = (1 << 26),
ALL = ~(uint32_t)0,
};
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index 0b7df9bd9a..53bc2b5069 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -68,13 +68,14 @@ void CleanupBlockRevFiles()
LogPrintf("Removing unusable blk?????.dat and rev?????.dat files for -reindex with -prune\n");
fs::path blocksdir = gArgs.GetBlocksDirPath();
for (fs::directory_iterator it(blocksdir); it != fs::directory_iterator(); it++) {
+ const std::string path = fs::PathToString(it->path().filename());
if (fs::is_regular_file(*it) &&
- it->path().filename().string().length() == 12 &&
- it->path().filename().string().substr(8,4) == ".dat")
+ path.length() == 12 &&
+ path.substr(8,4) == ".dat")
{
- if (it->path().filename().string().substr(0, 3) == "blk") {
- mapBlockFiles[it->path().filename().string().substr(3, 5)] = it->path();
- } else if (it->path().filename().string().substr(0, 3) == "rev") {
+ if (path.substr(0, 3) == "blk") {
+ mapBlockFiles[path.substr(3, 5)] = it->path();
+ } else if (path.substr(0, 3) == "rev") {
remove(it->path());
}
}
@@ -204,7 +205,7 @@ void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune)
FlatFilePos pos(*it, 0);
fs::remove(BlockFileSeq().FileName(pos));
fs::remove(UndoFileSeq().FileName(pos));
- LogPrintf("Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
+ LogPrint(BCLog::BLOCKSTORE, "Prune: %s deleted blk/rev (%05u)\n", __func__, *it);
}
}
@@ -261,7 +262,7 @@ bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight,
if ((int)nFile != nLastBlockFile) {
if (!fKnown) {
- LogPrint(BCLog::VALIDATION, "Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
+ LogPrint(BCLog::BLOCKSTORE, "Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString());
}
FlushBlockFile(!fKnown, finalize_undo);
nLastBlockFile = nFile;
@@ -527,14 +528,14 @@ void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImportFile
for (const fs::path& path : vImportFiles) {
FILE* file = fsbridge::fopen(path, "rb");
if (file) {
- LogPrintf("Importing blocks file %s...\n", path.string());
+ LogPrintf("Importing blocks file %s...\n", fs::PathToString(path));
chainman.ActiveChainstate().LoadExternalBlockFile(file);
if (ShutdownRequested()) {
LogPrintf("Shutdown requested. Exit %s\n", __func__);
return;
}
} else {
- LogPrintf("Warning: Could not open blocks file %s\n", path.string());
+ LogPrintf("Warning: Could not open blocks file %s\n", fs::PathToString(path));
}
}
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index 5b6d8416a7..73f4036057 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -698,7 +698,7 @@ public:
notifications.transactionAddedToMempool(entry.GetSharedTx(), 0 /* mempool_sequence */);
}
}
- bool isTaprootActive() const override
+ bool isTaprootActive() override
{
LOCK(::cs_main);
const CBlockIndex* tip = Assert(m_node.chainman)->ActiveChain().Tip();
diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp
index 2e2061d0a1..d8c21bd833 100644
--- a/src/policy/fees.cpp
+++ b/src/policy/fees.cpp
@@ -527,7 +527,7 @@ CBlockPolicyEstimator::CBlockPolicyEstimator()
fs::path est_filepath = gArgs.GetDataDirNet() / FEE_ESTIMATES_FILENAME;
CAutoFile est_file(fsbridge::fopen(est_filepath, "rb"), SER_DISK, CLIENT_VERSION);
if (est_file.IsNull() || !Read(est_file)) {
- LogPrintf("Failed to read fee estimates from %s. Continue anyway.\n", est_filepath.string());
+ LogPrintf("Failed to read fee estimates from %s. Continue anyway.\n", fs::PathToString(est_filepath));
}
}
@@ -887,7 +887,7 @@ void CBlockPolicyEstimator::Flush() {
fs::path est_filepath = gArgs.GetDataDirNet() / FEE_ESTIMATES_FILENAME;
CAutoFile est_file(fsbridge::fopen(est_filepath, "wb"), SER_DISK, CLIENT_VERSION);
if (est_file.IsNull() || !Write(est_file)) {
- LogPrintf("Failed to write fee estimates to %s. Continue anyway.\n", est_filepath.string());
+ LogPrintf("Failed to write fee estimates to %s. Continue anyway.\n", fs::PathToString(est_filepath));
}
}
diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp
index 9e433584e7..fced397e51 100644
--- a/src/policy/policy.cpp
+++ b/src/policy/policy.cpp
@@ -22,7 +22,7 @@ CAmount GetDustThreshold(const CTxOut& txout, const CFeeRate& dustRelayFeeIn)
// so dust is a spendable txout less than
// 182*dustRelayFee/1000 (in satoshis).
// 546 satoshis at the default rate of 3000 sat/kvB.
- // A typical spendable segwit txout is 31 bytes big, and will
+ // A typical spendable segwit P2WPKH txout is 31 bytes big, and will
// need a CTxIn of at least 67 bytes to spend:
// so dust is a spendable txout less than
// 98*dustRelayFee/1000 (in satoshis).
@@ -34,6 +34,11 @@ CAmount GetDustThreshold(const CTxOut& txout, const CFeeRate& dustRelayFeeIn)
int witnessversion = 0;
std::vector<unsigned char> witnessprogram;
+    // Note this computation is for spending a Segwit v0 P2WPKH output (a 33-byte
+    // public key + an ECDSA signature). For Segwit v1 Taproot outputs the minimum
+ // satisfaction is lower (a single BIP340 signature) but this computation was
+ // kept to not further reduce the dust level.
+ // See discussion in https://github.com/bitcoin/bitcoin/pull/22779 for details.
if (txout.scriptPubKey.IsWitnessProgram(witnessversion, witnessprogram)) {
// sum the sizes of the parts of a transaction input
// with 75% segwit discount applied to the script size.
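Editor's note: to make the comment above concrete, the 31-byte P2WPKH output plus the discounted ~67-byte spending input gives a threshold of 98 * dustRelayFee / 1000, which at the default 3000 sat/kvB cited in the comment comes out to 294 satoshis, versus 546 for the legacy 182-byte case. A small sketch reproducing that arithmetic:

    #include <cstdint>
    #include <iostream>

    int main()
    {
        const int64_t dust_relay_fee = 3000;   // sat/kvB, the default rate cited in the comment above
        const int64_t legacy_size = 182;       // non-segwit txout plus spending input, bytes
        const int64_t segwit_size = 31 + 67;   // P2WPKH txout plus discounted spending input, bytes
        std::cout << legacy_size * dust_relay_fee / 1000 << '\n'; // 546
        std::cout << segwit_size * dust_relay_fee / 1000 << '\n'; // 294
    }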
diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp
index 610637360b..b68ce39b53 100644
--- a/src/qt/bitcoingui.cpp
+++ b/src/qt/bitcoingui.cpp
@@ -1304,8 +1304,6 @@ void BitcoinGUI::setHDStatus(bool privkeyDisabled, int hdEnabled)
labelWalletHDStatusIcon->setThemedPixmap(privkeyDisabled ? QStringLiteral(":/icons/eye") : hdEnabled ? QStringLiteral(":/icons/hd_enabled") : QStringLiteral(":/icons/hd_disabled"), STATUSBAR_ICONSIZE, STATUSBAR_ICONSIZE);
labelWalletHDStatusIcon->setToolTip(privkeyDisabled ? tr("Private key <b>disabled</b>") : hdEnabled ? tr("HD key generation is <b>enabled</b>") : tr("HD key generation is <b>disabled</b>"));
labelWalletHDStatusIcon->show();
- // eventually disable the QLabel to set its opacity to 50%
- labelWalletHDStatusIcon->setEnabled(hdEnabled);
}
void BitcoinGUI::setEncryptionStatus(int status)
diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp
index 12d3a48d01..4262866f32 100644
--- a/src/qt/guiutil.cpp
+++ b/src/qt/guiutil.cpp
@@ -653,12 +653,12 @@ void setClipboard(const QString& str)
fs::path qstringToBoostPath(const QString &path)
{
- return fs::path(path.toStdString());
+ return fs::u8path(path.toStdString());
}
QString boostPathToQString(const fs::path &path)
{
- return QString::fromStdString(path.string());
+ return QString::fromStdString(path.u8string());
}
QString NetworkToQString(Network net)
diff --git a/src/qt/intro.cpp b/src/qt/intro.cpp
index 4c78fba752..2ca4b6a21e 100644
--- a/src/qt/intro.cpp
+++ b/src/qt/intro.cpp
@@ -263,7 +263,7 @@ bool Intro::showIfNeeded(bool& did_show_intro, int64_t& prune_MiB)
* (to be consistent with bitcoind behavior)
*/
if(dataDir != GUIUtil::getDefaultDataDirectory()) {
- gArgs.SoftSetArg("-datadir", GUIUtil::qstringToBoostPath(dataDir).string()); // use OS locale for path setting
+ gArgs.SoftSetArg("-datadir", fs::PathToString(GUIUtil::qstringToBoostPath(dataDir))); // use OS locale for path setting
}
return true;
}
diff --git a/src/rest.cpp b/src/rest.cpp
index e50ab33e54..3746fd752a 100644
--- a/src/rest.cpp
+++ b/src/rest.cpp
@@ -189,9 +189,10 @@ static bool rest_headers(const std::any& context,
if (path.size() != 2)
return RESTERR(req, HTTP_BAD_REQUEST, "No header count specified. Use /rest/headers/<count>/<hash>.<ext>.");
- long count = strtol(path[0].c_str(), nullptr, 10);
- if (count < 1 || count > 2000)
+ const auto parsed_count{ToIntegral<size_t>(path[0])};
+ if (!parsed_count.has_value() || *parsed_count < 1 || *parsed_count > 2000) {
return RESTERR(req, HTTP_BAD_REQUEST, "Header count out of range: " + path[0]);
+ }
std::string hashStr = path[1];
uint256 hash;
@@ -199,8 +200,8 @@ static bool rest_headers(const std::any& context,
return RESTERR(req, HTTP_BAD_REQUEST, "Invalid hash: " + hashStr);
const CBlockIndex* tip = nullptr;
- std::vector<const CBlockIndex *> headers;
- headers.reserve(count);
+ std::vector<const CBlockIndex*> headers;
+ headers.reserve(*parsed_count);
{
ChainstateManager* maybe_chainman = GetChainman(context, req);
if (!maybe_chainman) return false;
@@ -211,8 +212,9 @@ static bool rest_headers(const std::any& context,
const CBlockIndex* pindex = chainman.m_blockman.LookupBlockIndex(hash);
while (pindex != nullptr && active_chain.Contains(pindex)) {
headers.push_back(pindex);
- if (headers.size() == (unsigned long)count)
+ if (headers.size() == *parsed_count) {
break;
+ }
pindex = active_chain.Next(pindex);
}
}
@@ -260,7 +262,7 @@ static bool rest_headers(const std::any& context,
static bool rest_block(const std::any& context,
HTTPRequest* req,
const std::string& strURIPart,
- bool showTxDetails)
+ TxVerbosity tx_verbosity)
{
if (!CheckWarmup(req))
return false;
@@ -312,7 +314,7 @@ static bool rest_block(const std::any& context,
}
case RetFormat::JSON: {
- UniValue objBlock = blockToJSON(block, tip, pblockindex, showTxDetails);
+ UniValue objBlock = blockToJSON(block, tip, pblockindex, tx_verbosity);
std::string strJSON = objBlock.write() + "\n";
req->WriteHeader("Content-Type", "application/json");
req->WriteReply(HTTP_OK, strJSON);
@@ -327,12 +329,12 @@ static bool rest_block(const std::any& context,
static bool rest_block_extended(const std::any& context, HTTPRequest* req, const std::string& strURIPart)
{
- return rest_block(context, req, strURIPart, true);
+ return rest_block(context, req, strURIPart, TxVerbosity::SHOW_DETAILS_AND_PREVOUT);
}
static bool rest_block_notxdetails(const std::any& context, HTTPRequest* req, const std::string& strURIPart)
{
- return rest_block(context, req, strURIPart, false);
+ return rest_block(context, req, strURIPart, TxVerbosity::SHOW_TXID);
}
// A bit of a hack - dependency on a function defined in rpc/blockchain.cpp
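Editor's note: the strtol call is replaced by ToIntegral<size_t>, Bitcoin Core's locale-independent parser that yields an empty optional instead of silently accepting trailing garbage or out-of-range values. A rough standalone equivalent built on std::from_chars (a sketch only, not the util/strencodings implementation):

    #include <charconv>
    #include <cstddef>
    #include <iostream>
    #include <optional>
    #include <string>

    // Parse the whole string as an unsigned integer; reject partial or
    // out-of-range input, which is what the REST handler above relies on.
    std::optional<size_t> ParseCount(const std::string& s)
    {
        size_t value{};
        const auto [ptr, ec] = std::from_chars(s.data(), s.data() + s.size(), value);
        if (ec != std::errc{} || ptr != s.data() + s.size()) return std::nullopt;
        return value;
    }

    int main()
    {
        std::cout << ParseCount("2000").value_or(0) << '\n';        // 2000
        std::cout << ParseCount("20 headers").has_value() << '\n';  // 0 (rejected, unlike strtol)
    }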
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index ac746de32f..dadd82e03f 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -200,7 +200,7 @@ UniValue blockheaderToJSON(const CBlockIndex* tip, const CBlockIndex* blockindex
return result;
}
-UniValue blockToJSON(const CBlock& block, const CBlockIndex* tip, const CBlockIndex* blockindex, bool txDetails)
+UniValue blockToJSON(const CBlock& block, const CBlockIndex* tip, const CBlockIndex* blockindex, TxVerbosity verbosity)
{
UniValue result = blockheaderToJSON(tip, blockindex);
@@ -208,22 +208,29 @@ UniValue blockToJSON(const CBlock& block, const CBlockIndex* tip, const CBlockIn
result.pushKV("size", (int)::GetSerializeSize(block, PROTOCOL_VERSION));
result.pushKV("weight", (int)::GetBlockWeight(block));
UniValue txs(UniValue::VARR);
- if (txDetails) {
- CBlockUndo blockUndo;
- const bool have_undo = !IsBlockPruned(blockindex) && UndoReadFromDisk(blockUndo, blockindex);
- for (size_t i = 0; i < block.vtx.size(); ++i) {
- const CTransactionRef& tx = block.vtx.at(i);
- // coinbase transaction (i == 0) doesn't have undo data
- const CTxUndo* txundo = (have_undo && i) ? &blockUndo.vtxundo.at(i - 1) : nullptr;
- UniValue objTx(UniValue::VOBJ);
- TxToUniv(*tx, uint256(), objTx, true, RPCSerializationFlags(), txundo);
- txs.push_back(objTx);
- }
- } else {
- for (const CTransactionRef& tx : block.vtx) {
- txs.push_back(tx->GetHash().GetHex());
- }
+
+ switch (verbosity) {
+ case TxVerbosity::SHOW_TXID:
+ for (const CTransactionRef& tx : block.vtx) {
+ txs.push_back(tx->GetHash().GetHex());
+ }
+ break;
+
+ case TxVerbosity::SHOW_DETAILS:
+ case TxVerbosity::SHOW_DETAILS_AND_PREVOUT:
+ CBlockUndo blockUndo;
+ const bool have_undo = !IsBlockPruned(blockindex) && UndoReadFromDisk(blockUndo, blockindex);
+
+ for (size_t i = 0; i < block.vtx.size(); ++i) {
+ const CTransactionRef& tx = block.vtx.at(i);
+ // coinbase transaction (i.e. i == 0) doesn't have undo data
+ const CTxUndo* txundo = (have_undo && i > 0) ? &blockUndo.vtxundo.at(i - 1) : nullptr;
+ UniValue objTx(UniValue::VOBJ);
+ TxToUniv(*tx, uint256(), objTx, true, RPCSerializationFlags(), txundo, verbosity);
+ txs.push_back(objTx);
+ }
}
+
result.pushKV("tx", txs);
return result;
@@ -931,7 +938,8 @@ static RPCHelpMan getblock()
return RPCHelpMan{"getblock",
"\nIf verbosity is 0, returns a string that is serialized, hex-encoded data for block 'hash'.\n"
"If verbosity is 1, returns an Object with information about block <hash>.\n"
- "If verbosity is 2, returns an Object with information about block <hash> and information about each transaction. \n",
+ "If verbosity is 2, returns an Object with information about block <hash> and information about each transaction.\n"
+ "If verbosity is 3, returns an Object with information about block <hash> and information about each transaction, including prevout information for inputs (only for unpruned blocks in the current best chain).\n",
{
{"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The block hash"},
{"verbosity|verbose", RPCArg::Type::NUM, RPCArg::Default{1}, "0 for hex-encoded data, 1 for a json object, and 2 for json object with transaction data"},
@@ -1018,7 +1026,16 @@ static RPCHelpMan getblock()
return strHex;
}
- return blockToJSON(block, tip, pblockindex, verbosity >= 2);
+ TxVerbosity tx_verbosity;
+ if (verbosity == 1) {
+ tx_verbosity = TxVerbosity::SHOW_TXID;
+ } else if (verbosity == 2) {
+ tx_verbosity = TxVerbosity::SHOW_DETAILS;
+ } else {
+ tx_verbosity = TxVerbosity::SHOW_DETAILS_AND_PREVOUT;
+ }
+
+ return blockToJSON(block, tip, pblockindex, tx_verbosity);
},
};
}
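Editor's note: with the mapping above, verbosity levels 0-2 keep their previous behaviour, while getblock <blockhash> 3 additionally includes prevout information for each input, provided the block is unpruned and in the current best chain so its undo data can be read (the have_undo check in blockToJSON above).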
@@ -2537,15 +2554,15 @@ static RPCHelpMan dumptxoutset()
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
- const fs::path path = fsbridge::AbsPathJoin(gArgs.GetDataDirNet(), request.params[0].get_str());
+ const fs::path path = fsbridge::AbsPathJoin(gArgs.GetDataDirNet(), fs::u8path(request.params[0].get_str()));
// Write to a temporary path and then move into `path` on completion
// to avoid confusion due to an interruption.
- const fs::path temppath = fsbridge::AbsPathJoin(gArgs.GetDataDirNet(), request.params[0].get_str() + ".incomplete");
+ const fs::path temppath = fsbridge::AbsPathJoin(gArgs.GetDataDirNet(), fs::u8path(request.params[0].get_str() + ".incomplete"));
if (fs::exists(path)) {
throw JSONRPCError(
RPC_INVALID_PARAMETER,
- path.string() + " already exists. If you are sure this is what you want, "
+ path.u8string() + " already exists. If you are sure this is what you want, "
"move it out of the way first");
}
@@ -2555,7 +2572,7 @@ static RPCHelpMan dumptxoutset()
UniValue result = CreateUTXOSnapshot(node, node.chainman->ActiveChainstate(), afile);
fs::rename(temppath, path);
- result.pushKV("path", path.string());
+ result.pushKV("path", path.u8string());
return result;
},
};
diff --git a/src/rpc/blockchain.h b/src/rpc/blockchain.h
index 09e471afdd..d9c6761f47 100644
--- a/src/rpc/blockchain.h
+++ b/src/rpc/blockchain.h
@@ -6,6 +6,7 @@
#define BITCOIN_RPC_BLOCKCHAIN_H
#include <consensus/amount.h>
+#include <core_io.h>
#include <streams.h>
#include <sync.h>
@@ -38,7 +39,7 @@ double GetDifficulty(const CBlockIndex* blockindex);
void RPCNotifyBlockChange(const CBlockIndex*);
/** Block description to JSON */
-UniValue blockToJSON(const CBlock& block, const CBlockIndex* tip, const CBlockIndex* blockindex, bool txDetails = false) LOCKS_EXCLUDED(cs_main);
+UniValue blockToJSON(const CBlock& block, const CBlockIndex* tip, const CBlockIndex* blockindex, TxVerbosity verbosity) LOCKS_EXCLUDED(cs_main);
/** Mempool information to JSON */
UniValue MempoolInfoToJSON(const CTxMemPool& pool);
diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp
index a7866474e1..3245e04cdf 100644
--- a/src/rpc/request.cpp
+++ b/src/rpc/request.cpp
@@ -70,7 +70,7 @@ static fs::path GetAuthCookieFile(bool temp=false)
if (temp) {
arg += ".tmp";
}
- return AbsPathForConfigVal(fs::path(arg));
+ return AbsPathForConfigVal(fs::PathFromString(arg));
}
bool GenerateAuthCookie(std::string *cookie_out)
@@ -87,7 +87,7 @@ bool GenerateAuthCookie(std::string *cookie_out)
fs::path filepath_tmp = GetAuthCookieFile(true);
file.open(filepath_tmp);
if (!file.is_open()) {
- LogPrintf("Unable to open cookie authentication file %s for writing\n", filepath_tmp.string());
+ LogPrintf("Unable to open cookie authentication file %s for writing\n", fs::PathToString(filepath_tmp));
return false;
}
file << cookie;
@@ -95,10 +95,10 @@ bool GenerateAuthCookie(std::string *cookie_out)
fs::path filepath = GetAuthCookieFile(false);
if (!RenameOver(filepath_tmp, filepath)) {
- LogPrintf("Unable to rename cookie authentication file %s to %s\n", filepath_tmp.string(), filepath.string());
+ LogPrintf("Unable to rename cookie authentication file %s to %s\n", fs::PathToString(filepath_tmp), fs::PathToString(filepath));
return false;
}
- LogPrintf("Generated RPC authentication cookie %s\n", filepath.string());
+ LogPrintf("Generated RPC authentication cookie %s\n", fs::PathToString(filepath));
if (cookie_out)
*cookie_out = cookie;
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index 0d02ec5c47..9bcfba3507 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -239,7 +239,7 @@ static RPCHelpMan getrpcinfo()
UniValue result(UniValue::VOBJ);
result.pushKV("active_commands", active_commands);
- const std::string path = LogInstance().m_file_path.string();
+ const std::string path = LogInstance().m_file_path.u8string();
UniValue log_path(UniValue::VSTR, path);
result.pushKV("logpath", log_path);
diff --git a/src/test/fs_tests.cpp b/src/test/fs_tests.cpp
index 526a3c27be..ecb838a7dd 100644
--- a/src/test/fs_tests.cpp
+++ b/src/test/fs_tests.cpp
@@ -11,6 +11,33 @@
BOOST_FIXTURE_TEST_SUITE(fs_tests, BasicTestingSetup)
+BOOST_AUTO_TEST_CASE(fsbridge_pathtostring)
+{
+ std::string u8_str = "fs_tests_₿_🏃";
+ BOOST_CHECK_EQUAL(fs::PathToString(fs::PathFromString(u8_str)), u8_str);
+ BOOST_CHECK_EQUAL(fs::u8path(u8_str).u8string(), u8_str);
+ BOOST_CHECK_EQUAL(fs::PathFromString(u8_str).u8string(), u8_str);
+ BOOST_CHECK_EQUAL(fs::PathToString(fs::u8path(u8_str)), u8_str);
+#ifndef WIN32
+ // On non-windows systems, verify that arbitrary byte strings containing
+ // invalid UTF-8 can be round tripped successfully with PathToString and
+ // PathFromString. On non-windows systems, paths are just byte strings so
+ // these functions do not do any encoding. On windows, paths are Unicode,
+ // and these functions do encoding and decoding, so the behavior of this
+ // test would be undefined.
+ std::string invalid_u8_str = "\xf0";
+ BOOST_CHECK_EQUAL(invalid_u8_str.size(), 1);
+ BOOST_CHECK_EQUAL(fs::PathToString(fs::PathFromString(invalid_u8_str)), invalid_u8_str);
+#endif
+}
+
+BOOST_AUTO_TEST_CASE(fsbridge_stem)
+{
+ std::string test_filename = "fs_tests_₿_🏃.dat";
+ std::string expected_stem = "fs_tests_₿_🏃";
+ BOOST_CHECK_EQUAL(fs::PathToString(fs::PathFromString(test_filename).stem()), expected_stem);
+}
+
BOOST_AUTO_TEST_CASE(fsbridge_fstream)
{
fs::path tmpfolder = m_args.GetDataDirBase();
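Editor's note: the two new tests pin down the contract of the helpers exercised throughout this change: PathFromString/PathToString round-trip arbitrary bytes on POSIX, where native paths are plain byte strings, while fs::u8path/u8string interpret the bytes as UTF-8. Roughly what that looks like with plain std::filesystem under C++17 (a sketch; the project's fs wrappers add Windows-specific conversions on top of this):

    #include <filesystem>
    #include <iostream>
    #include <string>

    namespace stdfs = std::filesystem;

    int main()
    {
        const std::string name = "fs_tests_₿_🏃.dat";
        const stdfs::path p = stdfs::u8path(name);   // construct from UTF-8 bytes
        std::cout << p.u8string() << '\n';           // reads the same UTF-8 bytes back
        std::cout << p.stem().u8string() << '\n';    // "fs_tests_₿_🏃", as fsbridge_stem checks
    }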
diff --git a/src/test/fuzz/banman.cpp b/src/test/fuzz/banman.cpp
index 561cc83c72..fbba25c404 100644
--- a/src/test/fuzz/banman.cpp
+++ b/src/test/fuzz/banman.cpp
@@ -48,7 +48,7 @@ FUZZ_TARGET_INIT(banman, initialize_banman)
const bool start_with_corrupted_banlist{fuzzed_data_provider.ConsumeBool()};
bool force_read_and_write_to_err{false};
if (start_with_corrupted_banlist) {
- assert(WriteBinaryFile(banlist_file.string() + ".json",
+ assert(WriteBinaryFile(banlist_file + ".json",
fuzzed_data_provider.ConsumeRandomLengthString()));
} else {
force_read_and_write_to_err = fuzzed_data_provider.ConsumeBool();
@@ -111,5 +111,5 @@ FUZZ_TARGET_INIT(banman, initialize_banman)
assert(banmap == banmap_read);
}
}
- fs::remove(banlist_file.string() + ".json");
+ fs::remove(fs::PathToString(banlist_file + ".json"));
}
diff --git a/src/test/script_parse_tests.cpp b/src/test/script_parse_tests.cpp
index 5b8b6a725f..004c1a9a84 100644
--- a/src/test/script_parse_tests.cpp
+++ b/src/test/script_parse_tests.cpp
@@ -38,7 +38,6 @@ BOOST_AUTO_TEST_CASE(parse_script)
{"'17'", "023137"},
{"ELSE", "67"},
{"NOP10", "b9"},
- {"11111111111111111111", "00"},
};
std::string all_in;
std::string all_out;
@@ -49,6 +48,7 @@ BOOST_AUTO_TEST_CASE(parse_script)
}
BOOST_CHECK_EQUAL(HexStr(ParseScript(all_in)), all_out);
+ BOOST_CHECK_EXCEPTION(ParseScript("11111111111111111111"), std::runtime_error, HasReason("script parse error: decimal numeric value only allowed in the range -0xFFFFFFFF...0xFFFFFFFF"));
BOOST_CHECK_EXCEPTION(ParseScript("11111111111"), std::runtime_error, HasReason("script parse error: decimal numeric value only allowed in the range -0xFFFFFFFF...0xFFFFFFFF"));
BOOST_CHECK_EXCEPTION(ParseScript("OP_CHECKSIGADD"), std::runtime_error, HasReason("script parse error: unknown opcode"));
}
diff --git a/src/test/settings_tests.cpp b/src/test/settings_tests.cpp
index 340ce33d91..15cba9e3e5 100644
--- a/src/test/settings_tests.cpp
+++ b/src/test/settings_tests.cpp
@@ -80,19 +80,19 @@ BOOST_AUTO_TEST_CASE(ReadWrite)
"dupe": "dupe"
})");
BOOST_CHECK(!util::ReadSettings(path, values, errors));
- std::vector<std::string> dup_keys = {strprintf("Found duplicate key dupe in settings file %s", path.string())};
+ std::vector<std::string> dup_keys = {strprintf("Found duplicate key dupe in settings file %s", fs::PathToString(path))};
BOOST_CHECK_EQUAL_COLLECTIONS(errors.begin(), errors.end(), dup_keys.begin(), dup_keys.end());
// Check non-kv json files not allowed
WriteText(path, R"("non-kv")");
BOOST_CHECK(!util::ReadSettings(path, values, errors));
- std::vector<std::string> non_kv = {strprintf("Found non-object value \"non-kv\" in settings file %s", path.string())};
+ std::vector<std::string> non_kv = {strprintf("Found non-object value \"non-kv\" in settings file %s", fs::PathToString(path))};
BOOST_CHECK_EQUAL_COLLECTIONS(errors.begin(), errors.end(), non_kv.begin(), non_kv.end());
// Check invalid json not allowed
WriteText(path, R"(invalid json)");
BOOST_CHECK(!util::ReadSettings(path, values, errors));
- std::vector<std::string> fail_parse = {strprintf("Unable to parse settings file %s", path.string())};
+ std::vector<std::string> fail_parse = {strprintf("Unable to parse settings file %s", fs::PathToString(path))};
BOOST_CHECK_EQUAL_COLLECTIONS(errors.begin(), errors.end(), fail_parse.begin(), fail_parse.end());
}
diff --git a/src/test/util/chainstate.h b/src/test/util/chainstate.h
index 81ea4c38f5..e95573022c 100644
--- a/src/test/util/chainstate.h
+++ b/src/test/util/chainstate.h
@@ -36,7 +36,7 @@ CreateAndActivateUTXOSnapshot(NodeContext& node, const fs::path root, F malleati
UniValue result = CreateUTXOSnapshot(node, node.chainman->ActiveChainstate(), auto_outfile);
BOOST_TEST_MESSAGE(
- "Wrote UTXO snapshot to " << snapshot_path.make_preferred().string() << ": " << result.write());
+ "Wrote UTXO snapshot to " << fs::PathToString(snapshot_path.make_preferred()) << ": " << result.write());
// Read the written snapshot in and then activate it.
//
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index ebefa9974e..a3c7564d76 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -91,8 +91,8 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve
extra_args);
util::ThreadRename("test");
fs::create_directories(m_path_root);
- m_args.ForceSetArg("-datadir", m_path_root.string());
- gArgs.ForceSetArg("-datadir", m_path_root.string());
+ m_args.ForceSetArg("-datadir", fs::PathToString(m_path_root));
+ gArgs.ForceSetArg("-datadir", fs::PathToString(m_path_root));
gArgs.ClearPathCache();
{
SetupServerArgs(*m_node.args);
diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp
index b5088d3c33..b1300d06ba 100644
--- a/src/test/util_tests.cpp
+++ b/src/test/util_tests.cpp
@@ -51,23 +51,23 @@ BOOST_AUTO_TEST_CASE(util_datadir)
{
// Use local args variable instead of m_args to avoid making assumptions about test setup
ArgsManager args;
- args.ForceSetArg("-datadir", m_path_root.string());
+ args.ForceSetArg("-datadir", fs::PathToString(m_path_root));
const fs::path dd_norm = args.GetDataDirBase();
- args.ForceSetArg("-datadir", dd_norm.string() + "/");
+ args.ForceSetArg("-datadir", fs::PathToString(dd_norm) + "/");
args.ClearPathCache();
BOOST_CHECK_EQUAL(dd_norm, args.GetDataDirBase());
- args.ForceSetArg("-datadir", dd_norm.string() + "/.");
+ args.ForceSetArg("-datadir", fs::PathToString(dd_norm) + "/.");
args.ClearPathCache();
BOOST_CHECK_EQUAL(dd_norm, args.GetDataDirBase());
- args.ForceSetArg("-datadir", dd_norm.string() + "/./");
+ args.ForceSetArg("-datadir", fs::PathToString(dd_norm) + "/./");
args.ClearPathCache();
BOOST_CHECK_EQUAL(dd_norm, args.GetDataDirBase());
- args.ForceSetArg("-datadir", dd_norm.string() + "/.//");
+ args.ForceSetArg("-datadir", fs::PathToString(dd_norm) + "/.//");
args.ClearPathCache();
BOOST_CHECK_EQUAL(dd_norm, args.GetDataDirBase());
}
@@ -1181,13 +1181,13 @@ BOOST_AUTO_TEST_CASE(util_ReadWriteSettings)
{
// Test writing setting.
TestArgsManager args1;
- args1.ForceSetArg("-datadir", m_path_root.string());
+ args1.ForceSetArg("-datadir", fs::PathToString(m_path_root));
args1.LockSettings([&](util::Settings& settings) { settings.rw_settings["name"] = "value"; });
args1.WriteSettingsFile();
// Test reading setting.
TestArgsManager args2;
- args2.ForceSetArg("-datadir", m_path_root.string());
+ args2.ForceSetArg("-datadir", fs::PathToString(m_path_root));
args2.ReadSettingsFile();
args2.LockSettings([&](util::Settings& settings) { BOOST_CHECK_EQUAL(settings.rw_settings["name"].get_str(), "value"); });
diff --git a/src/test/validation_chainstate_tests.cpp b/src/test/validation_chainstate_tests.cpp
index 726c9ebbb8..9bb08f774f 100644
--- a/src/test/validation_chainstate_tests.cpp
+++ b/src/test/validation_chainstate_tests.cpp
@@ -107,20 +107,21 @@ BOOST_FIXTURE_TEST_CASE(chainstate_update_tip, TestChain100Setup)
curr_tip = ::g_best_block;
- CChainState* background_cs;
-
BOOST_CHECK_EQUAL(chainman.GetAll().size(), 2);
- for (CChainState* cs : chainman.GetAll()) {
- if (cs != &chainman.ActiveChainstate()) {
- background_cs = cs;
+
+ CChainState& background_cs{*[&] {
+ for (CChainState* cs : chainman.GetAll()) {
+ if (cs != &chainman.ActiveChainstate()) {
+ return cs;
+ }
}
- }
- BOOST_CHECK(background_cs);
+ assert(false);
+ }()};
// Create a block to append to the validation chain.
std::vector<CMutableTransaction> noTxns;
CScript scriptPubKey = CScript() << ToByteVector(coinbaseKey.GetPubKey()) << OP_CHECKSIG;
- CBlock validation_block = this->CreateBlock(noTxns, scriptPubKey, *background_cs);
+ CBlock validation_block = this->CreateBlock(noTxns, scriptPubKey, background_cs);
auto pblock = std::make_shared<const CBlock>(validation_block);
BlockValidationState state;
CBlockIndex* pindex = nullptr;
@@ -133,15 +134,15 @@ BOOST_FIXTURE_TEST_CASE(chainstate_update_tip, TestChain100Setup)
LOCK(::cs_main);
bool checked = CheckBlock(*pblock, state, chainparams.GetConsensus());
BOOST_CHECK(checked);
- bool accepted = background_cs->AcceptBlock(
+ bool accepted = background_cs.AcceptBlock(
pblock, state, &pindex, true, nullptr, &newblock);
BOOST_CHECK(accepted);
}
// UpdateTip is called here
- bool block_added = background_cs->ActivateBestChain(state, pblock);
+ bool block_added = background_cs.ActivateBestChain(state, pblock);
// Ensure tip is as expected
- BOOST_CHECK_EQUAL(background_cs->m_chain.Tip()->GetBlockHash(), validation_block.GetHash());
+ BOOST_CHECK_EQUAL(background_cs.m_chain.Tip()->GetBlockHash(), validation_block.GetHash());
// g_best_block should be unchanged after adding a block to the background
// validation chain.
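Editor's note: the test replaces a maybe-uninitialized CChainState* with a reference initialized by an immediately-invoked lambda, so the "no matching chainstate" case aborts inside the lambda instead of leaving a dangling pointer. The same pattern in a small self-contained form (illustrative names only):

    #include <cassert>
    #include <vector>

    int main()
    {
        std::vector<int> chainstates{7, 8, 9};
        const int active = 7;

        // Bind a reference to the first non-active element, or abort if none exists.
        int& background{*[&]() -> int* {
            for (int& cs : chainstates) {
                if (cs != active) return &cs;
            }
            assert(false);
            return nullptr; // unreachable; satisfies the return type
        }()};

        background = 42;
        assert(chainstates[1] == 42);
    }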
diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp
index f9caf48df8..55618a5c57 100644
--- a/src/torcontrol.cpp
+++ b/src/torcontrol.cpp
@@ -318,7 +318,7 @@ TorController::TorController(struct event_base* _base, const std::string& tor_co
// Read service private key if cached
std::pair<bool,std::string> pkf = ReadBinaryFile(GetPrivateKeyFile());
if (pkf.first) {
- LogPrint(BCLog::TOR, "tor: Reading cached private key from %s\n", GetPrivateKeyFile().string());
+ LogPrint(BCLog::TOR, "tor: Reading cached private key from %s\n", fs::PathToString(GetPrivateKeyFile()));
private_key = pkf.second;
}
}
@@ -356,9 +356,9 @@ void TorController::add_onion_cb(TorControlConnection& _conn, const TorControlRe
service = LookupNumeric(std::string(service_id+".onion"), Params().GetDefaultPort());
LogPrintf("tor: Got service ID %s, advertising service %s\n", service_id, service.ToString());
if (WriteBinaryFile(GetPrivateKeyFile(), private_key)) {
- LogPrint(BCLog::TOR, "tor: Cached service private key to %s\n", GetPrivateKeyFile().string());
+ LogPrint(BCLog::TOR, "tor: Cached service private key to %s\n", fs::PathToString(GetPrivateKeyFile()));
} else {
- LogPrintf("tor: Error writing service private key to %s\n", GetPrivateKeyFile().string());
+ LogPrintf("tor: Error writing service private key to %s\n", fs::PathToString(GetPrivateKeyFile()));
}
AddLocal(service, LOCAL_MANUAL);
// ... onion requested - keep connection open
@@ -508,7 +508,7 @@ void TorController::protocolinfo_cb(TorControlConnection& _conn, const TorContro
} else if (methods.count("SAFECOOKIE")) {
// Cookie: hexdump -e '32/1 "%02x""\n"' ~/.tor/control_auth_cookie
LogPrint(BCLog::TOR, "tor: Using SAFECOOKIE authentication, reading cookie authentication from %s\n", cookiefile);
- std::pair<bool,std::string> status_cookie = ReadBinaryFile(cookiefile, TOR_COOKIE_SIZE);
+ std::pair<bool,std::string> status_cookie = ReadBinaryFile(fs::PathFromString(cookiefile), TOR_COOKIE_SIZE);
if (status_cookie.first && status_cookie.second.size() == TOR_COOKIE_SIZE) {
// _conn.Command("AUTHENTICATE " + HexStr(status_cookie.second), std::bind(&TorController::auth_cb, this, std::placeholders::_1, std::placeholders::_2));
cookie = std::vector<uint8_t>(status_cookie.second.begin(), status_cookie.second.end());
diff --git a/src/univalue/.cirrus.yml b/src/univalue/.cirrus.yml
new file mode 100644
index 0000000000..f140fee12b
--- /dev/null
+++ b/src/univalue/.cirrus.yml
@@ -0,0 +1,44 @@
+env:
+ MAKEJOBS: "-j4"
+ RUN_TESTS: "true"
+ BASE_OUTDIR: "$CIRRUS_WORKING_DIR/out_dir_base"
+ DEBIAN_FRONTEND: "noninteractive"
+
+task:
+ container:
+ image: ubuntu:focal
+ cpu: 1
+ memory: 1G
+ greedy: true # https://medium.com/cirruslabs/introducing-greedy-container-instances-29aad06dc2b4
+
+ matrix:
+ - name: "gcc"
+ env:
+ CC: "gcc"
+ CXX: "g++"
+ APT_PKGS: "gcc"
+ - name: "clang"
+ env:
+ CC: "clang"
+ CXX: "clang++"
+ APT_PKGS: "clang"
+ - name: "mingw"
+ env:
+ CC: ""
+ CXX: ""
+ UNIVALUE_CONFIG: "--host=x86_64-w64-mingw32"
+ APT_PKGS: "g++-mingw-w64-x86-64 gcc-mingw-w64-x86-64 binutils-mingw-w64-x86-64"
+ RUN_TESTS: "false"
+
+ install_script:
+ - apt update
+ - apt install -y pkg-config build-essential libtool autotools-dev automake bsdmainutils
+ - apt install -y $APT_PKGS
+ autogen_script:
+ - ./autogen.sh
+ configure_script:
+ - ./configure --cache-file=config.cache --bindir=$BASE_OUTDIR/bin --libdir=$BASE_OUTDIR/lib $UNIVALUE_CONFIG
+ make_script:
+ - make $MAKEJOBS V=1
+ test_script:
+ - if [ "$RUN_TESTS" = "true" ]; then make $MAKEJOBS distcheck; fi
diff --git a/src/univalue/.travis.yml b/src/univalue/.travis.yml
deleted file mode 100644
index 43a1ed362e..0000000000
--- a/src/univalue/.travis.yml
+++ /dev/null
@@ -1,51 +0,0 @@
-language: cpp
-
-compiler:
- - clang
- - gcc
-
-os:
- - linux
- - osx
-
-sudo: false
-
-env:
- global:
- - MAKEJOBS=-j3
- - RUN_TESTS=true
- - BASE_OUTDIR=$TRAVIS_BUILD_DIR/out
-
-cache:
- apt: true
-
-addons:
- apt:
- packages:
- - pkg-config
-
-before_script:
- - if [ -n "$USE_SHELL" ]; then export CONFIG_SHELL="$USE_SHELL"; fi
- - test -n "$USE_SHELL" && eval '"$USE_SHELL" -c "./autogen.sh"' || ./autogen.sh
-
-script:
- - if [ -n "$UNIVALUE_CONFIG" ]; then unset CC; unset CXX; fi
- - OUTDIR=$BASE_OUTDIR/$TRAVIS_PULL_REQUEST/$TRAVIS_JOB_NUMBER-$HOST
- - UNIVALUE_CONFIG_ALL="--prefix=$TRAVIS_BUILD_DIR/depends/$HOST --bindir=$OUTDIR/bin --libdir=$OUTDIR/lib"
- - ./configure --cache-file=config.cache $UNIVALUE_CONFIG_ALL $UNIVALUE_CONFIG || ( cat config.log && false)
- - make -s $MAKEJOBS $GOAL || ( echo "Build failure. Verbose build follows." && make $GOAL ; false )
- - export LD_LIBRARY_PATH=$TRAVIS_BUILD_DIR/depends/$HOST/lib
- - if [ "$RUN_TESTS" = "true" ]; then make $MAKEJOBS distcheck; fi
-
-matrix:
- fast_finish: true
- include:
- - os: linux
- compiler: gcc
- env: UNIVALUE_CONFIG=--host=x86_64-w64-mingw32 RUN_TESTS=false
- addons:
- apt:
- packages:
- - g++-mingw-w64-x86-64
- - gcc-mingw-w64-x86-64
- - binutils-mingw-w64-x86-64
diff --git a/src/univalue/Makefile.am b/src/univalue/Makefile.am
index 0f5ba59954..476f14b922 100644
--- a/src/univalue/Makefile.am
+++ b/src/univalue/Makefile.am
@@ -1,20 +1,17 @@
+include sources.mk
ACLOCAL_AMFLAGS = -I build-aux/m4
-.PHONY: gen
+.PHONY: gen FORCE
.INTERMEDIATE: $(GENBIN)
-include_HEADERS = include/univalue.h
-noinst_HEADERS = lib/univalue_escapes.h lib/univalue_utffilter.h
+include_HEADERS = $(UNIVALUE_DIST_HEADERS_INT)
+noinst_HEADERS = $(UNIVALUE_LIB_HEADERS_INT)
lib_LTLIBRARIES = libunivalue.la
pkgconfigdir = $(libdir)/pkgconfig
pkgconfig_DATA = pc/libunivalue.pc
-libunivalue_la_SOURCES = \
- lib/univalue.cpp \
- lib/univalue_get.cpp \
- lib/univalue_read.cpp \
- lib/univalue_write.cpp
+libunivalue_la_SOURCES = $(UNIVALUE_LIB_SOURCES_INT)
libunivalue_la_LDFLAGS = \
-version-info $(LIBUNIVALUE_CURRENT):$(LIBUNIVALUE_REVISION):$(LIBUNIVALUE_AGE) \
@@ -30,89 +27,32 @@ $(GENBIN): $(GEN_SRCS)
@echo Building $@
$(AM_V_at)c++ -I$(top_srcdir)/include -o $@ $<
-gen: lib/univalue_escapes.h $(GENBIN)
- @echo Updating $<
+gen: $(GENBIN) FORCE
+ @echo Updating lib/univalue_escapes.h
$(AM_V_at)$(GENBIN) > lib/univalue_escapes.h
noinst_PROGRAMS = $(TESTS) test/test_json
-TEST_DATA_DIR=test
-
-test_unitester_SOURCES = test/unitester.cpp
+test_unitester_SOURCES = $(UNIVALUE_TEST_UNITESTER_INT)
test_unitester_LDADD = libunivalue.la
-test_unitester_CXXFLAGS = -I$(top_srcdir)/include -DJSON_TEST_SRC=\"$(srcdir)/$(TEST_DATA_DIR)\"
+test_unitester_CXXFLAGS = -I$(top_srcdir)/include -DJSON_TEST_SRC=\"$(srcdir)/$(UNIVALUE_TEST_DATA_DIR_INT)\"
test_unitester_LDFLAGS = -static $(LIBTOOL_APP_LDFLAGS)
-test_test_json_SOURCES = test/test_json.cpp
+test_test_json_SOURCES = $(UNIVALUE_TEST_JSON_INT)
test_test_json_LDADD = libunivalue.la
test_test_json_CXXFLAGS = -I$(top_srcdir)/include
test_test_json_LDFLAGS = -static $(LIBTOOL_APP_LDFLAGS)
-test_no_nul_SOURCES = test/no_nul.cpp
+test_no_nul_SOURCES = $(UNIVALUE_TEST_NO_NUL_INT)
test_no_nul_LDADD = libunivalue.la
test_no_nul_CXXFLAGS = -I$(top_srcdir)/include
test_no_nul_LDFLAGS = -static $(LIBTOOL_APP_LDFLAGS)
-test_object_SOURCES = test/object.cpp
+test_object_SOURCES = $(UNIVALUE_TEST_OBJECT_INT)
test_object_LDADD = libunivalue.la
test_object_CXXFLAGS = -I$(top_srcdir)/include
test_object_LDFLAGS = -static $(LIBTOOL_APP_LDFLAGS)
-TEST_FILES = \
- $(TEST_DATA_DIR)/fail10.json \
- $(TEST_DATA_DIR)/fail11.json \
- $(TEST_DATA_DIR)/fail12.json \
- $(TEST_DATA_DIR)/fail13.json \
- $(TEST_DATA_DIR)/fail14.json \
- $(TEST_DATA_DIR)/fail15.json \
- $(TEST_DATA_DIR)/fail16.json \
- $(TEST_DATA_DIR)/fail17.json \
- $(TEST_DATA_DIR)/fail18.json \
- $(TEST_DATA_DIR)/fail19.json \
- $(TEST_DATA_DIR)/fail1.json \
- $(TEST_DATA_DIR)/fail20.json \
- $(TEST_DATA_DIR)/fail21.json \
- $(TEST_DATA_DIR)/fail22.json \
- $(TEST_DATA_DIR)/fail23.json \
- $(TEST_DATA_DIR)/fail24.json \
- $(TEST_DATA_DIR)/fail25.json \
- $(TEST_DATA_DIR)/fail26.json \
- $(TEST_DATA_DIR)/fail27.json \
- $(TEST_DATA_DIR)/fail28.json \
- $(TEST_DATA_DIR)/fail29.json \
- $(TEST_DATA_DIR)/fail2.json \
- $(TEST_DATA_DIR)/fail30.json \
- $(TEST_DATA_DIR)/fail31.json \
- $(TEST_DATA_DIR)/fail32.json \
- $(TEST_DATA_DIR)/fail33.json \
- $(TEST_DATA_DIR)/fail34.json \
- $(TEST_DATA_DIR)/fail35.json \
- $(TEST_DATA_DIR)/fail36.json \
- $(TEST_DATA_DIR)/fail37.json \
- $(TEST_DATA_DIR)/fail38.json \
- $(TEST_DATA_DIR)/fail39.json \
- $(TEST_DATA_DIR)/fail40.json \
- $(TEST_DATA_DIR)/fail41.json \
- $(TEST_DATA_DIR)/fail42.json \
- $(TEST_DATA_DIR)/fail44.json \
- $(TEST_DATA_DIR)/fail45.json \
- $(TEST_DATA_DIR)/fail3.json \
- $(TEST_DATA_DIR)/fail4.json \
- $(TEST_DATA_DIR)/fail5.json \
- $(TEST_DATA_DIR)/fail6.json \
- $(TEST_DATA_DIR)/fail7.json \
- $(TEST_DATA_DIR)/fail8.json \
- $(TEST_DATA_DIR)/fail9.json \
- $(TEST_DATA_DIR)/pass1.json \
- $(TEST_DATA_DIR)/pass2.json \
- $(TEST_DATA_DIR)/pass3.json \
- $(TEST_DATA_DIR)/pass4.json \
- $(TEST_DATA_DIR)/round1.json \
- $(TEST_DATA_DIR)/round2.json \
- $(TEST_DATA_DIR)/round3.json \
- $(TEST_DATA_DIR)/round4.json \
- $(TEST_DATA_DIR)/round5.json \
- $(TEST_DATA_DIR)/round6.json \
- $(TEST_DATA_DIR)/round7.json
-
-EXTRA_DIST=$(TEST_FILES) $(GEN_SRCS)
+TEST_FILES = $(UNIVALUE_TEST_FILES_INT)
+
+EXTRA_DIST=$(UNIVALUE_TEST_FILES_INT) $(GEN_SRCS)
diff --git a/src/univalue/build-aux/m4/ax_cxx_compile_stdcxx.m4 b/src/univalue/build-aux/m4/ax_cxx_compile_stdcxx.m4
new file mode 100644
index 0000000000..f7e5137003
--- /dev/null
+++ b/src/univalue/build-aux/m4/ax_cxx_compile_stdcxx.m4
@@ -0,0 +1,962 @@
+# ===========================================================================
+# https://www.gnu.org/software/autoconf-archive/ax_cxx_compile_stdcxx.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_CXX_COMPILE_STDCXX(VERSION, [ext|noext], [mandatory|optional])
+#
+# DESCRIPTION
+#
+# Check for baseline language coverage in the compiler for the specified
+# version of the C++ standard. If necessary, add switches to CXX and
+# CXXCPP to enable support. VERSION may be '11' (for the C++11 standard)
+# or '14' (for the C++14 standard).
+#
+# The second argument, if specified, indicates whether you insist on an
+# extended mode (e.g. -std=gnu++11) or a strict conformance mode (e.g.
+# -std=c++11). If neither is specified, you get whatever works, with
+# preference for no added switch, and then for an extended mode.
+#
+# The third argument, if specified 'mandatory' or if left unspecified,
+# indicates that baseline support for the specified C++ standard is
+# required and that the macro should error out if no mode with that
+# support is found. If specified 'optional', then configuration proceeds
+# regardless, after defining HAVE_CXX${VERSION} if and only if a
+# supporting mode is found.
+#
+# LICENSE
+#
+# Copyright (c) 2008 Benjamin Kosnik <bkoz@redhat.com>
+# Copyright (c) 2012 Zack Weinberg <zackw@panix.com>
+# Copyright (c) 2013 Roy Stogner <roystgnr@ices.utexas.edu>
+# Copyright (c) 2014, 2015 Google Inc.; contributed by Alexey Sokolov <sokolov@google.com>
+# Copyright (c) 2015 Paul Norman <penorman@mac.com>
+# Copyright (c) 2015 Moritz Klammler <moritz@klammler.eu>
+# Copyright (c) 2016, 2018 Krzesimir Nowak <qdlacz@gmail.com>
+# Copyright (c) 2019 Enji Cooper <yaneurabeya@gmail.com>
+# Copyright (c) 2020 Jason Merrill <jason@redhat.com>
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 12
+
+dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro
+dnl (serial version number 13).
+
+AC_DEFUN([AX_CXX_COMPILE_STDCXX], [dnl
+ m4_if([$1], [11], [ax_cxx_compile_alternatives="11 0x"],
+ [$1], [14], [ax_cxx_compile_alternatives="14 1y"],
+ [$1], [17], [ax_cxx_compile_alternatives="17 1z"],
+ [m4_fatal([invalid first argument `$1' to AX_CXX_COMPILE_STDCXX])])dnl
+ m4_if([$2], [], [],
+ [$2], [ext], [],
+ [$2], [noext], [],
+ [m4_fatal([invalid second argument `$2' to AX_CXX_COMPILE_STDCXX])])dnl
+ m4_if([$3], [], [ax_cxx_compile_cxx$1_required=true],
+ [$3], [mandatory], [ax_cxx_compile_cxx$1_required=true],
+ [$3], [optional], [ax_cxx_compile_cxx$1_required=false],
+ [m4_fatal([invalid third argument `$3' to AX_CXX_COMPILE_STDCXX])])
+ AC_LANG_PUSH([C++])dnl
+ ac_success=no
+
+ m4_if([$2], [], [dnl
+ AC_CACHE_CHECK(whether $CXX supports C++$1 features by default,
+ ax_cv_cxx_compile_cxx$1,
+ [AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
+ [ax_cv_cxx_compile_cxx$1=yes],
+ [ax_cv_cxx_compile_cxx$1=no])])
+ if test x$ax_cv_cxx_compile_cxx$1 = xyes; then
+ ac_success=yes
+ fi])
+
+ m4_if([$2], [noext], [], [dnl
+ if test x$ac_success = xno; then
+ for alternative in ${ax_cxx_compile_alternatives}; do
+ switch="-std=gnu++${alternative}"
+ cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
+ AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
+ $cachevar,
+ [ac_save_CXX="$CXX"
+ CXX="$CXX $switch"
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
+ [eval $cachevar=yes],
+ [eval $cachevar=no])
+ CXX="$ac_save_CXX"])
+ if eval test x\$$cachevar = xyes; then
+ CXX="$CXX $switch"
+ if test -n "$CXXCPP" ; then
+ CXXCPP="$CXXCPP $switch"
+ fi
+ ac_success=yes
+ break
+ fi
+ done
+ fi])
+
+ m4_if([$2], [ext], [], [dnl
+ if test x$ac_success = xno; then
+ dnl HP's aCC needs +std=c++11 according to:
+ dnl http://h21007.www2.hp.com/portal/download/files/unprot/aCxx/PDF_Release_Notes/769149-001.pdf
+ dnl Cray's crayCC needs "-h std=c++11"
+ for alternative in ${ax_cxx_compile_alternatives}; do
+ for switch in -std=c++${alternative} +std=c++${alternative} "-h std=c++${alternative}"; do
+ cachevar=AS_TR_SH([ax_cv_cxx_compile_cxx$1_$switch])
+ AC_CACHE_CHECK(whether $CXX supports C++$1 features with $switch,
+ $cachevar,
+ [ac_save_CXX="$CXX"
+ CXX="$CXX $switch"
+ AC_COMPILE_IFELSE([AC_LANG_SOURCE([_AX_CXX_COMPILE_STDCXX_testbody_$1])],
+ [eval $cachevar=yes],
+ [eval $cachevar=no])
+ CXX="$ac_save_CXX"])
+ if eval test x\$$cachevar = xyes; then
+ CXX="$CXX $switch"
+ if test -n "$CXXCPP" ; then
+ CXXCPP="$CXXCPP $switch"
+ fi
+ ac_success=yes
+ break
+ fi
+ done
+ if test x$ac_success = xyes; then
+ break
+ fi
+ done
+ fi])
+ AC_LANG_POP([C++])
+ if test x$ax_cxx_compile_cxx$1_required = xtrue; then
+ if test x$ac_success = xno; then
+ AC_MSG_ERROR([*** A compiler with support for C++$1 language features is required.])
+ fi
+ fi
+ if test x$ac_success = xno; then
+ HAVE_CXX$1=0
+ AC_MSG_NOTICE([No compiler with C++$1 support was found])
+ else
+ HAVE_CXX$1=1
+ AC_DEFINE(HAVE_CXX$1,1,
+ [define if the compiler supports basic C++$1 syntax])
+ fi
+ AC_SUBST(HAVE_CXX$1)
+])
+
+
+dnl Test body for checking C++11 support
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_11],
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
+)
+
+
+dnl Test body for checking C++14 support
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_14],
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_14
+)
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_17],
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_11
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_14
+ _AX_CXX_COMPILE_STDCXX_testbody_new_in_17
+)
+
+dnl Tests for new features in C++11
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[
+
+// If the compiler admits that it is not ready for C++11, why torture it?
+// Hopefully, this will speed up the test.
+
+#ifndef __cplusplus
+
+#error "This is not a C++ compiler"
+
+#elif __cplusplus < 201103L
+
+#error "This is not a C++11 compiler"
+
+#else
+
+namespace cxx11
+{
+
+ namespace test_static_assert
+ {
+
+ template <typename T>
+ struct check
+ {
+ static_assert(sizeof(int) <= sizeof(T), "not big enough");
+ };
+
+ }
+
+ namespace test_final_override
+ {
+
+ struct Base
+ {
+ virtual ~Base() {}
+ virtual void f() {}
+ };
+
+ struct Derived : public Base
+ {
+ virtual ~Derived() override {}
+ virtual void f() override {}
+ };
+
+ }
+
+ namespace test_double_right_angle_brackets
+ {
+
+ template < typename T >
+ struct check {};
+
+ typedef check<void> single_type;
+ typedef check<check<void>> double_type;
+ typedef check<check<check<void>>> triple_type;
+ typedef check<check<check<check<void>>>> quadruple_type;
+
+ }
+
+ namespace test_decltype
+ {
+
+ int
+ f()
+ {
+ int a = 1;
+ decltype(a) b = 2;
+ return a + b;
+ }
+
+ }
+
+ namespace test_type_deduction
+ {
+
+ template < typename T1, typename T2 >
+ struct is_same
+ {
+ static const bool value = false;
+ };
+
+ template < typename T >
+ struct is_same<T, T>
+ {
+ static const bool value = true;
+ };
+
+ template < typename T1, typename T2 >
+ auto
+ add(T1 a1, T2 a2) -> decltype(a1 + a2)
+ {
+ return a1 + a2;
+ }
+
+ int
+ test(const int c, volatile int v)
+ {
+ static_assert(is_same<int, decltype(0)>::value == true, "");
+ static_assert(is_same<int, decltype(c)>::value == false, "");
+ static_assert(is_same<int, decltype(v)>::value == false, "");
+ auto ac = c;
+ auto av = v;
+ auto sumi = ac + av + 'x';
+ auto sumf = ac + av + 1.0;
+ static_assert(is_same<int, decltype(ac)>::value == true, "");
+ static_assert(is_same<int, decltype(av)>::value == true, "");
+ static_assert(is_same<int, decltype(sumi)>::value == true, "");
+ static_assert(is_same<int, decltype(sumf)>::value == false, "");
+ static_assert(is_same<int, decltype(add(c, v))>::value == true, "");
+ return (sumf > 0.0) ? sumi : add(c, v);
+ }
+
+ }
+
+ namespace test_noexcept
+ {
+
+ int f() { return 0; }
+ int g() noexcept { return 0; }
+
+ static_assert(noexcept(f()) == false, "");
+ static_assert(noexcept(g()) == true, "");
+
+ }
+
+ namespace test_constexpr
+ {
+
+ template < typename CharT >
+ unsigned long constexpr
+ strlen_c_r(const CharT *const s, const unsigned long acc) noexcept
+ {
+ return *s ? strlen_c_r(s + 1, acc + 1) : acc;
+ }
+
+ template < typename CharT >
+ unsigned long constexpr
+ strlen_c(const CharT *const s) noexcept
+ {
+ return strlen_c_r(s, 0UL);
+ }
+
+ static_assert(strlen_c("") == 0UL, "");
+ static_assert(strlen_c("1") == 1UL, "");
+ static_assert(strlen_c("example") == 7UL, "");
+ static_assert(strlen_c("another\0example") == 7UL, "");
+
+ }
+
+ namespace test_rvalue_references
+ {
+
+ template < int N >
+ struct answer
+ {
+ static constexpr int value = N;
+ };
+
+ answer<1> f(int&) { return answer<1>(); }
+ answer<2> f(const int&) { return answer<2>(); }
+ answer<3> f(int&&) { return answer<3>(); }
+
+ void
+ test()
+ {
+ int i = 0;
+ const int c = 0;
+ static_assert(decltype(f(i))::value == 1, "");
+ static_assert(decltype(f(c))::value == 2, "");
+ static_assert(decltype(f(0))::value == 3, "");
+ }
+
+ }
+
+ namespace test_uniform_initialization
+ {
+
+ struct test
+ {
+ static const int zero {};
+ static const int one {1};
+ };
+
+ static_assert(test::zero == 0, "");
+ static_assert(test::one == 1, "");
+
+ }
+
+ namespace test_lambdas
+ {
+
+ void
+ test1()
+ {
+ auto lambda1 = [](){};
+ auto lambda2 = lambda1;
+ lambda1();
+ lambda2();
+ }
+
+ int
+ test2()
+ {
+ auto a = [](int i, int j){ return i + j; }(1, 2);
+ auto b = []() -> int { return '0'; }();
+ auto c = [=](){ return a + b; }();
+ auto d = [&](){ return c; }();
+ auto e = [a, &b](int x) mutable {
+ const auto identity = [](int y){ return y; };
+ for (auto i = 0; i < a; ++i)
+ a += b--;
+ return x + identity(a + b);
+ }(0);
+ return a + b + c + d + e;
+ }
+
+ int
+ test3()
+ {
+ const auto nullary = [](){ return 0; };
+ const auto unary = [](int x){ return x; };
+ using nullary_t = decltype(nullary);
+ using unary_t = decltype(unary);
+ const auto higher1st = [](nullary_t f){ return f(); };
+ const auto higher2nd = [unary](nullary_t f1){
+ return [unary, f1](unary_t f2){ return f2(unary(f1())); };
+ };
+ return higher1st(nullary) + higher2nd(nullary)(unary);
+ }
+
+ }
+
+ namespace test_variadic_templates
+ {
+
+ template <int...>
+ struct sum;
+
+ template <int N0, int... N1toN>
+ struct sum<N0, N1toN...>
+ {
+ static constexpr auto value = N0 + sum<N1toN...>::value;
+ };
+
+ template <>
+ struct sum<>
+ {
+ static constexpr auto value = 0;
+ };
+
+ static_assert(sum<>::value == 0, "");
+ static_assert(sum<1>::value == 1, "");
+ static_assert(sum<23>::value == 23, "");
+ static_assert(sum<1, 2>::value == 3, "");
+ static_assert(sum<5, 5, 11>::value == 21, "");
+ static_assert(sum<2, 3, 5, 7, 11, 13>::value == 41, "");
+
+ }
+
+ // http://stackoverflow.com/questions/13728184/template-aliases-and-sfinae
+ // Clang 3.1 fails with headers of libstd++ 4.8.3 when using std::function
+ // because of this.
+ namespace test_template_alias_sfinae
+ {
+
+ struct foo {};
+
+ template<typename T>
+ using member = typename T::member_type;
+
+ template<typename T>
+ void func(...) {}
+
+ template<typename T>
+ void func(member<T>*) {}
+
+ void test();
+
+ void test() { func<foo>(0); }
+
+ }
+
+} // namespace cxx11
+
+#endif // __cplusplus >= 201103L
+
+]])
+
+
+dnl Tests for new features in C++14
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[
+
+// If the compiler admits that it is not ready for C++14, why torture it?
+// Hopefully, this will speed up the test.
+
+#ifndef __cplusplus
+
+#error "This is not a C++ compiler"
+
+#elif __cplusplus < 201402L
+
+#error "This is not a C++14 compiler"
+
+#else
+
+namespace cxx14
+{
+
+ namespace test_polymorphic_lambdas
+ {
+
+ int
+ test()
+ {
+ const auto lambda = [](auto&&... args){
+ const auto istiny = [](auto x){
+ return (sizeof(x) == 1UL) ? 1 : 0;
+ };
+ const int aretiny[] = { istiny(args)... };
+ return aretiny[0];
+ };
+ return lambda(1, 1L, 1.0f, '1');
+ }
+
+ }
+
+ namespace test_binary_literals
+ {
+
+ constexpr auto ivii = 0b0000000000101010;
+ static_assert(ivii == 42, "wrong value");
+
+ }
+
+ namespace test_generalized_constexpr
+ {
+
+ template < typename CharT >
+ constexpr unsigned long
+ strlen_c(const CharT *const s) noexcept
+ {
+ auto length = 0UL;
+ for (auto p = s; *p; ++p)
+ ++length;
+ return length;
+ }
+
+ static_assert(strlen_c("") == 0UL, "");
+ static_assert(strlen_c("x") == 1UL, "");
+ static_assert(strlen_c("test") == 4UL, "");
+ static_assert(strlen_c("another\0test") == 7UL, "");
+
+ }
+
+ namespace test_lambda_init_capture
+ {
+
+ int
+ test()
+ {
+ auto x = 0;
+ const auto lambda1 = [a = x](int b){ return a + b; };
+ const auto lambda2 = [a = lambda1(x)](){ return a; };
+ return lambda2();
+ }
+
+ }
+
+ namespace test_digit_separators
+ {
+
+ constexpr auto ten_million = 100'000'000;
+ static_assert(ten_million == 100000000, "");
+
+ }
+
+ namespace test_return_type_deduction
+ {
+
+ auto f(int& x) { return x; }
+ decltype(auto) g(int& x) { return x; }
+
+ template < typename T1, typename T2 >
+ struct is_same
+ {
+ static constexpr auto value = false;
+ };
+
+ template < typename T >
+ struct is_same<T, T>
+ {
+ static constexpr auto value = true;
+ };
+
+ int
+ test()
+ {
+ auto x = 0;
+ static_assert(is_same<int, decltype(f(x))>::value, "");
+ static_assert(is_same<int&, decltype(g(x))>::value, "");
+ return x;
+ }
+
+ }
+
+} // namespace cxx14
+
+#endif // __cplusplus >= 201402L
+
+]])
+
+
+dnl Tests for new features in C++17
+
+m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_17], [[
+
+// If the compiler admits that it is not ready for C++17, why torture it?
+// Hopefully, this will speed up the test.
+
+#ifndef __cplusplus
+
+#error "This is not a C++ compiler"
+
+#elif __cplusplus < 201703L
+
+#error "This is not a C++17 compiler"
+
+#else
+
+#include <initializer_list>
+#include <utility>
+#include <type_traits>
+
+namespace cxx17
+{
+
+ namespace test_constexpr_lambdas
+ {
+
+ constexpr int foo = [](){return 42;}();
+
+ }
+
+ namespace test::nested_namespace::definitions
+ {
+
+ }
+
+ namespace test_fold_expression
+ {
+
+ template<typename... Args>
+ int multiply(Args... args)
+ {
+ return (args * ... * 1);
+ }
+
+ template<typename... Args>
+ bool all(Args... args)
+ {
+ return (args && ...);
+ }
+
+ }
+
+ namespace test_extended_static_assert
+ {
+
+ static_assert (true);
+
+ }
+
+ namespace test_auto_brace_init_list
+ {
+
+ auto foo = {5};
+ auto bar {5};
+
+ static_assert(std::is_same<std::initializer_list<int>, decltype(foo)>::value);
+ static_assert(std::is_same<int, decltype(bar)>::value);
+ }
+
+ namespace test_typename_in_template_template_parameter
+ {
+
+ template<template<typename> typename X> struct D;
+
+ }
+
+ namespace test_fallthrough_nodiscard_maybe_unused_attributes
+ {
+
+ int f1()
+ {
+ return 42;
+ }
+
+ [[nodiscard]] int f2()
+ {
+ [[maybe_unused]] auto unused = f1();
+
+ switch (f1())
+ {
+ case 17:
+ f1();
+ [[fallthrough]];
+ case 42:
+ f1();
+ }
+ return f1();
+ }
+
+ }
+
+ namespace test_extended_aggregate_initialization
+ {
+
+ struct base1
+ {
+ int b1, b2 = 42;
+ };
+
+ struct base2
+ {
+ base2() {
+ b3 = 42;
+ }
+ int b3;
+ };
+
+ struct derived : base1, base2
+ {
+ int d;
+ };
+
+ derived d1 {{1, 2}, {}, 4}; // full initialization
+ derived d2 {{}, {}, 4}; // value-initialized bases
+
+ }
+
+ namespace test_general_range_based_for_loop
+ {
+
+ struct iter
+ {
+ int i;
+
+ int& operator* ()
+ {
+ return i;
+ }
+
+ const int& operator* () const
+ {
+ return i;
+ }
+
+ iter& operator++()
+ {
+ ++i;
+ return *this;
+ }
+ };
+
+ struct sentinel
+ {
+ int i;
+ };
+
+ bool operator== (const iter& i, const sentinel& s)
+ {
+ return i.i == s.i;
+ }
+
+ bool operator!= (const iter& i, const sentinel& s)
+ {
+ return !(i == s);
+ }
+
+ struct range
+ {
+ iter begin() const
+ {
+ return {0};
+ }
+
+ sentinel end() const
+ {
+ return {5};
+ }
+ };
+
+ void f()
+ {
+ range r {};
+
+ for (auto i : r)
+ {
+ [[maybe_unused]] auto v = i;
+ }
+ }
+
+ }
+
+ namespace test_lambda_capture_asterisk_this_by_value
+ {
+
+ struct t
+ {
+ int i;
+ int foo()
+ {
+ return [*this]()
+ {
+ return i;
+ }();
+ }
+ };
+
+ }
+
+ namespace test_enum_class_construction
+ {
+
+ enum class byte : unsigned char
+ {};
+
+ byte foo {42};
+
+ }
+
+ namespace test_constexpr_if
+ {
+
+ template <bool cond>
+ int f ()
+ {
+ if constexpr(cond)
+ {
+ return 13;
+ }
+ else
+ {
+ return 42;
+ }
+ }
+
+ }
+
+ namespace test_selection_statement_with_initializer
+ {
+
+ int f()
+ {
+ return 13;
+ }
+
+ int f2()
+ {
+ if (auto i = f(); i > 0)
+ {
+ return 3;
+ }
+
+ switch (auto i = f(); i + 4)
+ {
+ case 17:
+ return 2;
+
+ default:
+ return 1;
+ }
+ }
+
+ }
+
+ namespace test_template_argument_deduction_for_class_templates
+ {
+
+ template <typename T1, typename T2>
+ struct pair
+ {
+ pair (T1 p1, T2 p2)
+ : m1 {p1},
+ m2 {p2}
+ {}
+
+ T1 m1;
+ T2 m2;
+ };
+
+ void f()
+ {
+ [[maybe_unused]] auto p = pair{13, 42u};
+ }
+
+ }
+
+ namespace test_non_type_auto_template_parameters
+ {
+
+ template <auto n>
+ struct B
+ {};
+
+ B<5> b1;
+ B<'a'> b2;
+
+ }
+
+ namespace test_structured_bindings
+ {
+
+ int arr[2] = { 1, 2 };
+ std::pair<int, int> pr = { 1, 2 };
+
+ auto f1() -> int(&)[2]
+ {
+ return arr;
+ }
+
+ auto f2() -> std::pair<int, int>&
+ {
+ return pr;
+ }
+
+ struct S
+ {
+ int x1 : 2;
+ volatile double y1;
+ };
+
+ S f3()
+ {
+ return {};
+ }
+
+ auto [ x1, y1 ] = f1();
+ auto& [ xr1, yr1 ] = f1();
+ auto [ x2, y2 ] = f2();
+ auto& [ xr2, yr2 ] = f2();
+ const auto [ x3, y3 ] = f3();
+
+ }
+
+ namespace test_exception_spec_type_system
+ {
+
+ struct Good {};
+ struct Bad {};
+
+ void g1() noexcept;
+ void g2();
+
+ template<typename T>
+ Bad
+ f(T*, T*);
+
+ template<typename T1, typename T2>
+ Good
+ f(T1*, T2*);
+
+ static_assert (std::is_same_v<Good, decltype(f(g1, g2))>);
+
+ }
+
+ namespace test_inline_variables
+ {
+
+ template<class T> void f(T)
+ {}
+
+ template<class T> inline T g(T)
+ {
+ return T{};
+ }
+
+ template<> inline void f<>(int)
+ {}
+
+ template<> int g<>(int)
+ {
+ return 5;
+ }
+
+ }
+
+} // namespace cxx17
+
+#endif // __cplusplus < 201703L
+
+]])
diff --git a/src/univalue/configure.ac b/src/univalue/configure.ac
index 8298332ac1..495b25a53d 100644
--- a/src/univalue/configure.ac
+++ b/src/univalue/configure.ac
@@ -1,7 +1,7 @@
m4_define([libunivalue_major_version], [1])
m4_define([libunivalue_minor_version], [1])
-m4_define([libunivalue_micro_version], [3])
-m4_define([libunivalue_interface_age], [3])
+m4_define([libunivalue_micro_version], [4])
+m4_define([libunivalue_interface_age], [4])
# If you need a modifier for the version number.
# Normally empty, but can be used to make "fixup" releases.
m4_define([libunivalue_extraversion], [])
@@ -14,7 +14,7 @@ m4_define([libunivalue_age], [m4_eval(libunivalue_binary_age - libunivalue_inter
m4_define([libunivalue_version], [libunivalue_major_version().libunivalue_minor_version().libunivalue_micro_version()libunivalue_extraversion()])
-AC_INIT([univalue], [1.0.3],
+AC_INIT([univalue], [1.0.4],
[http://github.com/jgarzik/univalue/])
dnl make the compilation flags quiet unless V=1 is used
@@ -45,6 +45,9 @@ AC_SUBST(LIBUNIVALUE_AGE)
LT_INIT
LT_LANG([C++])
+dnl Require C++11 compiler (no GNU extensions)
+AX_CXX_COMPILE_STDCXX([11], [noext], [mandatory], [nodefault])
+
case $host in
*mingw*)
LIBTOOL_APP_LDFLAGS="$LIBTOOL_APP_LDFLAGS -all-static"
diff --git a/src/univalue/gen/gen.cpp b/src/univalue/gen/gen.cpp
index 85fe20924a..b8a6c73f4e 100644
--- a/src/univalue/gen/gen.cpp
+++ b/src/univalue/gen/gen.cpp
@@ -1,6 +1,6 @@
// Copyright 2014 BitPay Inc.
// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+// file COPYING or https://opensource.org/licenses/mit-license.php.
//
// To re-create univalue_escapes.h:
@@ -45,7 +45,7 @@ static void outputEscape()
for (unsigned int i = 0; i < 256; i++) {
if (escapes[i].empty()) {
- printf("\tNULL,\n");
+ printf("\tnullptr,\n");
} else {
printf("\t\"");
diff --git a/src/univalue/include/univalue.h b/src/univalue/include/univalue.h
index 048e162f7d..fc5cf402be 100644
--- a/src/univalue/include/univalue.h
+++ b/src/univalue/include/univalue.h
@@ -1,7 +1,7 @@
// Copyright 2014 BitPay Inc.
// Copyright 2015 Bitcoin Core Developers
// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+// file COPYING or https://opensource.org/licenses/mit-license.php.
#ifndef __UNIVALUE_H__
#define __UNIVALUE_H__
@@ -14,8 +14,6 @@
#include <map>
#include <cassert>
-#include <sstream> // .get_int64()
-
class UniValue {
public:
enum VType { VNULL, VOBJ, VARR, VSTR, VNUM, VBOOL, };
diff --git a/src/univalue/lib/univalue.cpp b/src/univalue/lib/univalue.cpp
index 4c9c15d63e..c4e59fae74 100644
--- a/src/univalue/lib/univalue.cpp
+++ b/src/univalue/lib/univalue.cpp
@@ -1,7 +1,7 @@
// Copyright 2014 BitPay Inc.
// Copyright 2015 Bitcoin Core Developers
// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+// file COPYING or https://opensource.org/licenses/mit-license.php.
#include <stdint.h>
#include <iomanip>
@@ -178,17 +178,19 @@ bool UniValue::findKey(const std::string& key, size_t& retIdx) const
bool UniValue::checkObject(const std::map<std::string,UniValue::VType>& t) const
{
- if (typ != VOBJ)
+ if (typ != VOBJ) {
return false;
+ }
- for (std::map<std::string,UniValue::VType>::const_iterator it = t.begin();
- it != t.end(); ++it) {
+ for (const auto& object: t) {
size_t idx = 0;
- if (!findKey(it->first, idx))
+ if (!findKey(object.first, idx)) {
return false;
+ }
- if (values.at(idx).getType() != it->second)
+ if (values.at(idx).getType() != object.second) {
return false;
+ }
}
return true;
@@ -228,7 +230,7 @@ const char *uvTypeName(UniValue::VType t)
}
// not reached
- return NULL;
+ return nullptr;
}
const UniValue& find_value(const UniValue& obj, const std::string& name)
diff --git a/src/univalue/lib/univalue_escapes.h b/src/univalue/lib/univalue_escapes.h
index 74596aab6d..3f714f8e5b 100644
--- a/src/univalue/lib/univalue_escapes.h
+++ b/src/univalue/lib/univalue_escapes.h
@@ -34,229 +34,229 @@ static const char *escapes[256] = {
"\\u001d",
"\\u001e",
"\\u001f",
- NULL,
- NULL,
+ nullptr,
+ nullptr,
"\\\"",
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
"\\\\",
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
"\\u007f",
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
- NULL,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
+ nullptr,
};
#endif // BITCOIN_UNIVALUE_UNIVALUE_ESCAPES_H
diff --git a/src/univalue/lib/univalue_get.cpp b/src/univalue/lib/univalue_get.cpp
index 0ad6146545..5af89a3561 100644
--- a/src/univalue/lib/univalue_get.cpp
+++ b/src/univalue/lib/univalue_get.cpp
@@ -1,7 +1,7 @@
// Copyright 2014 BitPay Inc.
// Copyright 2015 Bitcoin Core Developers
// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+// file COPYING or https://opensource.org/licenses/mit-license.php.
#include <stdint.h>
#include <errno.h>
@@ -11,6 +11,7 @@
#include <vector>
#include <limits>
#include <string>
+#include <sstream>
#include "univalue.h"
@@ -31,7 +32,7 @@ bool ParseInt32(const std::string& str, int32_t *out)
{
if (!ParsePrechecks(str))
return false;
- char *endp = NULL;
+ char *endp = nullptr;
errno = 0; // strtol will not set errno if valid
long int n = strtol(str.c_str(), &endp, 10);
if(out) *out = (int32_t)n;
@@ -47,7 +48,7 @@ bool ParseInt64(const std::string& str, int64_t *out)
{
if (!ParsePrechecks(str))
return false;
- char *endp = NULL;
+ char *endp = nullptr;
errno = 0; // strtoll will not set errno if valid
long long int n = strtoll(str.c_str(), &endp, 10);
if(out) *out = (int64_t)n;
diff --git a/src/univalue/lib/univalue_read.cpp b/src/univalue/lib/univalue_read.cpp
index 5c6a1acf75..be39bfe57a 100644
--- a/src/univalue/lib/univalue_read.cpp
+++ b/src/univalue/lib/univalue_read.cpp
@@ -1,6 +1,6 @@
// Copyright 2014 BitPay Inc.
// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+// file COPYING or https://opensource.org/licenses/mit-license.php.
#include <string.h>
#include <vector>
@@ -227,7 +227,7 @@ enum jtokentype getJsonToken(std::string& tokenVal, unsigned int& consumed,
}
else {
- writer.push_back(*raw);
+ writer.push_back(static_cast<unsigned char>(*raw));
raw++;
}
}
@@ -244,7 +244,7 @@ enum jtokentype getJsonToken(std::string& tokenVal, unsigned int& consumed,
}
}
-enum expect_bits {
+enum expect_bits : unsigned {
EXP_OBJ_NAME = (1U << 0),
EXP_COLON = (1U << 1),
EXP_ARR_VALUE = (1U << 2),
diff --git a/src/univalue/lib/univalue_utffilter.h b/src/univalue/lib/univalue_utffilter.h
index 20d4043009..c24ac58eaf 100644
--- a/src/univalue/lib/univalue_utffilter.h
+++ b/src/univalue/lib/univalue_utffilter.h
@@ -1,6 +1,6 @@
// Copyright 2016 Wladimir J. van der Laan
// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+// file COPYING or https://opensource.org/licenses/mit-license.php.
#ifndef UNIVALUE_UTFFILTER_H
#define UNIVALUE_UTFFILTER_H
diff --git a/src/univalue/lib/univalue_write.cpp b/src/univalue/lib/univalue_write.cpp
index 827eb9b271..3a2c580c7f 100644
--- a/src/univalue/lib/univalue_write.cpp
+++ b/src/univalue/lib/univalue_write.cpp
@@ -1,9 +1,8 @@
// Copyright 2014 BitPay Inc.
// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+// file COPYING or https://opensource.org/licenses/mit-license.php.
#include <iomanip>
-#include <sstream>
#include <stdio.h>
#include "univalue.h"
#include "univalue_escapes.h"
@@ -14,13 +13,13 @@ static std::string json_escape(const std::string& inS)
outS.reserve(inS.size() * 2);
for (unsigned int i = 0; i < inS.size(); i++) {
- unsigned char ch = inS[i];
+ unsigned char ch = static_cast<unsigned char>(inS[i]);
const char *escStr = escapes[ch];
if (escStr)
outS += escStr;
else
- outS += ch;
+ outS += static_cast<char>(ch);
}
return outS;
diff --git a/src/univalue/sources.mk b/src/univalue/sources.mk
new file mode 100644
index 0000000000..efab6d277f
--- /dev/null
+++ b/src/univalue/sources.mk
@@ -0,0 +1,95 @@
+# - All variables are namespaced with UNIVALUE_ to avoid colliding with
+# downstream makefiles.
+# - All variables ending in _HEADERS or _SOURCES confuse automake, so the
+# _INT postfix is applied.
+# - Convenience variables, for example a UNIVALUE_TEST_DIR, should not be used
+# as they interfere with automatic dependency generation.
+# - The %reldir% is the relative path from the Makefile.am. This allows
+# downstreams to use these variables without having to manually account for
+# the path change.
+
+UNIVALUE_INCLUDE_DIR_INT = %reldir%/include
+
+UNIVALUE_DIST_HEADERS_INT =
+UNIVALUE_DIST_HEADERS_INT += %reldir%/include/univalue.h
+
+UNIVALUE_LIB_HEADERS_INT =
+UNIVALUE_LIB_HEADERS_INT += %reldir%/lib/univalue_utffilter.h
+UNIVALUE_LIB_HEADERS_INT += %reldir%/lib/univalue_escapes.h
+
+UNIVALUE_LIB_SOURCES_INT =
+UNIVALUE_LIB_SOURCES_INT += %reldir%/lib/univalue.cpp
+UNIVALUE_LIB_SOURCES_INT += %reldir%/lib/univalue_get.cpp
+UNIVALUE_LIB_SOURCES_INT += %reldir%/lib/univalue_read.cpp
+UNIVALUE_LIB_SOURCES_INT += %reldir%/lib/univalue_write.cpp
+
+UNIVALUE_TEST_DATA_DIR_INT = %reldir%/test
+
+UNIVALUE_TEST_UNITESTER_INT =
+UNIVALUE_TEST_UNITESTER_INT += %reldir%/test/unitester.cpp
+
+UNIVALUE_TEST_JSON_INT =
+UNIVALUE_TEST_JSON_INT += %reldir%/test/test_json.cpp
+
+UNIVALUE_TEST_NO_NUL_INT =
+UNIVALUE_TEST_NO_NUL_INT += %reldir%/test/no_nul.cpp
+
+UNIVALUE_TEST_OBJECT_INT =
+UNIVALUE_TEST_OBJECT_INT += %reldir%/test/object.cpp
+
+UNIVALUE_TEST_FILES_INT =
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail1.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail2.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail3.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail4.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail5.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail6.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail7.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail8.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail9.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail10.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail11.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail12.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail13.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail14.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail15.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail16.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail17.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail18.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail19.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail20.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail21.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail22.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail23.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail24.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail25.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail26.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail27.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail28.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail29.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail30.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail31.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail32.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail33.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail34.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail35.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail36.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail37.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail38.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail39.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail40.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail41.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail42.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail44.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/fail45.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/pass1.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/pass2.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/pass3.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/pass4.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/round1.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/round2.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/round3.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/round4.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/round5.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/round6.json
+UNIVALUE_TEST_FILES_INT += %reldir%/test/round7.json
diff --git a/src/univalue/test/object.cpp b/src/univalue/test/object.cpp
index ccc1344836..c2f52f83ac 100644
--- a/src/univalue/test/object.cpp
+++ b/src/univalue/test/object.cpp
@@ -1,7 +1,7 @@
// Copyright (c) 2014 BitPay Inc.
// Copyright (c) 2014-2016 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+// file COPYING or https://opensource.org/licenses/mit-license.php.
#include <stdint.h>
#include <vector>
diff --git a/src/univalue/test/unitester.cpp b/src/univalue/test/unitester.cpp
index 2308afbcdf..02e1a83c6d 100644
--- a/src/univalue/test/unitester.cpp
+++ b/src/univalue/test/unitester.cpp
@@ -1,6 +1,6 @@
// Copyright 2014 BitPay Inc.
// Distributed under the MIT/X11 software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+// file COPYING or https://opensource.org/licenses/mit-license.php.
#include <stdlib.h>
#include <stdio.h>
@@ -58,7 +58,7 @@ static void runtest_file(const char *filename_)
std::string basename(filename_);
std::string filename = srcdir + "/" + basename;
FILE *f = fopen(filename.c_str(), "r");
- assert(f != NULL);
+ assert(f != nullptr);
std::string jdata;
diff --git a/src/util/asmap.cpp b/src/util/asmap.cpp
index 5695c62012..b696c65e9d 100644
--- a/src/util/asmap.cpp
+++ b/src/util/asmap.cpp
@@ -201,7 +201,7 @@ std::vector<bool> DecodeAsmap(fs::path path)
}
fseek(filestr, 0, SEEK_END);
int length = ftell(filestr);
- LogPrintf("Opened asmap file %s (%d bytes) from disk\n", path, length);
+ LogPrintf("Opened asmap file %s (%d bytes) from disk\n", fs::quoted(fs::PathToString(path)), length);
fseek(filestr, 0, SEEK_SET);
uint8_t cur_byte;
for (int i = 0; i < length; ++i) {
@@ -211,7 +211,7 @@ std::vector<bool> DecodeAsmap(fs::path path)
}
}
if (!SanityCheckASMap(bits, 128)) {
- LogPrintf("Sanity check of asmap file %s failed\n", path);
+ LogPrintf("Sanity check of asmap file %s failed\n", fs::quoted(fs::PathToString(path)));
return {};
}
return bits;
diff --git a/src/util/settings.cpp b/src/util/settings.cpp
index 846b34089d..7fb35c073e 100644
--- a/src/util/settings.cpp
+++ b/src/util/settings.cpp
@@ -66,24 +66,24 @@ bool ReadSettings(const fs::path& path, std::map<std::string, SettingsValue>& va
fsbridge::ifstream file;
file.open(path);
if (!file.is_open()) {
- errors.emplace_back(strprintf("%s. Please check permissions.", path.string()));
+ errors.emplace_back(strprintf("%s. Please check permissions.", fs::PathToString(path)));
return false;
}
SettingsValue in;
if (!in.read(std::string{std::istreambuf_iterator<char>(file), std::istreambuf_iterator<char>()})) {
- errors.emplace_back(strprintf("Unable to parse settings file %s", path.string()));
+ errors.emplace_back(strprintf("Unable to parse settings file %s", fs::PathToString(path)));
return false;
}
if (file.fail()) {
- errors.emplace_back(strprintf("Failed reading settings file %s", path.string()));
+ errors.emplace_back(strprintf("Failed reading settings file %s", fs::PathToString(path)));
return false;
}
file.close(); // Done with file descriptor. Release while copying data.
if (!in.isObject()) {
- errors.emplace_back(strprintf("Found non-object value %s in settings file %s", in.write(), path.string()));
+ errors.emplace_back(strprintf("Found non-object value %s in settings file %s", in.write(), fs::PathToString(path)));
return false;
}
@@ -92,7 +92,7 @@ bool ReadSettings(const fs::path& path, std::map<std::string, SettingsValue>& va
for (size_t i = 0; i < in_keys.size(); ++i) {
auto inserted = values.emplace(in_keys[i], in_values[i]);
if (!inserted.second) {
- errors.emplace_back(strprintf("Found duplicate key %s in settings file %s", in_keys[i], path.string()));
+ errors.emplace_back(strprintf("Found duplicate key %s in settings file %s", in_keys[i], fs::PathToString(path)));
}
}
return errors.empty();
@@ -109,7 +109,7 @@ bool WriteSettings(const fs::path& path,
fsbridge::ofstream file;
file.open(path);
if (file.fail()) {
- errors.emplace_back(strprintf("Error: Unable to open settings file %s for writing", path.string()));
+ errors.emplace_back(strprintf("Error: Unable to open settings file %s for writing", fs::PathToString(path)));
return false;
}
file << out.write(/* prettyIndent= */ 1, /* indentLevel= */ 4) << std::endl;
diff --git a/src/util/strencodings.h b/src/util/strencodings.h
index 1f7762aeef..eedb5ec2f8 100644
--- a/src/util/strencodings.h
+++ b/src/util/strencodings.h
@@ -72,7 +72,7 @@ void SplitHostPort(std::string in, uint16_t& portOut, std::string& hostOut);
// LocaleIndependentAtoi is provided for backwards compatibility reasons.
//
-// New code should use the ParseInt64/ParseUInt64/ParseInt32/ParseUInt32 functions
+// New code should use ToIntegral or the ParseInt* functions
// which provide parse error feedback.
//
// The goal of LocaleIndependentAtoi is to replicate the exact defined behaviour
@@ -125,7 +125,7 @@ constexpr inline bool IsSpace(char c) noexcept {
/**
* Convert string to integral type T. Leading whitespace, a leading +, or any
* trailing character fail the parsing. The required format expressed as regex
- * is `-?[0-9]+`.
+ * is `-?[0-9]+`. The minus sign is only permitted for signed integer types.
*
* @returns std::nullopt if the entire string could not be parsed, or if the
* parsed value is not in the range representable by the type T.
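A minimal sketch of the ToIntegral behaviour documented in the hunk above (an editorial illustration, not part of the diff; it assumes the snippet is compiled inside the Bitcoin Core source tree so that <util/strencodings.h> is available):

    // Demonstrates the documented contract: the whole string must match
    // -?[0-9]+, the minus sign is accepted only for signed types, and values
    // that do not fit in T yield std::nullopt.
    #include <util/strencodings.h>

    #include <cassert>
    #include <cstdint>
    #include <string>

    int main()
    {
        assert(ToIntegral<int32_t>(std::string{"-42"}).value() == -42); // signed type: minus sign accepted
        assert(!ToIntegral<uint32_t>(std::string{"-42"}));              // unsigned type: minus sign rejected
        assert(!ToIntegral<int32_t>(std::string{" 42"}));               // leading whitespace fails
        assert(!ToIntegral<int32_t>(std::string{"+42"}));               // leading '+' fails
        assert(!ToIntegral<int32_t>(std::string{"42x"}));               // trailing character fails
        assert(!ToIntegral<int8_t>(std::string{"300"}));                // out of range for int8_t
        return 0;
    }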
diff --git a/src/util/system.cpp b/src/util/system.cpp
index 79c08816fa..12d7dc49b2 100644
--- a/src/util/system.cpp
+++ b/src/util/system.cpp
@@ -98,7 +98,7 @@ bool LockDirectory(const fs::path& directory, const std::string lockfile_name, b
fs::path pathLockFile = directory / lockfile_name;
// If a lock for this directory already exists in the map, don't try to re-lock it
- if (dir_locks.count(pathLockFile.string())) {
+ if (dir_locks.count(fs::PathToString(pathLockFile))) {
return true;
}
@@ -107,11 +107,11 @@ bool LockDirectory(const fs::path& directory, const std::string lockfile_name, b
if (file) fclose(file);
auto lock = std::make_unique<fsbridge::FileLock>(pathLockFile);
if (!lock->TryLock()) {
- return error("Error while attempting to lock directory %s: %s", directory.string(), lock->GetReason());
+ return error("Error while attempting to lock directory %s: %s", fs::PathToString(directory), lock->GetReason());
}
if (!probe_only) {
// Lock successful and we're not just probing, put it into the map
- dir_locks.emplace(pathLockFile.string(), std::move(lock));
+ dir_locks.emplace(fs::PathToString(pathLockFile), std::move(lock));
}
return true;
}
@@ -119,7 +119,7 @@ bool LockDirectory(const fs::path& directory, const std::string lockfile_name, b
void UnlockDirectory(const fs::path& directory, const std::string& lockfile_name)
{
LOCK(cs_dir_locks);
- dir_locks.erase((directory / lockfile_name).string());
+ dir_locks.erase(fs::PathToString(directory / lockfile_name));
}
void ReleaseDirectoryLocks()
@@ -242,7 +242,7 @@ namespace {
fs::path StripRedundantLastElementsOfPath(const fs::path& path)
{
auto result = path;
- while (result.filename().string() == ".") {
+ while (fs::PathToString(result.filename()) == ".") {
result = result.parent_path();
}
@@ -402,7 +402,7 @@ const fs::path& ArgsManager::GetBlocksDirPath() const
if (!path.empty()) return path;
if (IsArgSet("-blocksdir")) {
- path = fs::system_complete(GetArg("-blocksdir", ""));
+ path = fs::system_complete(fs::PathFromString(GetArg("-blocksdir", "")));
if (!fs::is_directory(path)) {
path = "";
return path;
@@ -411,7 +411,7 @@ const fs::path& ArgsManager::GetBlocksDirPath() const
path = GetDataDirBase();
}
- path /= BaseParams().DataDir();
+ path /= fs::PathFromString(BaseParams().DataDir());
path /= "blocks";
fs::create_directories(path);
path = StripRedundantLastElementsOfPath(path);
@@ -429,7 +429,7 @@ const fs::path& ArgsManager::GetDataDir(bool net_specific) const
std::string datadir = GetArg("-datadir", "");
if (!datadir.empty()) {
- path = fs::system_complete(datadir);
+ path = fs::system_complete(fs::PathFromString(datadir));
if (!fs::is_directory(path)) {
path = "";
return path;
@@ -438,7 +438,7 @@ const fs::path& ArgsManager::GetDataDir(bool net_specific) const
path = GetDefaultDataDir();
}
if (net_specific)
- path /= BaseParams().DataDir();
+ path /= fs::PathFromString(BaseParams().DataDir());
if (fs::create_directories(path)) {
// This is the first run, create wallets subdirectory too
@@ -517,7 +517,7 @@ bool ArgsManager::GetSettingsPath(fs::path* filepath, bool temp) const
}
if (filepath) {
std::string settings = GetArg("-settings", BITCOIN_SETTINGS_FILENAME);
- *filepath = fsbridge::AbsPathJoin(GetDataDirNet(), temp ? settings + ".tmp" : settings);
+ *filepath = fsbridge::AbsPathJoin(GetDataDirNet(), fs::PathFromString(temp ? settings + ".tmp" : settings));
}
return true;
}
@@ -572,7 +572,7 @@ bool ArgsManager::WriteSettingsFile(std::vector<std::string>* errors) const
return false;
}
if (!RenameOver(path_tmp, path)) {
- SaveErrors({strprintf("Failed renaming settings file %s to %s\n", path_tmp.string(), path.string())}, errors);
+ SaveErrors({strprintf("Failed renaming settings file %s to %s\n", fs::PathToString(path_tmp), fs::PathToString(path))}, errors);
return false;
}
return true;
@@ -809,12 +809,12 @@ fs::path GetDefaultDataDir()
bool CheckDataDirOption()
{
std::string datadir = gArgs.GetArg("-datadir", "");
- return datadir.empty() || fs::is_directory(fs::system_complete(datadir));
+ return datadir.empty() || fs::is_directory(fs::system_complete(fs::PathFromString(datadir)));
}
fs::path GetConfigFile(const std::string& confPath)
{
- return AbsPathForConfigVal(fs::path(confPath), false);
+ return AbsPathForConfigVal(fs::PathFromString(confPath), false);
}
static bool GetConfigOptions(std::istream& stream, const std::string& filepath, std::string& error, std::vector<std::pair<std::string, std::string>>& options, std::list<SectionInfo>& sections)
@@ -1065,7 +1065,7 @@ bool RenameOver(fs::path src, fs::path dest)
return MoveFileExW(src.wstring().c_str(), dest.wstring().c_str(),
MOVEFILE_REPLACE_EXISTING) != 0;
#else
- int rc = std::rename(src.string().c_str(), dest.string().c_str());
+ int rc = std::rename(src.c_str(), dest.c_str());
return (rc == 0);
#endif /* WIN32 */
}
diff --git a/src/validation.cpp b/src/validation.cpp
index c21a87e6d3..ff71020ebb 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -1877,14 +1877,13 @@ bool CChainState::ConnectBlock(const CBlock& block, BlockValidationState& state,
int64_t nTime5 = GetTimeMicros(); nTimeIndex += nTime5 - nTime4;
LogPrint(BCLog::BENCH, " - Index writing: %.2fms [%.2fs (%.2fms/blk)]\n", MILLI * (nTime5 - nTime4), nTimeIndex * MICRO, nTimeIndex * MILLI / nBlocksTotal);
- TRACE7(validation, block_connected,
- block.GetHash().ToString().c_str(),
+ TRACE6(validation, block_connected,
+ block.GetHash().data(),
pindex->nHeight,
block.vtx.size(),
nInputs,
nSigOpsCost,
- GetTimeMicros() - nTimeStart, // in microseconds (µs)
- block.GetHash().data()
+ GetTimeMicros() - nTimeStart // in microseconds (µs)
);
return true;
@@ -2029,8 +2028,8 @@ bool CChainState::FlushStateToDisk(
}
// Flush best chain related state. This can only be done if the blocks / block index write was also done.
if (fDoFullFlush && !CoinsTip().GetBestBlock().IsNull()) {
- LOG_TIME_SECONDS(strprintf("write coins cache to disk (%d coins, %.2fkB)",
- coins_count, coins_mem_usage / 1000));
+ LOG_TIME_MILLIS_WITH_CATEGORY(strprintf("write coins cache to disk (%d coins, %.2fkB)",
+ coins_count, coins_mem_usage / 1000), BCLog::BENCH);
// Typical Coin structures on disk are around 48 bytes in size.
// Pushing a new one to the database can cause it to be written
@@ -3205,7 +3204,7 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS
if (ppindex)
*ppindex = pindex;
if (pindex->nStatus & BLOCK_FAILED_MASK) {
- LogPrintf("ERROR: %s: block %s is marked invalid\n", __func__, hash.ToString());
+ LogPrint(BCLog::VALIDATION, "%s: block %s is marked invalid\n", __func__, hash.ToString());
return state.Invalid(BlockValidationResult::BLOCK_CACHED_INVALID, "duplicate");
}
return true;
@@ -3220,16 +3219,18 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS
CBlockIndex* pindexPrev = nullptr;
BlockMap::iterator mi = m_block_index.find(block.hashPrevBlock);
if (mi == m_block_index.end()) {
- LogPrintf("ERROR: %s: prev block not found\n", __func__);
+ LogPrint(BCLog::VALIDATION, "%s: %s prev block not found\n", __func__, hash.ToString());
return state.Invalid(BlockValidationResult::BLOCK_MISSING_PREV, "prev-blk-not-found");
}
pindexPrev = (*mi).second;
if (pindexPrev->nStatus & BLOCK_FAILED_MASK) {
- LogPrintf("ERROR: %s: prev block invalid\n", __func__);
+ LogPrint(BCLog::VALIDATION, "%s: %s prev block invalid\n", __func__, hash.ToString());
return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
}
- if (!ContextualCheckBlockHeader(block, state, *this, chainparams, pindexPrev, GetAdjustedTime()))
- return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString());
+ if (!ContextualCheckBlockHeader(block, state, *this, chainparams, pindexPrev, GetAdjustedTime())) {
+ LogPrint(BCLog::VALIDATION, "%s: Consensus::ContextualCheckBlockHeader: %s, %s\n", __func__, hash.ToString(), state.ToString());
+ return false;
+ }
/* Determine if this block descends from any block which has been found
* invalid (m_failed_blocks), then mark pindexPrev and any blocks between
@@ -3264,7 +3265,7 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS
setDirtyBlockIndex.insert(invalid_walk);
invalid_walk = invalid_walk->pprev;
}
- LogPrintf("ERROR: %s: prev block invalid\n", __func__);
+ LogPrint(BCLog::VALIDATION, "%s: %s prev block invalid\n", __func__, hash.ToString());
return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk");
}
}
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
index 2eb4d3106c..2290e119fd 100644
--- a/src/wallet/bdb.cpp
+++ b/src/wallet/bdb.cpp
@@ -61,9 +61,9 @@ bool WalletDatabaseFileId::operator==(const WalletDatabaseFileId& rhs) const
std::shared_ptr<BerkeleyEnvironment> GetBerkeleyEnv(const fs::path& env_directory)
{
LOCK(cs_db);
- auto inserted = g_dbenvs.emplace(env_directory.string(), std::weak_ptr<BerkeleyEnvironment>());
+ auto inserted = g_dbenvs.emplace(fs::PathToString(env_directory), std::weak_ptr<BerkeleyEnvironment>());
if (inserted.second) {
- auto env = std::make_shared<BerkeleyEnvironment>(env_directory.string());
+ auto env = std::make_shared<BerkeleyEnvironment>(env_directory);
inserted.first->second = env;
return env;
}
@@ -101,7 +101,7 @@ void BerkeleyEnvironment::Close()
if (error_file) fclose(error_file);
- UnlockDirectory(strPath, ".walletlock");
+ UnlockDirectory(fs::PathFromString(strPath), ".walletlock");
}
void BerkeleyEnvironment::Reset()
@@ -111,7 +111,7 @@ void BerkeleyEnvironment::Reset()
fMockDb = false;
}
-BerkeleyEnvironment::BerkeleyEnvironment(const fs::path& dir_path) : strPath(dir_path.string())
+BerkeleyEnvironment::BerkeleyEnvironment(const fs::path& dir_path) : strPath(fs::PathToString(dir_path))
{
Reset();
}
@@ -129,24 +129,24 @@ bool BerkeleyEnvironment::Open(bilingual_str& err)
return true;
}
- fs::path pathIn = strPath;
+ fs::path pathIn = fs::PathFromString(strPath);
TryCreateDirectories(pathIn);
if (!LockDirectory(pathIn, ".walletlock")) {
LogPrintf("Cannot obtain a lock on wallet directory %s. Another instance of bitcoin may be using it.\n", strPath);
- err = strprintf(_("Error initializing wallet database environment %s!"), Directory());
+ err = strprintf(_("Error initializing wallet database environment %s!"), fs::quoted(fs::PathToString(Directory())));
return false;
}
fs::path pathLogDir = pathIn / "database";
TryCreateDirectories(pathLogDir);
fs::path pathErrorFile = pathIn / "db.log";
- LogPrintf("BerkeleyEnvironment::Open: LogDir=%s ErrorFile=%s\n", pathLogDir.string(), pathErrorFile.string());
+ LogPrintf("BerkeleyEnvironment::Open: LogDir=%s ErrorFile=%s\n", fs::PathToString(pathLogDir), fs::PathToString(pathErrorFile));
unsigned int nEnvFlags = 0;
if (gArgs.GetBoolArg("-privdb", DEFAULT_WALLET_PRIVDB))
nEnvFlags |= DB_PRIVATE;
- dbenv->set_lg_dir(pathLogDir.string().c_str());
+ dbenv->set_lg_dir(fs::PathToString(pathLogDir).c_str());
dbenv->set_cachesize(0, 0x100000, 1); // 1 MiB should be enough for just the wallet
dbenv->set_lg_bsize(0x10000);
dbenv->set_lg_max(1048576);
@@ -173,7 +173,7 @@ bool BerkeleyEnvironment::Open(bilingual_str& err)
LogPrintf("BerkeleyEnvironment::Open: Error %d closing failed database environment: %s\n", ret2, DbEnv::strerror(ret2));
}
Reset();
- err = strprintf(_("Error initializing wallet database environment %s!"), Directory());
+ err = strprintf(_("Error initializing wallet database environment %s!"), fs::quoted(fs::PathToString(Directory())));
if (ret == DB_RUNRECOVERY) {
err += Untranslated(" ") + _("This error could occur if this wallet was not shutdown cleanly and was last loaded using a build with a newer version of Berkeley DB. If so, please use the software that last loaded this wallet");
}
@@ -261,7 +261,7 @@ bool BerkeleyDatabase::Verify(bilingual_str& errorStr)
fs::path file_path = walletDir / strFile;
LogPrintf("Using BerkeleyDB version %s\n", BerkeleyDatabaseVersion());
- LogPrintf("Using wallet %s\n", file_path.string());
+ LogPrintf("Using wallet %s\n", fs::PathToString(file_path));
if (!env->Open(errorStr)) {
return false;
@@ -274,7 +274,7 @@ bool BerkeleyDatabase::Verify(bilingual_str& errorStr)
Db db(env->dbenv.get(), 0);
int result = db.verify(strFile.c_str(), nullptr, nullptr, 0);
if (result != 0) {
- errorStr = strprintf(_("%s corrupt. Try using the wallet tool bitcoin-wallet to salvage or restoring a backup."), file_path);
+ errorStr = strprintf(_("%s corrupt. Try using the wallet tool bitcoin-wallet to salvage or restoring a backup."), fs::quoted(fs::PathToString(file_path)));
return false;
}
}
@@ -566,7 +566,7 @@ void BerkeleyEnvironment::Flush(bool fShutdown)
dbenv->log_archive(&listp, DB_ARCH_REMOVE);
Close();
if (!fMockDb) {
- fs::remove_all(fs::path(strPath) / "database");
+ fs::remove_all(fs::PathFromString(strPath) / "database");
}
}
}
@@ -614,21 +614,21 @@ bool BerkeleyDatabase::Backup(const std::string& strDest) const
// Copy wallet file
fs::path pathSrc = env->Directory() / strFile;
- fs::path pathDest(strDest);
+ fs::path pathDest(fs::PathFromString(strDest));
if (fs::is_directory(pathDest))
- pathDest /= strFile;
+ pathDest /= fs::PathFromString(strFile);
try {
if (fs::equivalent(pathSrc, pathDest)) {
- LogPrintf("cannot backup to wallet source file %s\n", pathDest.string());
+ LogPrintf("cannot backup to wallet source file %s\n", fs::PathToString(pathDest));
return false;
}
fs::copy_file(pathSrc, pathDest, fs::copy_option::overwrite_if_exists);
- LogPrintf("copied %s to %s\n", strFile, pathDest.string());
+ LogPrintf("copied %s to %s\n", strFile, fs::PathToString(pathDest));
return true;
} catch (const fs::filesystem_error& e) {
- LogPrintf("error copying %s to %s - %s\n", strFile, pathDest.string(), fsbridge::get_filesystem_error_message(e));
+ LogPrintf("error copying %s to %s - %s\n", strFile, fs::PathToString(pathDest), fsbridge::get_filesystem_error_message(e));
return false;
}
}
@@ -828,10 +828,10 @@ std::unique_ptr<BerkeleyDatabase> MakeBerkeleyDatabase(const fs::path& path, con
std::unique_ptr<BerkeleyDatabase> db;
{
LOCK(cs_db); // Lock env.m_databases until insert in BerkeleyDatabase constructor
- std::string data_filename = data_file.filename().string();
+ std::string data_filename = fs::PathToString(data_file.filename());
std::shared_ptr<BerkeleyEnvironment> env = GetBerkeleyEnv(data_file.parent_path());
if (env->m_databases.count(data_filename)) {
- error = Untranslated(strprintf("Refusing to load database. Data file '%s' is already loaded.", (env->Directory() / data_filename).string()));
+ error = Untranslated(strprintf("Refusing to load database. Data file '%s' is already loaded.", fs::PathToString(env->Directory() / data_filename)));
status = DatabaseStatus::FAILED_ALREADY_LOADED;
return nullptr;
}
diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h
index a8209587d7..b666a8e73a 100644
--- a/src/wallet/bdb.h
+++ b/src/wallet/bdb.h
@@ -63,7 +63,7 @@ public:
bool IsMock() const { return fMockDb; }
bool IsInitialized() const { return fDbEnvInit; }
- fs::path Directory() const { return strPath; }
+ fs::path Directory() const { return fs::PathFromString(strPath); }
bool Open(bilingual_str& error);
void Close();
@@ -141,7 +141,7 @@ public:
bool Verify(bilingual_str& error);
/** Return path to main database filename */
- std::string Filename() override { return (env->Directory() / strFile).string(); }
+ std::string Filename() override { return fs::PathToString(env->Directory() / strFile); }
std::string Format() override { return "bdb"; }
/**
diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp
index 8d5316e0af..c74c69ed09 100644
--- a/src/wallet/db.cpp
+++ b/src/wallet/db.cpp
@@ -12,7 +12,7 @@
std::vector<fs::path> ListDatabases(const fs::path& wallet_dir)
{
- const size_t offset = wallet_dir.string().size() + (wallet_dir == wallet_dir.root_name() ? 0 : 1);
+ const size_t offset = wallet_dir.native().size() + (wallet_dir == wallet_dir.root_name() ? 0 : 1);
std::vector<fs::path> paths;
boost::system::error_code ec;
@@ -20,9 +20,9 @@ std::vector<fs::path> ListDatabases(const fs::path& wallet_dir)
if (ec) {
if (fs::is_directory(*it)) {
it.no_push();
- LogPrintf("%s: %s %s -- skipping.\n", __func__, ec.message(), it->path().string());
+ LogPrintf("%s: %s %s -- skipping.\n", __func__, ec.message(), fs::PathToString(it->path()));
} else {
- LogPrintf("%s: %s %s\n", __func__, ec.message(), it->path().string());
+ LogPrintf("%s: %s %s\n", __func__, ec.message(), fs::PathToString(it->path()));
}
continue;
}
@@ -30,7 +30,8 @@ std::vector<fs::path> ListDatabases(const fs::path& wallet_dir)
try {
// Get wallet path relative to walletdir by removing walletdir from the wallet path.
// This can be replaced by boost::filesystem::lexically_relative once boost is bumped to 1.60.
- const fs::path path = it->path().string().substr(offset);
+ const auto path_str = it->path().native().substr(offset);
+ const fs::path path{path_str.begin(), path_str.end()};
if (it->status().type() == fs::directory_file &&
(IsBDBFile(BDBDataFile(it->path())) || IsSQLiteFile(SQLiteDataFile(it->path())))) {
@@ -50,7 +51,7 @@ std::vector<fs::path> ListDatabases(const fs::path& wallet_dir)
}
}
} catch (const std::exception& e) {
- LogPrintf("%s: Error scanning %s: %s\n", __func__, it->path().string(), e.what());
+ LogPrintf("%s: Error scanning %s: %s\n", __func__, fs::PathToString(it->path()), e.what());
it.no_push();
}
}
@@ -85,7 +86,7 @@ bool IsBDBFile(const fs::path& path)
// This check also prevents opening lock files.
boost::system::error_code ec;
auto size = fs::file_size(path, ec);
- if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), path.string());
+ if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), fs::PathToString(path));
if (size < 4096) return false;
fsbridge::ifstream file(path, std::ios::binary);
@@ -109,7 +110,7 @@ bool IsSQLiteFile(const fs::path& path)
// A SQLite Database file is at least 512 bytes.
boost::system::error_code ec;
auto size = fs::file_size(path, ec);
- if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), path.string());
+ if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), fs::PathToString(path));
if (size < 512) return false;
fsbridge::ifstream file(path, std::ios::binary);
diff --git a/src/wallet/dump.cpp b/src/wallet/dump.cpp
index c39c0c7e73..08d94b76d9 100644
--- a/src/wallet/dump.cpp
+++ b/src/wallet/dump.cpp
@@ -19,16 +19,16 @@ bool DumpWallet(CWallet& wallet, bilingual_str& error)
return false;
}
- fs::path path = dump_filename;
+ fs::path path = fs::PathFromString(dump_filename);
path = fs::absolute(path);
if (fs::exists(path)) {
- error = strprintf(_("File %s already exists. If you are sure this is what you want, move it out of the way first."), path.string());
+ error = strprintf(_("File %s already exists. If you are sure this is what you want, move it out of the way first."), fs::PathToString(path));
return false;
}
fsbridge::ofstream dump_file;
dump_file.open(path);
if (dump_file.fail()) {
- error = strprintf(_("Unable to open %s for writing"), path.string());
+ error = strprintf(_("Unable to open %s for writing"), fs::PathToString(path));
return false;
}
@@ -114,10 +114,10 @@ bool CreateFromDump(const std::string& name, const fs::path& wallet_path, biling
return false;
}
- fs::path dump_path = dump_filename;
+ fs::path dump_path = fs::PathFromString(dump_filename);
dump_path = fs::absolute(dump_path);
if (!fs::exists(dump_path)) {
- error = strprintf(_("Dump file %s does not exist."), dump_path.string());
+ error = strprintf(_("Dump file %s does not exist."), fs::PathToString(dump_path));
return false;
}
fsbridge::ifstream dump_file(dump_path);
diff --git a/src/wallet/interfaces.cpp b/src/wallet/interfaces.cpp
index 7d0cdb6934..57f1a6a67a 100644
--- a/src/wallet/interfaces.cpp
+++ b/src/wallet/interfaces.cpp
@@ -551,13 +551,13 @@ public:
}
std::string getWalletDir() override
{
- return GetWalletDir().string();
+ return fs::PathToString(GetWalletDir());
}
std::vector<std::string> listWalletDir() override
{
std::vector<std::string> paths;
for (auto& path : ListDatabases(GetWalletDir())) {
- paths.push_back(path.string());
+ paths.push_back(fs::PathToString(path));
}
return paths;
}
diff --git a/src/wallet/load.cpp b/src/wallet/load.cpp
index 1b841026b8..7ef5a0cf55 100644
--- a/src/wallet/load.cpp
+++ b/src/wallet/load.cpp
@@ -25,25 +25,25 @@ bool VerifyWallets(WalletContext& context)
ArgsManager& args = *Assert(context.args);
if (args.IsArgSet("-walletdir")) {
- fs::path wallet_dir = args.GetArg("-walletdir", "");
+ fs::path wallet_dir = fs::PathFromString(args.GetArg("-walletdir", ""));
boost::system::error_code error;
// The canonical path cleans the path, preventing >1 Berkeley environment instances for the same directory
fs::path canonical_wallet_dir = fs::canonical(wallet_dir, error);
if (error || !fs::exists(wallet_dir)) {
- chain.initError(strprintf(_("Specified -walletdir \"%s\" does not exist"), wallet_dir.string()));
+ chain.initError(strprintf(_("Specified -walletdir \"%s\" does not exist"), fs::PathToString(wallet_dir)));
return false;
} else if (!fs::is_directory(wallet_dir)) {
- chain.initError(strprintf(_("Specified -walletdir \"%s\" is not a directory"), wallet_dir.string()));
+ chain.initError(strprintf(_("Specified -walletdir \"%s\" is not a directory"), fs::PathToString(wallet_dir)));
return false;
// The canonical path transforms relative paths into absolute ones, so we check the non-canonical version
} else if (!wallet_dir.is_absolute()) {
- chain.initError(strprintf(_("Specified -walletdir \"%s\" is a relative path"), wallet_dir.string()));
+ chain.initError(strprintf(_("Specified -walletdir \"%s\" is a relative path"), fs::PathToString(wallet_dir)));
return false;
}
- args.ForceSetArg("-walletdir", canonical_wallet_dir.string());
+ args.ForceSetArg("-walletdir", fs::PathToString(canonical_wallet_dir));
}
- LogPrintf("Using wallet directory %s\n", GetWalletDir().string());
+ LogPrintf("Using wallet directory %s\n", fs::PathToString(GetWalletDir()));
chain.initMessage(_("Verifying wallet(s)…").translated);
@@ -70,7 +70,7 @@ bool VerifyWallets(WalletContext& context)
for (const auto& wallet : chain.getSettingsList("wallet")) {
const auto& wallet_file = wallet.get_str();
- const fs::path path = fsbridge::AbsPathJoin(GetWalletDir(), wallet_file);
+ const fs::path path = fsbridge::AbsPathJoin(GetWalletDir(), fs::PathFromString(wallet_file));
if (!wallet_paths.insert(path).second) {
chain.initWarning(strprintf(_("Ignoring duplicate -wallet %s."), wallet_file));
@@ -102,7 +102,7 @@ bool LoadWallets(WalletContext& context)
std::set<fs::path> wallet_paths;
for (const auto& wallet : chain.getSettingsList("wallet")) {
const auto& name = wallet.get_str();
- if (!wallet_paths.insert(name).second) {
+ if (!wallet_paths.insert(fs::PathFromString(name)).second) {
continue;
}
DatabaseOptions options;
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index 4d7fb2d38c..9b09bc23d6 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -550,7 +550,7 @@ RPCHelpMan importwallet()
EnsureWalletIsUnlocked(*pwallet);
fsbridge::ifstream file;
- file.open(request.params[0].get_str(), std::ios::in | std::ios::ate);
+ file.open(fs::u8path(request.params[0].get_str()), std::ios::in | std::ios::ate);
if (!file.is_open()) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Cannot open wallet dump file");
}
@@ -745,7 +745,7 @@ RPCHelpMan dumpwallet()
EnsureWalletIsUnlocked(wallet);
- fs::path filepath = request.params[0].get_str();
+ fs::path filepath = fs::u8path(request.params[0].get_str());
filepath = fs::absolute(filepath);
/* Prevent arbitrary files from being overwritten. There have been reports
@@ -754,7 +754,7 @@ RPCHelpMan dumpwallet()
* It may also avoid other security issues.
*/
if (fs::exists(filepath)) {
- throw JSONRPCError(RPC_INVALID_PARAMETER, filepath.string() + " already exists. If you are sure this is what you want, move it out of the way first");
+ throw JSONRPCError(RPC_INVALID_PARAMETER, filepath.u8string() + " already exists. If you are sure this is what you want, move it out of the way first");
}
fsbridge::ofstream file;
@@ -844,7 +844,7 @@ RPCHelpMan dumpwallet()
file.close();
UniValue reply(UniValue::VOBJ);
- reply.pushKV("filename", filepath.string());
+ reply.pushKV("filename", filepath.u8string());
return reply;
},
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 8b481bc29c..6959466d1d 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -1854,7 +1854,7 @@ static RPCHelpMan keypoolrefill()
"\nFills the keypool."+
HELP_REQUIRING_PASSPHRASE,
{
- {"newsize", RPCArg::Type::NUM, RPCArg::Default{100}, "The new keypool size"},
+ {"newsize", RPCArg::Type::NUM, RPCArg::DefaultHint{strprintf("%u, or as set by -keypool", DEFAULT_KEYPOOL_SIZE)}, "The new keypool size"},
},
RPCResult{RPCResult::Type::NONE, "", ""},
RPCExamples{
@@ -1893,6 +1893,33 @@ static RPCHelpMan keypoolrefill()
}
+static RPCHelpMan newkeypool()
+{
+ return RPCHelpMan{"newkeypool",
+ "\nEntirely clears and refills the keypool."+
+ HELP_REQUIRING_PASSPHRASE,
+ {},
+ RPCResult{RPCResult::Type::NONE, "", ""},
+ RPCExamples{
+ HelpExampleCli("newkeypool", "")
+ + HelpExampleRpc("newkeypool", "")
+ },
+ [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
+{
+ std::shared_ptr<CWallet> const pwallet = GetWalletForJSONRPCRequest(request);
+ if (!pwallet) return NullUniValue;
+
+ LOCK(pwallet->cs_wallet);
+
+ LegacyScriptPubKeyMan& spk_man = EnsureLegacyScriptPubKeyMan(*pwallet, true);
+ spk_man.NewKeyPool();
+
+ return NullUniValue;
+},
+ };
+}
+
+
static RPCHelpMan walletpassphrase()
{
return RPCHelpMan{"walletpassphrase",
@@ -2555,7 +2582,7 @@ static RPCHelpMan listwalletdir()
UniValue wallets(UniValue::VARR);
for (const auto& path : ListDatabases(GetWalletDir())) {
UniValue wallet(UniValue::VOBJ);
- wallet.pushKV("name", path.string());
+ wallet.pushKV("name", path.u8string());
wallets.push_back(wallet);
}
@@ -2856,7 +2883,7 @@ static RPCHelpMan restorewallet()
WalletContext& context = EnsureWalletContext(request.context);
- std::string backup_file = request.params[1].get_str();
+ auto backup_file = fs::u8path(request.params[1].get_str());
if (!fs::exists(backup_file)) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Backup file does not exist");
@@ -2864,14 +2891,14 @@ static RPCHelpMan restorewallet()
std::string wallet_name = request.params[0].get_str();
- const fs::path wallet_path = fsbridge::AbsPathJoin(GetWalletDir(), wallet_name);
+ const fs::path wallet_path = fsbridge::AbsPathJoin(GetWalletDir(), fs::u8path(wallet_name));
if (fs::exists(wallet_path)) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "Wallet name already exists.");
}
if (!TryCreateDirectories(wallet_path)) {
- throw JSONRPCError(RPC_WALLET_ERROR, strprintf("Failed to create database path '%s'. Database already exists.", wallet_path.string()));
+ throw JSONRPCError(RPC_WALLET_ERROR, strprintf("Failed to create database path '%s'. Database already exists.", wallet_path.u8string()));
}
auto wallet_file = wallet_path / "wallet.dat";
@@ -4875,6 +4902,7 @@ static const CRPCCommand commands[] =
{ "wallet", &listwallets, },
{ "wallet", &loadwallet, },
{ "wallet", &lockunspent, },
+ { "wallet", &newkeypool, },
{ "wallet", &removeprunedfunds, },
{ "wallet", &rescanblockchain, },
{ "wallet", &send, },
diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp
index fdfb36bb0a..619ebc8b4f 100644
--- a/src/wallet/scriptpubkeyman.cpp
+++ b/src/wallet/scriptpubkeyman.cpp
@@ -489,7 +489,7 @@ bool LegacyScriptPubKeyMan::Upgrade(int prev_version, int new_version, bilingual
}
// Regenerate the keypool if upgraded to HD
if (hd_upgrade) {
- if (!TopUp()) {
+ if (!NewKeyPool()) {
error = _("Unable to generate keys");
return false;
}
diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp
index 815d17967c..650e083e8e 100644
--- a/src/wallet/sqlite.cpp
+++ b/src/wallet/sqlite.cpp
@@ -67,7 +67,7 @@ static void SetPragma(sqlite3* db, const std::string& key, const std::string& va
}
SQLiteDatabase::SQLiteDatabase(const fs::path& dir_path, const fs::path& file_path, bool mock)
- : WalletDatabase(), m_mock(mock), m_dir_path(dir_path.string()), m_file_path(file_path.string())
+ : WalletDatabase(), m_mock(mock), m_dir_path(fs::PathToString(dir_path)), m_file_path(fs::PathToString(file_path))
{
{
LOCK(g_sqlite_mutex);
@@ -206,7 +206,7 @@ void SQLiteDatabase::Open()
if (m_db == nullptr) {
if (!m_mock) {
- TryCreateDirectories(m_dir_path);
+ TryCreateDirectories(fs::PathFromString(m_dir_path));
}
int ret = sqlite3_open_v2(m_file_path.c_str(), &m_db, flags, nullptr);
if (ret != SQLITE_OK) {
diff --git a/src/wallet/test/db_tests.cpp b/src/wallet/test/db_tests.cpp
index 16cb7e0baf..dba3f35025 100644
--- a/src/wallet/test/db_tests.cpp
+++ b/src/wallet/test/db_tests.cpp
@@ -16,7 +16,7 @@ BOOST_FIXTURE_TEST_SUITE(db_tests, BasicTestingSetup)
static std::shared_ptr<BerkeleyEnvironment> GetWalletEnv(const fs::path& path, std::string& database_filename)
{
fs::path data_file = BDBDataFile(path);
- database_filename = data_file.filename().string();
+ database_filename = fs::PathToString(data_file.filename());
return GetBerkeleyEnv(data_file.parent_path());
}
@@ -25,11 +25,7 @@ BOOST_AUTO_TEST_CASE(getwalletenv_file)
std::string test_name = "test_name.dat";
const fs::path datadir = gArgs.GetDataDirNet();
fs::path file_path = datadir / test_name;
-#if BOOST_VERSION >= 107700
- std::ofstream f(BOOST_FILESYSTEM_C_STR(file_path));
-#else
- std::ofstream f(file_path.BOOST_FILESYSTEM_C_STR);
-#endif // BOOST_VERSION >= 107700
+ fs::ofstream f(file_path);
f.close();
std::string filename;
diff --git a/src/wallet/test/init_test_fixture.cpp b/src/wallet/test/init_test_fixture.cpp
index 53c972c46d..170675c035 100644
--- a/src/wallet/test/init_test_fixture.cpp
+++ b/src/wallet/test/init_test_fixture.cpp
@@ -32,11 +32,7 @@ InitWalletDirTestingSetup::InitWalletDirTestingSetup(const std::string& chainNam
fs::create_directories(m_walletdir_path_cases["default"]);
fs::create_directories(m_walletdir_path_cases["custom"]);
fs::create_directories(m_walletdir_path_cases["relative"]);
-#if BOOST_VERSION >= 107700
- std::ofstream f(BOOST_FILESYSTEM_C_STR(m_walletdir_path_cases["file"]));
-#else
- std::ofstream f(m_walletdir_path_cases["file"].BOOST_FILESYSTEM_C_STR);
-#endif // BOOST_VERSION >= 107700
+ fs::ofstream f(m_walletdir_path_cases["file"]);
f.close();
}
@@ -50,5 +46,5 @@ InitWalletDirTestingSetup::~InitWalletDirTestingSetup()
void InitWalletDirTestingSetup::SetWalletDir(const fs::path& walletdir_path)
{
- gArgs.ForceSetArg("-walletdir", walletdir_path.string());
+ gArgs.ForceSetArg("-walletdir", fs::PathToString(walletdir_path));
}
diff --git a/src/wallet/test/init_tests.cpp b/src/wallet/test/init_tests.cpp
index 45e1b8c4b8..222c2bf4b7 100644
--- a/src/wallet/test/init_tests.cpp
+++ b/src/wallet/test/init_tests.cpp
@@ -17,7 +17,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_default)
SetWalletDir(m_walletdir_path_cases["default"]);
bool result = m_wallet_client->verify();
BOOST_CHECK(result == true);
- fs::path walletdir = gArgs.GetArg("-walletdir", "");
+ fs::path walletdir = fs::PathFromString(gArgs.GetArg("-walletdir", ""));
fs::path expected_path = fs::canonical(m_walletdir_path_cases["default"]);
BOOST_CHECK_EQUAL(walletdir, expected_path);
}
@@ -27,7 +27,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_custom)
SetWalletDir(m_walletdir_path_cases["custom"]);
bool result = m_wallet_client->verify();
BOOST_CHECK(result == true);
- fs::path walletdir = gArgs.GetArg("-walletdir", "");
+ fs::path walletdir = fs::PathFromString(gArgs.GetArg("-walletdir", ""));
fs::path expected_path = fs::canonical(m_walletdir_path_cases["custom"]);
BOOST_CHECK_EQUAL(walletdir, expected_path);
}
@@ -67,7 +67,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_no_trailing)
SetWalletDir(m_walletdir_path_cases["trailing"]);
bool result = m_wallet_client->verify();
BOOST_CHECK(result == true);
- fs::path walletdir = gArgs.GetArg("-walletdir", "");
+ fs::path walletdir = fs::PathFromString(gArgs.GetArg("-walletdir", ""));
fs::path expected_path = fs::canonical(m_walletdir_path_cases["default"]);
BOOST_CHECK_EQUAL(walletdir, expected_path);
}
@@ -77,7 +77,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_no_trailing2)
SetWalletDir(m_walletdir_path_cases["trailing2"]);
bool result = m_wallet_client->verify();
BOOST_CHECK(result == true);
- fs::path walletdir = gArgs.GetArg("-walletdir", "");
+ fs::path walletdir = fs::PathFromString(gArgs.GetArg("-walletdir", ""));
fs::path expected_path = fs::canonical(m_walletdir_path_cases["default"]);
BOOST_CHECK_EQUAL(walletdir, expected_path);
}
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index 9938380369..94b5abfba7 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -258,7 +258,7 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
SetMockTime(KEY_TIME);
m_coinbase_txns.emplace_back(CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);
- std::string backup_file = (gArgs.GetDataDirNet() / "wallet.backup").string();
+ std::string backup_file = fs::PathToString(gArgs.GetDataDirNet() / "wallet.backup");
// Import key into wallet and call dumpwallet to create backup file.
{
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 180d9d652a..a749ab8897 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -2515,16 +2515,16 @@ std::unique_ptr<WalletDatabase> MakeWalletDatabase(const std::string& name, cons
// 2. Path to an existing directory.
// 3. Path to a symlink to a directory.
// 4. For backwards compatibility, the name of a data file in -walletdir.
- const fs::path wallet_path = fsbridge::AbsPathJoin(GetWalletDir(), name);
+ const fs::path wallet_path = fsbridge::AbsPathJoin(GetWalletDir(), fs::PathFromString(name));
fs::file_type path_type = fs::symlink_status(wallet_path).type();
if (!(path_type == fs::file_not_found || path_type == fs::directory_file ||
(path_type == fs::symlink_file && fs::is_directory(wallet_path)) ||
- (path_type == fs::regular_file && fs::path(name).filename() == name))) {
+ (path_type == fs::regular_file && fs::PathFromString(name).filename() == fs::PathFromString(name)))) {
error_string = Untranslated(strprintf(
"Invalid -wallet path '%s'. -wallet path should point to a directory where wallet.dat and "
"database/log.?????????? files can be stored, a location where such a directory could be created, "
"or (for backwards compatibility) the name of an existing data file in -walletdir (%s)",
- name, GetWalletDir()));
+ name, fs::quoted(fs::PathToString(GetWalletDir()))));
status = DatabaseStatus::FAILED_BAD_PATH;
return nullptr;
}
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index 8ff09a0878..a6839f1f78 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -1106,7 +1106,7 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas
try {
exists = fs::symlink_status(path).type() != fs::file_not_found;
} catch (const fs::filesystem_error& e) {
- error = Untranslated(strprintf("Failed to access database path '%s': %s", path.string(), fsbridge::get_filesystem_error_message(e)));
+ error = Untranslated(strprintf("Failed to access database path '%s': %s", fs::PathToString(path), fsbridge::get_filesystem_error_message(e)));
status = DatabaseStatus::FAILED_BAD_PATH;
return nullptr;
}
@@ -1118,33 +1118,33 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas
}
if (IsSQLiteFile(SQLiteDataFile(path))) {
if (format) {
- error = Untranslated(strprintf("Failed to load database path '%s'. Data is in ambiguous format.", path.string()));
+ error = Untranslated(strprintf("Failed to load database path '%s'. Data is in ambiguous format.", fs::PathToString(path)));
status = DatabaseStatus::FAILED_BAD_FORMAT;
return nullptr;
}
format = DatabaseFormat::SQLITE;
}
} else if (options.require_existing) {
- error = Untranslated(strprintf("Failed to load database path '%s'. Path does not exist.", path.string()));
+ error = Untranslated(strprintf("Failed to load database path '%s'. Path does not exist.", fs::PathToString(path)));
status = DatabaseStatus::FAILED_NOT_FOUND;
return nullptr;
}
if (!format && options.require_existing) {
- error = Untranslated(strprintf("Failed to load database path '%s'. Data is not in recognized format.", path.string()));
+ error = Untranslated(strprintf("Failed to load database path '%s'. Data is not in recognized format.", fs::PathToString(path)));
status = DatabaseStatus::FAILED_BAD_FORMAT;
return nullptr;
}
if (format && options.require_create) {
- error = Untranslated(strprintf("Failed to create database path '%s'. Database already exists.", path.string()));
+ error = Untranslated(strprintf("Failed to create database path '%s'. Database already exists.", fs::PathToString(path)));
status = DatabaseStatus::FAILED_ALREADY_EXISTS;
return nullptr;
}
// A db already exists so format is set, but options also specifies the format, so make sure they agree
if (format && options.require_format && format != options.require_format) {
- error = Untranslated(strprintf("Failed to load database path '%s'. Data is not in required format.", path.string()));
+ error = Untranslated(strprintf("Failed to load database path '%s'. Data is not in required format.", fs::PathToString(path)));
status = DatabaseStatus::FAILED_BAD_FORMAT;
return nullptr;
}
@@ -1166,7 +1166,7 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas
#ifdef USE_SQLITE
return MakeSQLiteDatabase(path, options, status, error);
#endif
- error = Untranslated(strprintf("Failed to open database path '%s'. Build does not support SQLite database format.", path.string()));
+ error = Untranslated(strprintf("Failed to open database path '%s'. Build does not support SQLite database format.", fs::PathToString(path)));
status = DatabaseStatus::FAILED_BAD_FORMAT;
return nullptr;
}
@@ -1174,7 +1174,7 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas
#ifdef USE_BDB
return MakeBerkeleyDatabase(path, options, status, error);
#endif
- error = Untranslated(strprintf("Failed to open database path '%s'. Build does not support Berkeley DB database format.", path.string()));
+ error = Untranslated(strprintf("Failed to open database path '%s'. Build does not support Berkeley DB database format.", fs::PathToString(path)));
status = DatabaseStatus::FAILED_BAD_FORMAT;
return nullptr;
}
diff --git a/src/wallet/wallettool.cpp b/src/wallet/wallettool.cpp
index e3cb5cee5d..788679bbeb 100644
--- a/src/wallet/wallettool.cpp
+++ b/src/wallet/wallettool.cpp
@@ -125,7 +125,7 @@ bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command)
return false;
}
const std::string name = args.GetArg("-wallet", "");
- const fs::path path = fsbridge::AbsPathJoin(GetWalletDir(), name);
+ const fs::path path = fsbridge::AbsPathJoin(GetWalletDir(), fs::PathFromString(name));
if (command == "create") {
DatabaseOptions options;
diff --git a/src/wallet/walletutil.cpp b/src/wallet/walletutil.cpp
index 1c518daba6..7f813432b3 100644
--- a/src/wallet/walletutil.cpp
+++ b/src/wallet/walletutil.cpp
@@ -12,7 +12,7 @@ fs::path GetWalletDir()
fs::path path;
if (gArgs.IsArgSet("-walletdir")) {
- path = gArgs.GetArg("-walletdir", "");
+ path = fs::PathFromString(gArgs.GetArg("-walletdir", ""));
if (!fs::is_directory(path)) {
// If the path specified doesn't exist, we return the deliberately
// invalid empty string.
diff --git a/test/README.md b/test/README.md
index acd68d8d8f..c9e15c4968 100644
--- a/test/README.md
+++ b/test/README.md
@@ -275,12 +275,15 @@ Use the `-v` option for verbose output.
#### Dependencies
-| Lint test | Dependency | Version [used by CI](../ci/lint/04_install.sh) | Installation
-|-----------|:----------:|:-------------------------------------------:|--------------
-| [`lint-python.sh`](lint/lint-python.sh) | [flake8](https://gitlab.com/pycqa/flake8) | [3.8.3](https://github.com/bitcoin/bitcoin/pull/19348) | `pip3 install flake8==3.8.3`
-| [`lint-python.sh`](lint/lint-python.sh) | [mypy](https://github.com/python/mypy) | [0.781](https://github.com/bitcoin/bitcoin/pull/19348) | `pip3 install mypy==0.781`
-| [`lint-shell.sh`](lint/lint-shell.sh) | [ShellCheck](https://github.com/koalaman/shellcheck) | [0.7.2](https://github.com/bitcoin/bitcoin/pull/21749) | [details...](https://github.com/koalaman/shellcheck#installing)
-| [`lint-spelling.sh`](lint/lint-spelling.sh) | [codespell](https://github.com/codespell-project/codespell) | [2.0.0](https://github.com/bitcoin/bitcoin/pull/20817) | `pip3 install codespell==2.0.0`
+| Lint test | Dependency |
+|-----------|:----------:|
+| [`lint-python.sh`](lint/lint-python.sh) | [flake8](https://gitlab.com/pycqa/flake8)
+| [`lint-python.sh`](lint/lint-python.sh) | [mypy](https://github.com/python/mypy)
+| [`lint-python.sh`](lint/lint-python.sh) | [pyzmq](https://github.com/zeromq/pyzmq)
+| [`lint-shell.sh`](lint/lint-shell.sh) | [ShellCheck](https://github.com/koalaman/shellcheck)
+| [`lint-spelling.sh`](lint/lint-spelling.sh) | [codespell](https://github.com/codespell-project/codespell)
+
+The versions currently in use and the installation instructions are available in the [CI setup](../ci/lint/04_install.sh).
Please be aware that on Linux distributions all dependencies are usually available as packages, but could be outdated.
diff --git a/test/functional/combine_logs.py b/test/functional/combine_logs.py
index 00f2833f55..71dfb4c01a 100755
--- a/test/functional/combine_logs.py
+++ b/test/functional/combine_logs.py
@@ -188,7 +188,7 @@ def print_logs_plain(log_events, colors):
def print_logs_html(log_events):
"""Renders the iterator of log events into html."""
try:
- import jinja2
+ import jinja2  # type: ignore
except ImportError:
print("jinja2 not found. Try `pip install jinja2`")
sys.exit(1)
diff --git a/test/functional/data/__init__.py b/test/functional/data/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
--- /dev/null
+++ b/test/functional/data/__init__.py
diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py
index e0716fc54a..868bb42604 100755
--- a/test/functional/interface_rest.py
+++ b/test/functional/interface_rest.py
@@ -279,6 +279,13 @@ class RESTTest (BitcoinTestFramework):
json_obj = self.test_rest_request(f"/headers/5/{bb_hash}")
assert_equal(len(json_obj), 5) # now we should have 5 header objects
+ # Test number parsing
+ for num in ['5a', '-5', '0', '2001', '99999999999999999999999999999999999']:
+ assert_equal(
+ bytes(f'Header count out of range: {num}\r\n', 'ascii'),
+ self.test_rest_request(f"/headers/{num}/{bb_hash}", ret_type=RetType.BYTES, status=400),
+ )
+
self.log.info("Test tx inclusion in the /mempool and /block URIs")
# Make 3 tx and mine them on node 1
@@ -311,6 +318,15 @@ class RESTTest (BitcoinTestFramework):
if 'coinbase' not in tx['vin'][0]}
assert_equal(non_coinbase_txs, set(txs))
+ # Verify that the non-coinbase tx has "prevout" key set
+ for tx_obj in json_obj["tx"]:
+ for vin in tx_obj["vin"]:
+ if "coinbase" not in vin:
+ assert "prevout" in vin
+ assert_equal(vin["prevout"]["generated"], False)
+ else:
+ assert "prevout" not in vin
+
# Check the same but without tx details
json_obj = self.test_rest_request(f"/block/notxdetails/{newblockhash[0]}")
for tx in txs:
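The new `/headers/<count>/<hash>` cases above probe how the REST handler parses the header count. A minimal sketch of the bounds they imply, assuming the handler accepts counts from 1 up to 2000 (inferred only from the rejected values `0` and `2001`; the helper below is illustrative and not part of the test framework):

```python
def header_count_ok(raw: str) -> bool:
    """Illustrative restatement of the count check the cases above exercise."""
    if not raw.isdigit():          # rejects '5a' and '-5'
        return False
    return 1 <= int(raw) <= 2000   # rejects '0', '2001' and the huge value

assert header_count_ok('5')
assert not any(header_count_ok(v)
               for v in ['5a', '-5', '0', '2001',
                         '99999999999999999999999999999999999'])
```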
diff --git a/test/functional/mempool_package_limits.py b/test/functional/mempool_package_limits.py
index 2217628858..89a5c83826 100755
--- a/test/functional/mempool_package_limits.py
+++ b/test/functional/mempool_package_limits.py
@@ -244,7 +244,7 @@ class MempoolPackageLimitsTest(BitcoinTestFramework):
assert_equal(txres["package-error"], "package-mempool-limits")
# Clear mempool and check that the package passes now
- node.generate(1)
+ self.generate(node, 1)
assert all([res["allowed"] for res in node.testmempoolaccept(rawtxs=package_hex)])
def test_anc_count_limits(self):
diff --git a/test/functional/mining_prioritisetransaction.py b/test/functional/mining_prioritisetransaction.py
index da85ee54be..35274d3500 100755
--- a/test/functional/mining_prioritisetransaction.py
+++ b/test/functional/mining_prioritisetransaction.py
@@ -13,7 +13,7 @@ from test_framework.util import assert_equal, assert_raises_rpc_error, create_co
class PrioritiseTransactionTest(BitcoinTestFramework):
def set_test_params(self):
self.setup_clean_chain = True
- self.num_nodes = 2
+ self.num_nodes = 1
self.extra_args = [[
"-printpriority=1",
"-acceptnonstdtxn=1",
diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py
index c3c6ade684..eea9ee26cb 100755
--- a/test/functional/rpc_blockchain.py
+++ b/test/functional/rpc_blockchain.py
@@ -434,17 +434,55 @@ class BlockchainTest(BitcoinTestFramework):
miniwallet.send_self_transfer(fee_rate=fee_per_kb, from_node=node)
blockhash = self.generate(node, 1)[0]
- self.log.info("Test getblock with verbosity 1 doesn't include fee")
- block = node.getblock(blockhash, 1)
- assert 'fee' not in block['tx'][1]
-
- self.log.info('Test getblock with verbosity 2 includes expected fee')
- block = node.getblock(blockhash, 2)
- tx = block['tx'][1]
- assert 'fee' in tx
- assert_equal(tx['fee'], tx['vsize'] * fee_per_byte)
-
- self.log.info("Test getblock with verbosity 2 still works with pruned Undo data")
+ def assert_fee_not_in_block(verbosity):
+ block = node.getblock(blockhash, verbosity)
+ assert 'fee' not in block['tx'][1]
+
+ def assert_fee_in_block(verbosity):
+ block = node.getblock(blockhash, verbosity)
+ tx = block['tx'][1]
+ assert 'fee' in tx
+ assert_equal(tx['fee'], tx['vsize'] * fee_per_byte)
+
+ def assert_vin_contains_prevout(verbosity):
+ block = node.getblock(blockhash, verbosity)
+ tx = block["tx"][1]
+ total_vin = Decimal("0.00000000")
+ total_vout = Decimal("0.00000000")
+ for vin in tx["vin"]:
+ assert "prevout" in vin
+ assert_equal(set(vin["prevout"].keys()), set(("value", "height", "generated", "scriptPubKey")))
+ assert_equal(vin["prevout"]["generated"], True)
+ total_vin += vin["prevout"]["value"]
+ for vout in tx["vout"]:
+ total_vout += vout["value"]
+ assert_equal(total_vin, total_vout + tx["fee"])
+
+ def assert_vin_does_not_contain_prevout(verbosity):
+ block = node.getblock(blockhash, verbosity)
+ tx = block["tx"][1]
+ if isinstance(tx, str):
+ # In verbosity level 1, only the transaction hashes are written
+ pass
+ else:
+ for vin in tx["vin"]:
+ assert "prevout" not in vin
+
+ self.log.info("Test that getblock with verbosity 1 doesn't include fee")
+ assert_fee_not_in_block(1)
+
+ self.log.info('Test that getblock with verbosity 2 and 3 includes expected fee')
+ assert_fee_in_block(2)
+ assert_fee_in_block(3)
+
+ self.log.info("Test that getblock with verbosity 1 and 2 does not include prevout")
+ assert_vin_does_not_contain_prevout(1)
+ assert_vin_does_not_contain_prevout(2)
+
+ self.log.info("Test that getblock with verbosity 3 includes prevout")
+ assert_vin_contains_prevout(3)
+
+ self.log.info("Test that getblock with verbosity 2 and 3 still works with pruned Undo data")
datadir = get_datadir_path(self.options.tmpdir, 0)
self.log.info("Test getblock with invalid verbosity type returns proper error message")
@@ -458,8 +496,10 @@ class BlockchainTest(BitcoinTestFramework):
# Move instead of deleting so we can restore chain state afterwards
move_block_file('rev00000.dat', 'rev_wrong')
- block = node.getblock(blockhash, 2)
- assert 'fee' not in block['tx'][1]
+ assert_fee_not_in_block(2)
+ assert_fee_not_in_block(3)
+ assert_vin_does_not_contain_prevout(2)
+ assert_vin_does_not_contain_prevout(3)
# Restore chain state
move_block_file('rev_wrong', 'rev00000.dat')
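The `assert_vin_contains_prevout` helper added above checks the fee identity that verbosity 3 makes visible: the sum of each input's `prevout` value equals the sum of the outputs plus the fee. A tiny worked example of that accounting, with purely illustrative amounts:

```python
from decimal import Decimal

prevout_values = [Decimal("1.00000000")]                      # vin[*].prevout.value
vout_values = [Decimal("0.49990000"), Decimal("0.50000000")]  # vout[*].value
fee = Decimal("0.00010000")

assert sum(prevout_values) == sum(vout_values) + fee
```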
diff --git a/test/functional/rpc_misc.py b/test/functional/rpc_misc.py
index ac2a7a309b..e32e562bce 100755
--- a/test/functional/rpc_misc.py
+++ b/test/functional/rpc_misc.py
@@ -57,7 +57,7 @@ class RpcMiscTest(BitcoinTestFramework):
self.log.info("test logging rpc and help")
# Test logging RPC returns the expected number of logging categories.
- assert_equal(len(node.logging()), 26)
+ assert_equal(len(node.logging()), 27)
# Test toggling a logging category on/off/on with the logging RPC.
assert_equal(node.logging()['qt'], True)
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index d87d0cacfd..727ac6aed9 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -628,19 +628,19 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass):
self.sync_all()
def generate(self, generator, *args, **kwargs):
- blocks = generator.generate(*args, **kwargs)
+ blocks = generator.generate(*args, invalid_call=False, **kwargs)
return blocks
def generateblock(self, generator, *args, **kwargs):
- blocks = generator.generateblock(*args, **kwargs)
+ blocks = generator.generateblock(*args, invalid_call=False, **kwargs)
return blocks
def generatetoaddress(self, generator, *args, **kwargs):
- blocks = generator.generatetoaddress(*args, **kwargs)
+ blocks = generator.generatetoaddress(*args, invalid_call=False, **kwargs)
return blocks
def generatetodescriptor(self, generator, *args, **kwargs):
- blocks = generator.generatetodescriptor(*args, **kwargs)
+ blocks = generator.generatetodescriptor(*args, invalid_call=False, **kwargs)
return blocks
def sync_blocks(self, nodes=None, wait=1, timeout=60):
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index f9e2cfa2f5..e8ff41a46d 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -297,9 +297,21 @@ class TestNode():
time.sleep(1.0 / poll_per_s)
self._raise_assertion_error("Unable to retrieve cookie credentials after {}s".format(self.rpc_timeout))
- def generate(self, nblocks, maxtries=1000000):
+ def generate(self, nblocks, maxtries=1000000, **kwargs):
self.log.debug("TestNode.generate() dispatches `generate` call to `generatetoaddress`")
- return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries)
+ return self.generatetoaddress(nblocks=nblocks, address=self.get_deterministic_priv_key().address, maxtries=maxtries, **kwargs)
+
+ def generateblock(self, *args, invalid_call, **kwargs):
+ assert not invalid_call
+ return self.__getattr__('generateblock')(*args, **kwargs)
+
+ def generatetoaddress(self, *args, invalid_call, **kwargs):
+ assert not invalid_call
+ return self.__getattr__('generatetoaddress')(*args, **kwargs)
+
+ def generatetodescriptor(self, *args, invalid_call, **kwargs):
+ assert not invalid_call
+ return self.__getattr__('generatetodescriptor')(*args, **kwargs)
def get_wallet_rpc(self, wallet_name):
if self.use_cli:
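The `invalid_call` keyword added above makes direct `node.generate*()` calls fail and forces tests to go through the `BitcoinTestFramework.generate*()` wrappers, which pass `invalid_call=False` and can later grow shared behaviour such as implicit syncing. A minimal sketch of the guard pattern, using made-up stand-in classes rather than the framework's real ones:

```python
class FakeNode:
    def __getattr__(self, name):
        # Fallback that stands in for the node's RPC dispatcher.
        return lambda *args, **kwargs: (name, args, kwargs)

    def generatetoaddress(self, *args, invalid_call, **kwargs):
        assert not invalid_call  # direct calls without the keyword raise TypeError
        return self.__getattr__('generatetoaddress')(*args, **kwargs)


class FakeFramework:
    def generatetoaddress(self, node, *args, **kwargs):
        return node.generatetoaddress(*args, invalid_call=False, **kwargs)


node, framework = FakeNode(), FakeFramework()
framework.generatetoaddress(node, 1, "addr")   # OK: routed through the wrapper
try:
    node.generatetoaddress(1, "addr")          # TypeError: missing 'invalid_call'
except TypeError:
    pass
```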
diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py
index b0b578e544..81aad20079 100644
--- a/test/functional/test_framework/wallet.py
+++ b/test/functional/test_framework/wallet.py
@@ -111,9 +111,9 @@ class MiniWallet:
break
tx.vin[0].scriptSig = CScript([der_sig + bytes(bytearray([SIGHASH_ALL]))])
- def generate(self, num_blocks):
+ def generate(self, num_blocks, **kwargs):
"""Generate blocks with coinbase outputs to the internal address, and append the outputs to the internal list"""
- blocks = self._test_node.generatetodescriptor(num_blocks, self.get_descriptor())
+ blocks = self._test_node.generatetodescriptor(num_blocks, self.get_descriptor(), **kwargs)
for b in blocks:
cb_tx = self._test_node.getblock(blockhash=b, verbosity=2)['tx'][0]
self._utxos.append({'txid': cb_tx['txid'], 'vout': 0, 'value': cb_tx['vout'][0]['value']})
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index c5f08b27f2..b91b294108 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -40,7 +40,7 @@ except UnicodeDecodeError:
CROSS = "x "
CIRCLE = "o "
-if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):
+if os.name != 'nt' or sys.getwindowsversion() >= (10, 0, 14393):  # type: ignore
if os.name == 'nt':
import ctypes
kernel32 = ctypes.windll.kernel32 # type: ignore
@@ -207,6 +207,7 @@ BASE_SCRIPTS = [
'feature_assumevalid.py',
'example_test.py',
'wallet_txn_doublespend.py --legacy-wallet',
+ 'wallet_multisig_descriptor_psbt.py',
'wallet_txn_doublespend.py --descriptors',
'feature_backwards_compatibility.py --legacy-wallet',
'feature_backwards_compatibility.py --descriptors',
diff --git a/test/functional/wallet_abandonconflict.py b/test/functional/wallet_abandonconflict.py
index 70ca5a2fe4..d6766097f6 100755
--- a/test/functional/wallet_abandonconflict.py
+++ b/test/functional/wallet_abandonconflict.py
@@ -120,6 +120,14 @@ class AbandonConflictTest(BitcoinTestFramework):
assert_equal(newbalance, balance + Decimal("30"))
balance = newbalance
+ self.log.info("Check abandoned transactions in listsinceblock")
+ listsinceblock = self.nodes[0].listsinceblock()
+ txAB1_listsinceblock = [d for d in listsinceblock['transactions'] if d['txid'] == txAB1 and d['category'] == 'send']
+ for tx in txAB1_listsinceblock:
+ assert_equal(tx['abandoned'], True)
+ assert_equal(tx['confirmations'], 0)
+ assert_equal(tx['trusted'], False)
+
# Verify that even with a low min relay fee, the tx is not reaccepted from wallet on startup once abandoned
self.restart_node(0, extra_args=["-minrelaytxfee=0.00001"])
assert self.nodes[0].getmempoolinfo()['loaded']
diff --git a/test/functional/wallet_descriptor.py b/test/functional/wallet_descriptor.py
index 17a4c79da3..4ec44a8a6c 100755
--- a/test/functional/wallet_descriptor.py
+++ b/test/functional/wallet_descriptor.py
@@ -84,7 +84,7 @@ class WalletDescriptorTest(BitcoinTestFramework):
send_wrpc = self.nodes[0].get_wallet_rpc("desc1")
# Generate some coins
- self.generatetoaddress(send_wrpc, COINBASE_MATURITY + 1, send_wrpc.getnewaddress())
+ self.generatetoaddress(self.nodes[0], COINBASE_MATURITY + 1, send_wrpc.getnewaddress())
# Make transactions
self.log.info("Test sending and receiving")
diff --git a/test/functional/wallet_importdescriptors.py b/test/functional/wallet_importdescriptors.py
index d86c3737fe..c8f9664885 100755
--- a/test/functional/wallet_importdescriptors.py
+++ b/test/functional/wallet_importdescriptors.py
@@ -74,7 +74,7 @@ class ImportDescriptorsTest(BitcoinTestFramework):
assert_equal(wpriv.getwalletinfo()['keypoolsize'], 0)
self.log.info('Mining coins')
- self.generatetoaddress(w0, COINBASE_MATURITY + 1, w0.getnewaddress())
+ self.generatetoaddress(self.nodes[0], COINBASE_MATURITY + 1, w0.getnewaddress())
# RPC importdescriptors -----------------------------------------------
@@ -405,7 +405,7 @@ class ImportDescriptorsTest(BitcoinTestFramework):
solvable=True,
ismine=True)
txid = w0.sendtoaddress(address, 49.99995540)
- self.generatetoaddress(w0, 6, w0.getnewaddress())
+ self.generatetoaddress(self.nodes[0], 6, w0.getnewaddress())
self.sync_blocks()
tx = wpriv.createrawtransaction([{"txid": txid, "vout": 0}], {w0.getnewaddress(): 49.999})
signed_tx = wpriv.signrawtransactionwithwallet(tx)
diff --git a/test/functional/wallet_keypool.py b/test/functional/wallet_keypool.py
index c714993234..79235646b0 100755
--- a/test/functional/wallet_keypool.py
+++ b/test/functional/wallet_keypool.py
@@ -138,6 +138,20 @@ class KeyPoolTest(BitcoinTestFramework):
assert_equal(wi['keypoolsize_hd_internal'], 100)
assert_equal(wi['keypoolsize'], 100)
+ if not self.options.descriptors:
+ # Check that newkeypool entirely flushes the keypool
+ start_keypath = nodes[0].getaddressinfo(nodes[0].getnewaddress())['hdkeypath']
+ start_change_keypath = nodes[0].getaddressinfo(nodes[0].getrawchangeaddress())['hdkeypath']
+ # flush keypool and get new addresses
+ nodes[0].newkeypool()
+ end_keypath = nodes[0].getaddressinfo(nodes[0].getnewaddress())['hdkeypath']
+ end_change_keypath = nodes[0].getaddressinfo(nodes[0].getrawchangeaddress())['hdkeypath']
+ # The new keypath index should be 100 more than the old one
+ new_index = int(start_keypath.rsplit('/', 1)[1][:-1]) + 100
+ new_change_index = int(start_change_keypath.rsplit('/', 1)[1][:-1]) + 100
+ assert_equal(end_keypath, "m/0'/0'/" + str(new_index) + "'")
+ assert_equal(end_change_keypath, "m/0'/1'/" + str(new_change_index) + "'")
+
# create a blank wallet
nodes[0].createwallet(wallet_name='w2', blank=True, disable_private_keys=True)
w2 = nodes[0].get_wallet_rpc('w2')
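The keypath arithmetic in the new `newkeypool` check above parses the child index out of a BIP32 path and expects it to advance by the keypool size (100) after the flush. A small standalone illustration; the path is made up:

```python
start_keypath = "m/0'/0'/7'"                        # hypothetical pre-flush path
index = int(start_keypath.rsplit('/', 1)[1][:-1])   # "7'" -> 7
expected = f"m/0'/0'/{index + 100}'"
assert expected == "m/0'/0'/107'"
```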
diff --git a/test/functional/wallet_multisig_descriptor_psbt.py b/test/functional/wallet_multisig_descriptor_psbt.py
new file mode 100755
index 0000000000..ed855d2525
--- /dev/null
+++ b/test/functional/wallet_multisig_descriptor_psbt.py
@@ -0,0 +1,163 @@
+#!/usr/bin/env python3
+# Copyright (c) 2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test a basic M-of-N multisig setup between multiple people using descriptor wallets and PSBTs, as well as a signing flow.
+
+This is meant to be documentation as much as functional tests, so it is kept as simple and readable as possible.
+"""
+
+from test_framework.address import base58_to_byte
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_approx,
+ assert_equal,
+)
+
+
+class WalletMultisigDescriptorPSBTTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 3
+ self.setup_clean_chain = True
+ self.wallet_names = []
+ self.extra_args = [["-keypool=100"]] * self.num_nodes
+
+ def skip_test_if_missing_module(self):
+ self.skip_if_no_wallet()
+ self.skip_if_no_sqlite()
+
+ @staticmethod
+ def _get_xpub(wallet):
+ """Extract the wallet's xpubs using `listdescriptors` and pick the one from the `pkh` descriptor since it's least likely to be accidentally reused (legacy addresses)."""
+ descriptor = next(filter(lambda d: d["desc"].startswith("pkh"), wallet.listdescriptors()["descriptors"]))
+ return descriptor["desc"].split("]")[-1].split("/")[0]
+
+ @staticmethod
+ def _check_psbt(psbt, to, value, multisig):
+ """Helper function for any of the N participants to check the psbt with decodepsbt and verify it is OK before signing."""
+ tx = multisig.decodepsbt(psbt)["tx"]
+ amount = 0
+ for vout in tx["vout"]:
+ address = vout["scriptPubKey"]["address"]
+ assert_equal(multisig.getaddressinfo(address)["ischange"], address != to)
+ if address == to:
+ amount += vout["value"]
+ assert_approx(amount, float(value), vspan=0.001)
+
+ def participants_create_multisigs(self, xpubs):
+ """The multisig is created by importing the following descriptors. The resulting wallet is watch-only and every participant can do this."""
+ # some simple validation
+ assert_equal(len(xpubs), self.N)
+ # a sanity check: this will throw if the base58 checksum of any of the provided xpubs is invalid
+ for xpub in xpubs:
+ base58_to_byte(xpub)
+
+ for i, node in enumerate(self.nodes):
+ node.createwallet(wallet_name=f"{self.name}_{i}", blank=True, descriptors=True, disable_private_keys=True)
+ multisig = node.get_wallet_rpc(f"{self.name}_{i}")
+ external = multisig.getdescriptorinfo(f"wsh(sortedmulti({self.M},{f'/0/*,'.join(xpubs)}/0/*))")
+ internal = multisig.getdescriptorinfo(f"wsh(sortedmulti({self.M},{f'/1/*,'.join(xpubs)}/1/*))")
+ result = multisig.importdescriptors([
+ { # receiving addresses (internal: False)
+ "desc": external["descriptor"],
+ "active": True,
+ "internal": False,
+ "timestamp": "now",
+ },
+ { # change addresses (internal: True)
+ "desc": internal["descriptor"],
+ "active": True,
+ "internal": True,
+ "timestamp": "now",
+ },
+ ])
+ assert all(r["success"] for r in result)
+ yield multisig
+
+ def run_test(self):
+ self.M = 2
+ self.N = self.num_nodes
+ self.name = f"{self.M}_of_{self.N}_multisig"
+ self.log.info(f"Testing {self.name}...")
+
+ participants = {
+ # Every participant generates an xpub. The most straightforward way is to create a new descriptor wallet.
+ # This wallet will be the participant's `signer` for the resulting multisig. Avoid reusing this wallet for any other purpose (for privacy reasons).
+ "signers": [node.get_wallet_rpc(node.createwallet(wallet_name=f"participant_{self.nodes.index(node)}", descriptors=True)["name"]) for node in self.nodes],
+ # After participants generate and exchange their xpubs they will each create their own watch-only multisig.
+ # Note: these multisigs are all the same; this just highlights that each participant can independently verify everything on their own node.
+ "multisigs": []
+ }
+
+ self.log.info("Generate and exchange xpubs...")
+ xpubs = [self._get_xpub(signer) for signer in participants["signers"]]
+
+ self.log.info("Every participant imports the following descriptors to create the watch-only multisig...")
+ participants["multisigs"] = list(self.participants_create_multisigs(xpubs))
+
+ self.log.info("Check that every participant's multisig generates the same addresses...")
+ for _ in range(10): # check that the first 10 generated addresses are the same for all participants' multisigs
+ receive_addresses = [multisig.getnewaddress() for multisig in participants["multisigs"]]
+ assert all(address == receive_addresses[0] for address in receive_addresses)
+ change_addresses = [multisig.getrawchangeaddress() for multisig in participants["multisigs"]]
+ assert all(address == change_addresses[0] for address in change_addresses)
+
+ self.log.info("Get a mature utxo to send to the multisig...")
+ coordinator_wallet = participants["signers"][0]
+ self.generatetoaddress(self.nodes[0], 101, coordinator_wallet.getnewaddress())
+
+ deposit_amount = 6.15
+ multisig_receiving_address = participants["multisigs"][0].getnewaddress()
+ self.log.info("Send funds to the resulting multisig receiving address...")
+ coordinator_wallet.sendtoaddress(multisig_receiving_address, deposit_amount)
+ self.generate(self.nodes[0], 1)
+ self.sync_all()
+ for participant in participants["multisigs"]:
+ assert_approx(participant.getbalance(), deposit_amount, vspan=0.001)
+
+ self.log.info("Send a transaction from the multisig!")
+ to = participants["signers"][self.N - 1].getnewaddress()
+ value = 1
+ self.log.info("First, make a sending transaction, created using `walletcreatefundedpsbt` (anyone can initiate this)...")
+ psbt = participants["multisigs"][0].walletcreatefundedpsbt(inputs=[], outputs={to: value}, options={"feeRate": 0.00010})
+
+ psbts = []
+ self.log.info("Now at least M users check the psbt with decodepsbt and (if OK) signs it with walletprocesspsbt...")
+ for m in range(self.M):
+ signers_multisig = participants["multisigs"][m]
+ self._check_psbt(psbt["psbt"], to, value, signers_multisig)
+ signing_wallet = participants["signers"][m]
+ partially_signed_psbt = signing_wallet.walletprocesspsbt(psbt["psbt"])
+ psbts.append(partially_signed_psbt["psbt"])
+
+ self.log.info("Finally, collect the signed PSBTs with combinepsbt, finalizepsbt, then broadcast the resulting transaction...")
+ combined = coordinator_wallet.combinepsbt(psbts)
+ finalized = coordinator_wallet.finalizepsbt(combined)
+ coordinator_wallet.sendrawtransaction(finalized["hex"])
+
+ self.log.info("Check that balances are correct after the transaction has been included in a block.")
+ self.generate(self.nodes[0], 1)
+ self.sync_all()
+ assert_approx(participants["multisigs"][0].getbalance(), deposit_amount - value, vspan=0.001)
+ assert_equal(participants["signers"][self.N - 1].getbalance(), value)
+
+ self.log.info("Send another transaction from the multisig, this time with a daisy chained signing flow (one after another in series)!")
+ psbt = participants["multisigs"][0].walletcreatefundedpsbt(inputs=[], outputs={to: value}, options={"feeRate": 0.00010})
+ for m in range(self.M):
+ signers_multisig = participants["multisigs"][m]
+ self._check_psbt(psbt["psbt"], to, value, signers_multisig)
+ signing_wallet = participants["signers"][m]
+ psbt = signing_wallet.walletprocesspsbt(psbt["psbt"])
+ assert_equal(psbt["complete"], m == self.M - 1)
+ finalized = coordinator_wallet.finalizepsbt(psbt["psbt"])
+ coordinator_wallet.sendrawtransaction(finalized["hex"])
+
+ self.log.info("Check that balances are correct after the transaction has been included in a block.")
+ self.generate(self.nodes[0], 1)
+ self.sync_all()
+ assert_approx(participants["multisigs"][0].getbalance(), deposit_amount - (value * 2), vspan=0.001)
+ assert_equal(participants["signers"][self.N - 1].getbalance(), value * 2)
+
+
+if __name__ == "__main__":
+ WalletMultisigDescriptorPSBTTest().main()
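Two string manipulations in the new test are easy to misread: `_get_xpub` slices the xpub out of a `pkh(...)` descriptor, and `participants_create_multisigs` splices every xpub into `wsh(sortedmulti(...))` expressions for receive and change addresses. A standalone sketch with placeholder keys and fingerprints (not real xpubs):

```python
# _get_xpub: take everything after the key-origin bracket, before the first '/'.
desc = "pkh([deadbeef/44h/1h/0h]xpubPLACEHOLDER/0/*)#checksum"
xpub = desc.split("]")[-1].split("/")[0]
assert xpub == "xpubPLACEHOLDER"

# participants_create_multisigs: join the xpubs into the external (receive) and
# internal (change) sortedmulti descriptors.
M, xpubs = 2, ["xpubA", "xpubB", "xpubC"]
external = f"wsh(sortedmulti({M},{f'/0/*,'.join(xpubs)}/0/*))"
internal = f"wsh(sortedmulti({M},{f'/1/*,'.join(xpubs)}/1/*))"
assert external == "wsh(sortedmulti(2,xpubA/0/*,xpubB/0/*,xpubC/0/*))"
assert internal == "wsh(sortedmulti(2,xpubA/1/*,xpubB/1/*,xpubC/1/*))"
```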
diff --git a/test/functional/wallet_transactiontime_rescan.py b/test/functional/wallet_transactiontime_rescan.py
index 78859e6131..afa5139da7 100755
--- a/test/functional/wallet_transactiontime_rescan.py
+++ b/test/functional/wallet_transactiontime_rescan.py
@@ -63,7 +63,7 @@ class TransactionTimeRescanTest(BitcoinTestFramework):
# generate some btc to create transactions and check blockcount
initial_mine = COINBASE_MATURITY + 1
- minernode.generatetoaddress(initial_mine, m1)
+ self.generatetoaddress(minernode, initial_mine, m1)
assert_equal(minernode.getblockcount(), initial_mine + 200)
# synchronize nodes and time
@@ -76,7 +76,7 @@ class TransactionTimeRescanTest(BitcoinTestFramework):
miner_wallet.sendtoaddress(wo1, 10)
# generate blocks and check blockcount
- minernode.generatetoaddress(COINBASE_MATURITY, m1)
+ self.generatetoaddress(minernode, COINBASE_MATURITY, m1)
assert_equal(minernode.getblockcount(), initial_mine + 300)
# synchronize nodes and time
@@ -89,7 +89,7 @@ class TransactionTimeRescanTest(BitcoinTestFramework):
miner_wallet.sendtoaddress(wo2, 5)
# generate blocks and check blockcount
- minernode.generatetoaddress(COINBASE_MATURITY, m1)
+ self.generatetoaddress(minernode, COINBASE_MATURITY, m1)
assert_equal(minernode.getblockcount(), initial_mine + 400)
# synchronize nodes and time
@@ -102,7 +102,7 @@ class TransactionTimeRescanTest(BitcoinTestFramework):
miner_wallet.sendtoaddress(wo3, 1)
# generate more blocks and check blockcount
- minernode.generatetoaddress(COINBASE_MATURITY, m1)
+ self.generatetoaddress(minernode, COINBASE_MATURITY, m1)
assert_equal(minernode.getblockcount(), initial_mine + 500)
self.log.info('Check user\'s final balance and transaction count')
diff --git a/test/functional/wallet_upgradewallet.py b/test/functional/wallet_upgradewallet.py
index ed98db55c9..5800880830 100755
--- a/test/functional/wallet_upgradewallet.py
+++ b/test/functional/wallet_upgradewallet.py
@@ -234,18 +234,13 @@ class UpgradeWalletTest(BitcoinTestFramework):
assert_equal(1, hd_chain_version)
seed_id = bytearray(seed_id)
seed_id.reverse()
- old_kvs = new_kvs
- # First 2 keys should still be non-HD
- for i in range(0, 2):
- info = wallet.getaddressinfo(wallet.getnewaddress())
- assert 'hdkeypath' not in info
- assert 'hdseedid' not in info
- # Next key should be HD
+
+ # New keys (including change) should be HD (the two old keys have been flushed)
info = wallet.getaddressinfo(wallet.getnewaddress())
assert_equal(seed_id.hex(), info['hdseedid'])
assert_equal('m/0\'/0\'/0\'', info['hdkeypath'])
prev_seed_id = info['hdseedid']
- # Change key should be the same keypool
+ # Change key should be HD and from the same keypool
info = wallet.getaddressinfo(wallet.getrawchangeaddress())
assert_equal(prev_seed_id, info['hdseedid'])
assert_equal('m/0\'/0\'/1\'', info['hdkeypath'])
@@ -291,14 +286,7 @@ class UpgradeWalletTest(BitcoinTestFramework):
hd_chain_version, external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', hd_chain)
assert_equal(2, hd_chain_version)
assert_equal(2, internal_counter)
- # Drain the keypool by fetching one external key and one change key. Should still be the same keypool
- info = wallet.getaddressinfo(wallet.getnewaddress())
- assert 'hdseedid' not in info
- assert 'hdkeypath' not in info
- info = wallet.getaddressinfo(wallet.getrawchangeaddress())
- assert 'hdseedid' not in info
- assert 'hdkeypath' not in info
- # The next addresses are HD and should be on different HD chains
+ # The next addresses are HD and should be on different HD chains (the one remaining key in each pool should have been flushed)
info = wallet.getaddressinfo(wallet.getnewaddress())
ext_id = info['hdseedid']
assert_equal('m/0\'/0\'/0\'', info['hdkeypath'])
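The surrounding checks read the wallet's `hd_chain` record, whose layout is given by the `struct.unpack('<iI20sI', hd_chain)` call above: a little-endian int32 version, a uint32 counter for external keys, a 20-byte seed id, and a uint32 counter for internal (change) keys. A round-trip sketch with dummy values:

```python
import struct

record = struct.pack('<iI20sI', 2, 101, b'\x00' * 20, 2)   # dummy hd_chain record
version, external_counter, seed_id, internal_counter = struct.unpack('<iI20sI', record)
assert (version, external_counter, len(seed_id), internal_counter) == (2, 101, 20, 2)
```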
diff --git a/test/lint/lint-locale-dependence.sh b/test/lint/lint-locale-dependence.sh
index 712c4f0d5b..b119cffec8 100755
--- a/test/lint/lint-locale-dependence.sh
+++ b/test/lint/lint-locale-dependence.sh
@@ -38,12 +38,10 @@ export LC_ALL=C
# https://stackoverflow.com/a/34878283 for more details.
# TODO: Reduce KNOWN_VIOLATIONS by replacing uses of locale dependent stoul/strtol with locale
-# independent ToIntegral<T>(...).
+# independent ToIntegral<T>(...) or the ParseInt*() functions.
# TODO: Reduce KNOWN_VIOLATIONS by replacing uses of locale dependent snprintf with strprintf.
KNOWN_VIOLATIONS=(
- "src/bitcoin-tx.cpp.*stoul"
"src/dbwrapper.cpp:.*vsnprintf"
- "src/rest.cpp:.*strtol"
"src/test/dbwrapper_tests.cpp:.*snprintf"
"src/test/fuzz/locale.cpp"
"src/test/fuzz/string.cpp"
diff --git a/test/lint/lint-python.sh b/test/lint/lint-python.sh
index c448fa6f9a..3d22407fd1 100755
--- a/test/lint/lint-python.sh
+++ b/test/lint/lint-python.sh
@@ -102,7 +102,7 @@ if ! PYTHONWARNINGS="ignore" flake8 --ignore=B,C,E,F,I,N,W --select=$(IFS=","; e
EXIT_CODE=1
fi
-if ! mypy --ignore-missing-imports --show-error-codes $(git ls-files "test/functional/*.py" "contrib/devtools/*.py"); then
+if ! mypy --show-error-codes $(git ls-files "test/functional/*.py" "contrib/devtools/*.py"); then
EXIT_CODE=1
fi
diff --git a/test/util/data/bitcoin-util-test.json b/test/util/data/bitcoin-util-test.json
index a648c0287a..cca5732aa1 100644
--- a/test/util/data/bitcoin-util-test.json
+++ b/test/util/data/bitcoin-util-test.json
@@ -295,6 +295,12 @@
"description": "Create a new transaction with a single output script (OP_DROP) in a P2SH, wrapped in a P2SH (output as json)"
},
{ "exec": "./bitcoin-tx",
+ "args": ["-create", "outscript=0:999999999999999999999999999999"],
+ "return_code": 1,
+ "error_txt": "error: script parse error: decimal numeric value only allowed in the range -0xFFFFFFFF...0xFFFFFFFF",
+ "description": "Try to parse an output script with a decimal number above the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
"args": ["-create", "outscript=0:9999999999"],
"return_code": 1,
"error_txt": "error: script parse error: decimal numeric value only allowed in the range -0xFFFFFFFF...0xFFFFFFFF",
@@ -512,6 +518,30 @@
{ "exec": "./bitcoin-tx",
"args":
["-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:11aa"],
+ "return_code": 1,
+ "error_txt": "error: invalid TX sequence id '11aa'",
+ "description": "Try to parse a sequence number outside the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:-1"],
+ "return_code": 1,
+ "error_txt": "error: invalid TX sequence id '-1'",
+ "description": "Try to parse a sequence number outside the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:4294967296"],
+ "return_code": 1,
+ "error_txt": "error: invalid TX sequence id '4294967296'",
+ "description": "Try to parse a sequence number outside the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create",
"in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:4294967293",
"outaddr=0.18:13tuJJDR2RgArmgfv6JScSdreahzgc4T6o"],
"output_cmp": "txcreatedata_seq0.hex",
@@ -519,6 +549,14 @@
},
{ "exec": "./bitcoin-tx",
"args":
+ ["-create",
+ "in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0: 4294967293 ",
+ "outaddr=0.18:13tuJJDR2RgArmgfv6JScSdreahzgc4T6o"],
+ "output_cmp": "txcreatedata_seq0.hex",
+ "description": "Creates a new transaction with one input with sequence number (+whitespace) and one address output"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
["-json",
"-create",
"in=5897de6bd6027a475eadd57019d4e6872c396d0716c4875a5f1a6fcfdf385c1f:0:4294967293",
@@ -542,14 +580,26 @@
"description": "Adds a new input with sequence number to a transaction (output in json)"
},
{ "exec": "./bitcoin-tx",
+ "args": ["-create", "outmultisig=1:-2:3:02a5:021:02df", "nversion=1"],
+ "return_code": 1,
+ "error_txt": "error: invalid multisig required number '-2'",
+ "description": "Try to parse a multisig number outside the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
+ "args": ["-create", "outmultisig=1:2:3a:02a5:021:02df", "nversion=1"],
+ "return_code": 1,
+ "error_txt": "error: invalid multisig total number '3a'",
+ "description": "Try to parse a multisig number outside the allowed range"
+ },
+ { "exec": "./bitcoin-tx",
"args": ["-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485", "nversion=1"],
"output_cmp": "txcreatemultisig1.hex",
"description": "Creates a new transaction with a single 2-of-3 multisig output"
},
{ "exec": "./bitcoin-tx",
- "args": ["-json", "-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485", "nversion=1"],
+ "args": ["-json", "-create", "outmultisig=1: 2 : 3 :02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485", "nversion=1"],
"output_cmp": "txcreatemultisig1.json",
- "description": "Creates a new transaction with a single 2-of-3 multisig output (output in json)"
+ "description": "Creates a new transaction with a single 2-of-3 multisig output (with whitespace, output in json)"
},
{ "exec": "./bitcoin-tx",
"args": ["-create", "outmultisig=1:2:3:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397:021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d:02df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb485:S", "nversion=1"],