184 files changed, 2110 insertions, 1170 deletions
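The coverage-tooling changes in this diff (the Makefile.am and configure.ac hunks below) route lcov through a generated `cov_tool_wrapper.sh`, so Clang builds use `llvm-cov gcov` while GCC builds keep plain `gcov`. A minimal sketch, reconstructed from the `$(COV_TOOL_WRAPPER)` rule and the `--gcov-tool` line in the hunks below and assuming a Clang build (the wrapper the rule actually emits is a single `exec` line; the comments here are illustrative only):

```sh
# cov_tool_wrapper.sh -- generated by the $(COV_TOOL_WRAPPER) rule in Makefile.am.
# COV_TOOL is substituted by configure: "llvm-cov gcov" under Clang, "gcov" under GCC.
exec llvm-cov gcov "$@"

# configure then appends the wrapper to the lcov invocation, roughly:
#   lcov --gcov-tool "$(pwd)/cov_tool_wrapper.sh" -c -i -d "$(pwd)/src" -o baseline.info
```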
diff --git a/.gitignore b/.gitignore index 3e5d284aa3..1173edfaa7 100644 --- a/.gitignore +++ b/.gitignore @@ -119,7 +119,9 @@ releases /*.info test_bitcoin.coverage/ total.coverage/ +fuzz.coverage/ coverage_percent.txt +/cov_tool_wrapper.sh #build tests linux-coverage-build diff --git a/.travis.yml b/.travis.yml index f9932cfaca..f1cee7133f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -80,7 +80,7 @@ jobs: QEMU_USER_CMD="" - stage: test - name: 'Win64 [GOAL: deploy] [unit tests, no gui, no functional tests]' + name: 'Win64 [GOAL: deploy] [unit tests, no gui, no boost::process, no functional tests]' env: >- FILE_ENV="./ci/test/00_setup_env_win64.sh" diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 11a0f4bac7..2e11474382 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -158,7 +158,7 @@ the pull request affects. Valid areas as: Examples: consensus: Add new opcode for BIP-XXXX OP_CHECKAWESOMESIG - net: Automatically create hidden service, listen on Tor + net: Automatically create onion service, listen on Tor qt: Add feed bump button log: Fix typo in log message diff --git a/Makefile.am b/Makefile.am index 75a164f49e..1d6358b1d5 100644 --- a/Makefile.am +++ b/Makefile.am @@ -65,10 +65,10 @@ OSX_PACKAGING = $(OSX_DEPLOY_SCRIPT) $(OSX_FANCY_PLIST) $(OSX_INSTALLER_ICONS) \ $(top_srcdir)/contrib/macdeploy/detached-sig-apply.sh \ $(top_srcdir)/contrib/macdeploy/detached-sig-create.sh -COVERAGE_INFO = baseline.info \ +COVERAGE_INFO = $(COV_TOOL_WRAPPER) baseline.info \ test_bitcoin_filtered.info total_coverage.info \ baseline_filtered.info functional_test.info functional_test_filtered.info \ - test_bitcoin_coverage.info test_bitcoin.info fuzz.info fuzz_coverage.info + test_bitcoin_coverage.info test_bitcoin.info fuzz.info fuzz_filtered.info fuzz_coverage.info dist-hook: -$(GIT) archive --format=tar HEAD -- src/clientversion.cpp | $(AMTAR) -C $(top_distdir) -xf - @@ -192,7 +192,11 @@ LCOV_FILTER_PATTERN = \ -p "src/secp256k1" \ -p "depends" -baseline.info: +$(COV_TOOL_WRAPPER): + @echo 'exec $(COV_TOOL) "$$@"' > $(COV_TOOL_WRAPPER) + @chmod +x $(COV_TOOL_WRAPPER) + +baseline.info: $(COV_TOOL_WRAPPER) $(LCOV) -c -i -d $(abs_builddir)/src -o $@ baseline_filtered.info: baseline.info diff --git a/build-aux/m4/ax_boost_process.m4 b/build-aux/m4/ax_boost_process.m4 new file mode 100644 index 0000000000..5d20e67464 --- /dev/null +++ b/build-aux/m4/ax_boost_process.m4 @@ -0,0 +1,121 @@ +# =========================================================================== +# https://www.gnu.org/software/autoconf-archive/ax_boost_process.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_BOOST_PROCESS +# +# DESCRIPTION +# +# Test for Process library from the Boost C++ libraries. The macro +# requires a preceding call to AX_BOOST_BASE. Further documentation is +# available at <http://randspringer.de/boost/index.html>. +# +# This macro calls: +# +# AC_SUBST(BOOST_PROCESS_LIB) +# +# And sets: +# +# HAVE_BOOST_PROCESS +# +# LICENSE +# +# Copyright (c) 2008 Thomas Porschberg <thomas@randspringer.de> +# Copyright (c) 2008 Michael Tindal +# Copyright (c) 2008 Daniel Casimiro <dan.casimiro@gmail.com> +# +# Copying and distribution of this file, with or without modification, are +# permitted in any medium without royalty provided the copyright notice +# and this notice are preserved. This file is offered as-is, without any +# warranty. 
+ +#serial 2 + +AC_DEFUN([AX_BOOST_PROCESS], +[ + AC_ARG_WITH([boost-process], + AS_HELP_STRING([--with-boost-process@<:@=special-lib@:>@], + [use the Process library from boost - it is possible to specify a certain library for the linker + e.g. --with-boost-process=boost_process-gcc-mt ]), + [ + if test "$withval" = "no"; then + want_boost_process="no" + elif test "$withval" = "yes"; then + want_boost_process="yes" + ax_boost_user_process_lib="" + else + want_boost_process="yes" + ax_boost_user_process_lib="$withval" + fi + ], + [want_boost_process="yes"] + ) + + if test "x$want_boost_process" = "xyes"; then + AC_REQUIRE([AC_PROG_CC]) + AC_REQUIRE([AC_CANONICAL_BUILD]) + CPPFLAGS_SAVED="$CPPFLAGS" + CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" + export CPPFLAGS + + LDFLAGS_SAVED="$LDFLAGS" + LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" + export LDFLAGS + + AC_CACHE_CHECK(whether the Boost::Process library is available, + ax_cv_boost_process, + [AC_LANG_PUSH([C++]) + CXXFLAGS_SAVE=$CXXFLAGS + CXXFLAGS= + + AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include <boost/process.hpp>]], + [[boost::process::child* child = new boost::process::child; delete child;]])], + ax_cv_boost_process=yes, ax_cv_boost_process=no) + CXXFLAGS=$CXXFLAGS_SAVE + AC_LANG_POP([C++]) + ]) + if test "x$ax_cv_boost_process" = "xyes"; then + AC_SUBST(BOOST_CPPFLAGS) + + AC_DEFINE(HAVE_BOOST_PROCESS,,[define if the Boost::Process library is available]) + BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` + + LDFLAGS_SAVE=$LDFLAGS + if test "x$ax_boost_user_process_lib" = "x"; then + for libextension in `ls -r $BOOSTLIBDIR/libboost_process* 2>/dev/null | sed 's,.*/lib,,' | sed 's,\..*,,'` ; do + ax_lib=${libextension} + AC_CHECK_LIB($ax_lib, exit, + [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break], + [link_process="no"]) + done + if test "x$link_process" != "xyes"; then + for libextension in `ls -r $BOOSTLIBDIR/boost_process* 2>/dev/null | sed 's,.*/,,' | sed -e 's,\..*,,'` ; do + ax_lib=${libextension} + AC_CHECK_LIB($ax_lib, exit, + [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break], + [link_process="no"]) + done + fi + + else + for ax_lib in $ax_boost_user_process_lib boost_process-$ax_boost_user_process_lib; do + AC_CHECK_LIB($ax_lib, exit, + [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break], + [link_process="no"]) + done + + fi + if test "x$ax_lib" = "x"; then + AC_MSG_ERROR(Could not find a version of the Boost::Process library!) + fi + if test "x$link_process" = "xno"; then + AC_MSG_ERROR(Could not link against $ax_lib !) + fi + fi + + CPPFLAGS="$CPPFLAGS_SAVED" + LDFLAGS="$LDFLAGS_SAVED" + fi +]) diff --git a/build_msvc/bitcoin_config.h b/build_msvc/bitcoin_config.h index fbbe1a2156..9d0b50a0b4 100644 --- a/build_msvc/bitcoin_config.h +++ b/build_msvc/bitcoin_config.h @@ -47,6 +47,9 @@ /* define if the Boost::Filesystem library is available */ #define HAVE_BOOST_FILESYSTEM /**/ +/* define if the Boost::Process library is available */ +#define HAVE_BOOST_PROCESS /**/ + /* define if the Boost::System library is available */ #define HAVE_BOOST_SYSTEM /**/ diff --git a/build_msvc/vcpkg-packages.txt b/build_msvc/vcpkg-packages.txt index 307f295f08..edce8576c3 100644 --- a/build_msvc/vcpkg-packages.txt +++ b/build_msvc/vcpkg-packages.txt @@ -1 +1 @@ -berkeleydb boost-filesystem boost-multi-index boost-signals2 boost-test boost-thread libevent[thread] zeromq double-conversion
\ No newline at end of file +berkeleydb boost-filesystem boost-multi-index boost-process boost-signals2 boost-test boost-thread libevent[thread] zeromq double-conversion diff --git a/ci/test/00_setup_env_arm.sh b/ci/test/00_setup_env_arm.sh index b70a581532..2e445c126d 100644 --- a/ci/test/00_setup_env_arm.sh +++ b/ci/test/00_setup_env_arm.sh @@ -25,4 +25,4 @@ export RUN_FUNCTIONAL_TESTS=true export GOAL="install" # -Wno-psabi is to disable ABI warnings: "note: parameter passing for argument of type ... changed in GCC 7.1" # This could be removed once the ABI change warning does not show up by default -export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi --enable-werror" +export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi --enable-werror --with-boost-process" diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh index 5688799f9e..e58003ab19 100644 --- a/ci/test/00_setup_env_i686_centos.sh +++ b/ci/test/00_setup_env_i686_centos.sh @@ -11,5 +11,5 @@ export CONTAINER_NAME=ci_i686_centos_7 export DOCKER_NAME_TAG=centos:7 export DOCKER_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python3 python36-zmq which patch lbzip2 dash" export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports" +export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports --with-boost-process" export CONFIG_SHELL="/bin/dash" diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh index 7ec66eeb4f..b62f1603f4 100644 --- a/ci/test/00_setup_env_mac.sh +++ b/ci/test/00_setup_env_mac.sh @@ -14,4 +14,4 @@ export XCODE_BUILD_ID=11C505 export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false export GOAL="deploy" -export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror" +export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror --with-boost-process" diff --git a/ci/test/00_setup_env_mac_host.sh b/ci/test/00_setup_env_mac_host.sh index b8a9ccaae5..5fb127b762 100644 --- a/ci/test/00_setup_env_mac_host.sh +++ b/ci/test/00_setup_env_mac_host.sh @@ -10,7 +10,7 @@ export HOST=x86_64-apple-darwin16 export DOCKER_NAME_TAG=ubuntu:18.04 # Check that bionic can cross-compile to macos (bionic is used in the gitian build as well) export PIP_PACKAGES="zmq" export GOAL="install" -export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror" +export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror --with-boost-process" export NO_DEPENDS=1 export OSX_SDK="" export CCACHE_SIZE=300M diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh index d57c673069..5995964f17 100644 --- a/ci/test/00_setup_env_native_asan.sh +++ b/ci/test/00_setup_env_native_asan.sh @@ -12,4 +12,4 @@ export DOCKER_NAME_TAG=ubuntu:20.04 export NO_DEPENDS=1 export TEST_RUNNER_EXTRA="--timeout-factor=4" # Increase timeout because sanitizers slow down export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=address,integer,undefined CC=clang CXX=clang++" +export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=address,integer,undefined CC=clang CXX=clang++ --with-boost-process" diff --git a/ci/test/00_setup_env_native_fuzz.sh 
b/ci/test/00_setup_env_native_fuzz.sh index 31f437f0e8..a32de4a6b5 100644 --- a/ci/test/00_setup_env_native_fuzz.sh +++ b/ci/test/00_setup_env_native_fuzz.sh @@ -14,5 +14,5 @@ export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false export RUN_FUZZ_TESTS=true export GOAL="install" -export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=clang CXX=clang++" +export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=clang CXX=clang++ --with-boost-process" export CCACHE_SIZE=200M diff --git a/ci/test/00_setup_env_native_multiprocess.sh b/ci/test/00_setup_env_native_multiprocess.sh index 786f0f927f..522a5d9fc2 100644 --- a/ci/test/00_setup_env_native_multiprocess.sh +++ b/ci/test/00_setup_env_native_multiprocess.sh @@ -11,5 +11,5 @@ export DOCKER_NAME_TAG=ubuntu:20.04 export PACKAGES="cmake python3" export DEP_OPTS="MULTIPROCESS=1" export GOAL="install" -export BITCOIN_CONFIG="" +export BITCOIN_CONFIG="--with-boost-process" export TEST_RUNNER_ENV="BITCOIND=bitcoin-node" diff --git a/ci/test/00_setup_env_native_nowallet.sh b/ci/test/00_setup_env_native_nowallet.sh index 1a0b14b62b..0a09bfe230 100644 --- a/ci/test/00_setup_env_native_nowallet.sh +++ b/ci/test/00_setup_env_native_nowallet.sh @@ -11,4 +11,4 @@ export DOCKER_NAME_TAG=ubuntu:16.04 # Use xenial to have one config run the tes export PACKAGES="python3-zmq clang-3.8 llvm-3.8" # Use clang-3.8 to test C++11 compatibility, see doc/dependencies.md export DEP_OPTS="NO_WALLET=1" export GOAL="install" -export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CC=clang-3.8 CXX=clang++-3.8" +export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CC=clang-3.8 CXX=clang++-3.8 --with-boost-process" diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh index 6e2ff729a2..f9d869b4fd 100644 --- a/ci/test/00_setup_env_native_qt5.sh +++ b/ci/test/00_setup_env_native_qt5.sh @@ -16,4 +16,4 @@ export RUN_UNIT_TESTS_SEQUENTIAL="true" export RUN_UNIT_TESTS="false" export GOAL="install" export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.2 v0.16.3 v0.17.1 v0.18.1 v0.19.1" -export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-c++17 --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\"" +export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-c++17 --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\" --with-boost-process" diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh index 5695c43ec3..fc18483425 100644 --- a/ci/test/00_setup_env_native_tsan.sh +++ b/ci/test/00_setup_env_native_tsan.sh @@ -12,4 +12,4 @@ export PACKAGES="clang llvm libc++abi-dev libc++-dev python3-zmq" export DEP_OPTS="CC=clang CXX='clang++ -stdlib=libc++'" export TEST_RUNNER_EXTRA="--exclude feature_block --timeout-factor=4" # Increase timeout because sanitizers slow down. Low memory on Travis machines, exclude feature_block. 
export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --with-gui=no CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' CXXFLAGS='-g' --with-sanitizers=thread CC=clang CXX='clang++ -stdlib=libc++'" +export BITCOIN_CONFIG="--enable-zmq --with-gui=no CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' CXXFLAGS='-g' --with-sanitizers=thread CC=clang CXX='clang++ -stdlib=libc++' --with-boost-process" diff --git a/ci/test/00_setup_env_s390x.sh b/ci/test/00_setup_env_s390x.sh index c180d023de..fe330920d0 100644 --- a/ci/test/00_setup_env_s390x.sh +++ b/ci/test/00_setup_env_s390x.sh @@ -22,4 +22,4 @@ export DOCKER_NAME_TAG="debian:buster" export RUN_UNIT_TESTS=true export RUN_FUNCTIONAL_TESTS=true export GOAL="install" -export BITCOIN_CONFIG="--enable-reduce-exports --with-incompatible-bdb" +export BITCOIN_CONFIG="--enable-reduce-exports --with-incompatible-bdb --with-boost-process" diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh index eb8b870dd6..2b351dff6d 100644 --- a/ci/test/00_setup_env_win64.sh +++ b/ci/test/00_setup_env_win64.sh @@ -13,4 +13,4 @@ export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64" export RUN_FUNCTIONAL_TESTS=false export RUN_SECURITY_TESTS="true" export GOAL="deploy" -export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests" +export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests --without-boost-process" diff --git a/configure.ac b/configure.ac index f11f2b2059..2acd702600 100644 --- a/configure.ac +++ b/configure.ac @@ -105,6 +105,7 @@ AC_PATH_TOOL(AR, ar) AC_PATH_TOOL(RANLIB, ranlib) AC_PATH_TOOL(STRIP, strip) AC_PATH_TOOL(GCOV, gcov) +AC_PATH_TOOL(LLVM_COV, llvm-cov) AC_PATH_PROG(LCOV, lcov) dnl Python 3.5 is specified in .python-version and should be used if available, see doc/dependencies.md AC_PATH_PROGS([PYTHON], [python3.5 python3.6 python3.7 python3.8 python3 python]) @@ -680,16 +681,37 @@ if test x$use_lcov = xyes; then if test x$LCOV = x; then AC_MSG_ERROR("lcov testing requested but lcov not found") fi - if test x$GCOV = x; then - AC_MSG_ERROR("lcov testing requested but gcov not found") - fi if test x$PYTHON = x; then AC_MSG_ERROR("lcov testing requested but python not found") fi if test x$GENHTML = x; then AC_MSG_ERROR("lcov testing requested but genhtml not found") fi - LCOV="$LCOV --gcov-tool=$GCOV" + + AC_MSG_CHECKING([whether compiler is Clang]) + AC_PREPROC_IFELSE([AC_LANG_SOURCE([[ + #if defined(__clang__) && defined(__llvm__) + // Compiler is Clang + #else + # error Compiler is not Clang + #endif + ]])],[ + AC_MSG_RESULT([yes]) + if test x$LLVM_COV = x; then + AC_MSG_ERROR([lcov testing requested but llvm-cov not found]) + fi + COV_TOOL="$LLVM_COV gcov" + ],[ + AC_MSG_RESULT([no]) + if test x$GCOV = x; then + AC_MSG_ERROR([lcov testing requested but gcov not found]) + fi + COV_TOOL="$GCOV" + ]) + AC_SUBST(COV_TOOL) + AC_SUBST(COV_TOOL_WRAPPER, "cov_tool_wrapper.sh") + LCOV="$LCOV --gcov-tool $(pwd)/$COV_TOOL_WRAPPER" + AX_CHECK_LINK_FLAG([[--coverage]], [LDFLAGS="$LDFLAGS --coverage"], [AC_MSG_ERROR("lcov testing requested but --coverage linker flag does not work")]) AX_CHECK_COMPILE_FLAG([--coverage],[CXXFLAGS="$CXXFLAGS --coverage"], @@ -1180,9 +1202,9 @@ fi if test x$use_boost = xyes; then dnl Minimum required Boost version -define(MINIMUM_REQUIRED_BOOST, 1.47.0) +define(MINIMUM_REQUIRED_BOOST, 1.58.0) -dnl Check for boost libs +dnl Check for Boost libs AX_BOOST_BASE([MINIMUM_REQUIRED_BOOST]) if test x$want_boost = xno; then AC_MSG_ERROR([[only libbitcoinconsensus can be built without boost]]) 
@@ -1191,30 +1213,15 @@ AX_BOOST_SYSTEM AX_BOOST_FILESYSTEM AX_BOOST_THREAD +dnl Opt-in to boost-process +AS_IF([ test x$with_boost_process != x ], [ AX_BOOST_PROCESS ], [ ax_cv_boost_process=no ] ) + dnl Boost 1.56 through 1.62 allow using std::atomic instead of its own atomic dnl counter implementations. In 1.63 and later the std::atomic approach is default. m4_pattern_allow(DBOOST_AC_USE_STD_ATOMIC) dnl otherwise it's treated like a macro BOOST_CPPFLAGS="-DBOOST_SP_USE_STD_ATOMIC -DBOOST_AC_USE_STD_ATOMIC $BOOST_CPPFLAGS" -if test x$use_reduce_exports = xyes; then - AC_MSG_CHECKING([for working boost reduced exports]) - TEMP_CPPFLAGS="$CPPFLAGS" - CPPFLAGS="$BOOST_CPPFLAGS $CPPFLAGS" - AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[ - @%:@include <boost/version.hpp> - ]], [[ - #if BOOST_VERSION >= 104900 - // Everything is okay - #else - # error Boost version is too old - #endif - ]])],[ - AC_MSG_RESULT(yes) - ],[ - AC_MSG_ERROR([boost versions < 1.49 are known to be broken with reduced exports. Use --disable-reduce-exports.]) - ]) - CPPFLAGS="$TEMP_CPPFLAGS" -fi +BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB" fi if test x$use_reduce_exports = xyes; then @@ -1228,7 +1235,6 @@ if test x$use_tests = xyes; then AC_MSG_ERROR(hexdump is required for tests) fi - if test x$use_boost = xyes; then AX_BOOST_UNIT_TEST_FRAMEWORK @@ -1254,48 +1260,6 @@ if test x$use_tests = xyes; then fi fi -if test x$use_boost = xyes; then - -BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB" - - -dnl If boost (prior to 1.57) was built without c++11, it emulated scoped enums -dnl using c++98 constructs. Unfortunately, this implementation detail leaked into -dnl the abi. This was fixed in 1.57. - -dnl When building against that installed version using c++11, the headers pick up -dnl on the native c++11 scoped enum support and enable it, however it will fail to -dnl link. This can be worked around by disabling c++11 scoped enums if linking will -dnl fail. -dnl BOOST_NO_SCOPED_ENUMS was changed to BOOST_NO_CXX11_SCOPED_ENUMS in 1.51. 
- -TEMP_LIBS="$LIBS" -LIBS="$BOOST_LIBS $LIBS" -TEMP_CPPFLAGS="$CPPFLAGS" -CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" -AC_MSG_CHECKING([for mismatched boost c++11 scoped enums]) -AC_LINK_IFELSE([AC_LANG_PROGRAM([[ - #include <boost/config.hpp> - #include <boost/version.hpp> - #if !defined(BOOST_NO_SCOPED_ENUMS) && !defined(BOOST_NO_CXX11_SCOPED_ENUMS) && BOOST_VERSION < 105700 - #define BOOST_NO_SCOPED_ENUMS - #define BOOST_NO_CXX11_SCOPED_ENUMS - #define CHECK - #endif - #include <boost/filesystem.hpp> - ]],[[ - #if defined(CHECK) - boost::filesystem::copy_file("foo", "bar"); - #else - choke; - #endif - ]])], - [AC_MSG_RESULT(mismatched); BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_NO_SCOPED_ENUMS -DBOOST_NO_CXX11_SCOPED_ENUMS"], [AC_MSG_RESULT(ok)]) -LIBS="$TEMP_LIBS" -CPPFLAGS="$TEMP_CPPFLAGS" - -fi - dnl libevent check if test x$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench != xnonononono; then @@ -1683,6 +1647,7 @@ esac echo echo "Options used to compile and link:" +echo " boost process = $ax_cv_boost_process" echo " multiprocess = $build_multiprocess" echo " with wallet = $enable_wallet" echo " with gui / qt = $bitcoin_enable_qt" diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml index 0ed1e16f7e..e86ff83798 100644 --- a/contrib/gitian-descriptors/gitian-linux.yml +++ b/contrib/gitian-descriptors/gitian-linux.yml @@ -7,31 +7,29 @@ suites: architectures: - "amd64" packages: -- "curl" -- "g++-aarch64-linux-gnu" -- "g++-8-aarch64-linux-gnu" -- "gcc-8-aarch64-linux-gnu" -- "binutils-aarch64-linux-gnu" -- "g++-arm-linux-gnueabihf" -- "g++-8-arm-linux-gnueabihf" -- "gcc-8-arm-linux-gnueabihf" -- "binutils-arm-linux-gnueabihf" -- "g++-riscv64-linux-gnu" -- "g++-8-riscv64-linux-gnu" -- "gcc-8-riscv64-linux-gnu" -- "binutils-riscv64-linux-gnu" -- "g++-8-multilib" -- "gcc-8-multilib" -- "binutils-gold" -- "git" -- "pkg-config" +# Common dependencies. - "autoconf" -- "libtool" - "automake" -- "faketime" +- "binutils" - "bsdmainutils" - "ca-certificates" +- "curl" +- "faketime" +- "git" +- "libtool" +- "patch" +- "pkg-config" - "python3" +# Cross compilation HOSTS: +# - arm-linux-gnueabihf +- "binutils-arm-linux-gnueabihf" +- "g++-8-arm-linux-gnueabihf" +# - aarch64-linux-gnu +- "binutils-aarch64-linux-gnu" +- "g++-8-aarch64-linux-gnu" +# - riscv64-linux-gnu +- "binutils-riscv64-linux-gnu" +- "g++-8-riscv64-linux-gnu" remotes: - "url": "https://github.com/bitcoin/bitcoin.git" "dir": "bitcoin" @@ -93,45 +91,11 @@ script: | create_per-host_faketime_wrappers "2000-01-01 12:00:00" export PATH=${WRAP_DIR}:${PATH} - EXTRA_INCLUDES_BASE=$WRAP_DIR/extra_includes - mkdir -p $EXTRA_INCLUDES_BASE - - # x86 needs /usr/include/i386-linux-gnu/asm pointed to /usr/include/x86_64-linux-gnu/asm, - # but we can't write there. Instead, create a link here and force it to be included in the - # search paths by wrapping gcc/g++. 
- - mkdir -p $EXTRA_INCLUDES_BASE/i686-pc-linux-gnu - rm -f $WRAP_DIR/extra_includes/i686-pc-linux-gnu/asm - ln -s /usr/include/x86_64-linux-gnu/asm $EXTRA_INCLUDES_BASE/i686-pc-linux-gnu/asm - - for prog in gcc g++; do - rm -f ${WRAP_DIR}/${prog} - cat << EOF > ${WRAP_DIR}/${prog} - #!/usr/bin/env bash - REAL="$(which -a ${prog}-8 | grep -v ${WRAP_DIR}/${prog} | head -1)" - for var in "\$@" - do - if [ "\$var" = "-m32" ]; then - export C_INCLUDE_PATH="$EXTRA_INCLUDES_BASE/i686-pc-linux-gnu" - export CPLUS_INCLUDE_PATH="$EXTRA_INCLUDES_BASE/i686-pc-linux-gnu" - break - fi - done - \$REAL \$@ - EOF - chmod +x ${WRAP_DIR}/${prog} - done - cd bitcoin BASEPREFIX="${PWD}/depends" # Build dependencies for each host for i in $HOSTS; do - EXTRA_INCLUDES="$EXTRA_INCLUDES_BASE/$i" - if [ -d "$EXTRA_INCLUDES" ]; then - export HOST_ID_SALT="$EXTRA_INCLUDES" - fi make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" - unset HOST_ID_SALT done # Faketime for binaries diff --git a/contrib/valgrind.supp b/contrib/valgrind.supp index d2652119b4..ece02dc24e 100644 --- a/contrib/valgrind.supp +++ b/contrib/valgrind.supp @@ -123,7 +123,6 @@ Memcheck:Cond ... fun:_ZN5boost10filesystem6detail11unique_pathERKNS0_4pathEPNS_6system10error_codeE - fun:unique_path } { Suppress boost warning diff --git a/depends/README.md b/depends/README.md index 11733024b1..2356e8be59 100644 --- a/depends/README.md +++ b/depends/README.md @@ -110,9 +110,9 @@ The following can be set when running make: `make FOO=bar` <dt>BUILD_ID_SALT</dt> <dd>Optional salt to use when generating build package ids</dd> <dt>FORCE_USE_SYSTEM_CLANG</dt> -<dd>(EXPERTS ONLY) When cross-compiling for macOS, use clang found in the -system's <code>$PATH</code> rather than the default prebuilt release of clang -from llvm.org</dd> +<dd>(EXPERTS ONLY) When cross-compiling for macOS, use Clang found in the +system's <code>$PATH</code> rather than the default prebuilt release of Clang +from llvm.org. Clang 8 or later is required.</dd> </dl> If some packages are not built, for example `make NO_WALLET=1`, the appropriate diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk index 3a7e605b4f..4f6b543aff 100644 --- a/depends/packages/boost.mk +++ b/depends/packages/boost.mk @@ -31,7 +31,9 @@ $(package)_cxxflags_linux=-fPIC $(package)_cxxflags_android=-fPIC endef +# Fix unused variable in boost_process, can be removed after upgrading to 1.72 define $(package)_preprocess_cmds + sed -i.old "s/int ret_sig = 0;//" boost/process/detail/posix/wait_group.hpp && \ echo "using $($(package)_toolset_$(host_os)) : : $($(package)_cxx) : <cxxflags>\"$($(package)_cxxflags) $($(package)_cppflags)\" <linkflags>\"$($(package)_ldflags)\" <archiver>\"$($(package)_archiver_$(host_os))\" <striper>\"$(host_STRIP)\" <ranlib>\"$(host_RANLIB)\" <rc>\"$(host_WINDRES)\" : ;" > user-config.jam endef diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in index 7e307ab7c8..2f79168212 100644 --- a/doc/Doxyfile.in +++ b/doc/Doxyfile.in @@ -2073,7 +2073,7 @@ INCLUDE_FILE_PATTERNS = # recursively expanded use the := operator instead of the = operator. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. -PREDEFINED = +PREDEFINED = HAVE_BOOST_PROCESS # If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this # tag can be used to specify a list of macro names that should be expanded. 
The diff --git a/doc/JSON-RPC-interface.md b/doc/JSON-RPC-interface.md index a0cfe84a3e..40d8e330e2 100644 --- a/doc/JSON-RPC-interface.md +++ b/doc/JSON-RPC-interface.md @@ -60,7 +60,7 @@ RPC interface will be abused. are sent as clear text that can be read by anyone on your network path. Additionally, the RPC interface has not been hardened to withstand arbitrary Internet traffic, so changing the above settings - to expose it to the Internet (even using something like a Tor hidden + to expose it to the Internet (even using something like a Tor onion service) could expose you to unconsidered vulnerabilities. See `bitcoind -help` for more information about these settings and other settings described in this document. diff --git a/doc/bips.md b/doc/bips.md index b96862297f..456fea7a5a 100644 --- a/doc/bips.md +++ b/doc/bips.md @@ -1,4 +1,4 @@ -BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.19.0**): +BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.21.0**): * [`BIP 9`](https://github.com/bitcoin/bips/blob/master/bip-0009.mediawiki): The changes allowing multiple soft-forks to be deployed in parallel have been implemented since **v0.12.1** ([PR #7575](https://github.com/bitcoin/bitcoin/pull/7575)) * [`BIP 11`](https://github.com/bitcoin/bips/blob/master/bip-0011.mediawiki): Multisig outputs are standard since **v0.6.0** ([PR #669](https://github.com/bitcoin/bitcoin/pull/669)). @@ -42,3 +42,4 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.19.0**): * [`BIP 173`](https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki): Bech32 addresses for native Segregated Witness outputs are supported as of **v0.16.0** ([PR 11167](https://github.com/bitcoin/bitcoin/pull/11167)). Bech32 addresses are generated by default as of **v0.20.0** ([PR 16884](https://github.com/bitcoin/bitcoin/pull/16884)). * [`BIP 174`](https://github.com/bitcoin/bips/blob/master/bip-0174.mediawiki): RPCs to operate on Partially Signed Bitcoin Transactions (PSBT) are present as of **v0.17.0** ([PR 13557](https://github.com/bitcoin/bitcoin/pull/13557)). * [`BIP 176`](https://github.com/bitcoin/bips/blob/master/bip-0176.mediawiki): Bits Denomination [QT only] is supported as of **v0.16.0** ([PR 12035](https://github.com/bitcoin/bitcoin/pull/12035)). +* [`BIP 339`](https://github.com/bitcoin/bips/blob/master/bip-0339.mediawiki): Relay of transactions by wtxid is supported as of **v0.21.0** ([PR 18044](https://github.com/bitcoin/bitcoin/pull/18044)). diff --git a/doc/build-openbsd.md b/doc/build-openbsd.md index 53c647ae34..584ee43d48 100644 --- a/doc/build-openbsd.md +++ b/doc/build-openbsd.md @@ -1,6 +1,6 @@ OpenBSD build guide ====================== -(updated for OpenBSD 6.4) +(updated for OpenBSD 6.7) This guide describes how to build bitcoind and command-line utilities on OpenBSD. @@ -15,7 +15,7 @@ Run the following as root to install the base dependencies for building: pkg_add git gmake libevent libtool boost pkg_add autoconf # (select highest version, e.g. 2.69) pkg_add automake # (select highest version, e.g. 1.16) -pkg_add python # (select highest version, e.g. 3.6) +pkg_add python # (select highest version, e.g. 3.8) git clone https://github.com/bitcoin/bitcoin.git ``` @@ -23,10 +23,10 @@ git clone https://github.com/bitcoin/bitcoin.git See [dependencies.md](dependencies.md) for a complete overview. 
**Important**: From OpenBSD 6.2 onwards a C++11-supporting clang compiler is -part of the base image, and while building it is necessary to make sure that this -compiler is used and not ancient g++ 4.2.1. This is done by appending -`CC=cc CXX=c++` to configuration commands. Mixing different compilers -within the same executable will result in linker errors. +part of the base image, and while building it is necessary to make sure that +this compiler is used and not ancient g++ 4.2.1. This is done by appending +`CC=cc CC_FOR_BUILD=cc CXX=c++` to configuration commands. Mixing different +compilers within the same executable will result in errors. ### Building BerkeleyDB @@ -77,7 +77,7 @@ To configure with wallet: To configure without wallet: ```bash -./configure --disable-wallet --with-gui=no CC=cc CXX=c++ MAKE=gmake +./configure --disable-wallet --with-gui=no CC=cc CC_FOR_BUILD=cc CXX=c++ MAKE=gmake ``` Build and run the tests: diff --git a/doc/dependencies.md b/doc/dependencies.md index 0cb5311e8b..92dea65309 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -6,7 +6,7 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct | Dependency | Version used | Minimum required | CVEs | Shared | [Bundled Qt library](https://doc.qt.io/qt-5/configure-options.html#third-party-libraries) | | --- | --- | --- | --- | --- | --- | | Berkeley DB | [4.8.30](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) | 4.8.x | No | | | -| Boost | [1.70.0](https://www.boost.org/users/download/) | [1.47.0](https://github.com/bitcoin/bitcoin/pull/8920) | No | | | +| Boost | [1.70.0](https://www.boost.org/users/download/) | [1.58.0](https://github.com/bitcoin/bitcoin/pull/19667) | No | | | | Clang | | [3.3+](https://releases.llvm.org/download.html) (C++11 support) | | | | | Expat | [2.2.7](https://libexpat.github.io/) | | No | Yes | | | fontconfig | [2.12.1](https://www.freedesktop.org/software/fontconfig/release/) | | No | Yes | | diff --git a/doc/files.md b/doc/files.md index 5475826329..52e094a60b 100644 --- a/doc/files.md +++ b/doc/files.md @@ -56,7 +56,7 @@ Subdirectory | File(s) | Description `./` | `fee_estimates.dat` | Stores statistics used to estimate minimum transaction fees and priorities required for confirmation `./` | `guisettings.ini.bak` | Backup of former [GUI settings](#gui-settings) after `-resetguisettings` option is used `./` | `mempool.dat` | Dump of the mempool's transactions -`./` | `onion_private_key` | Cached Tor hidden service private key for `-listenonion` option +`./` | `onion_private_key` | Cached Tor onion service private key for `-listenonion` option `./` | `peers.dat` | Peer IP address database (custom format) `./` | `settings.json` | Read-write settings set through GUI or RPC interfaces, augmenting manual settings from [bitcoin.conf](bitcoin-conf.md). File is created automatically if read-write settings storage is not disabled with `-nosettings` option. 
Path can be specified with `-settings` option `./` | `.cookie` | Session RPC authentication cookie; if used, created at start and deleted on shutdown; can be specified by `-rpccookiefile` option diff --git a/doc/release-notes/release-notes-0.20.1.md b/doc/release-notes/release-notes-0.20.1.md new file mode 100644 index 0000000000..9fbb29cb82 --- /dev/null +++ b/doc/release-notes/release-notes-0.20.1.md @@ -0,0 +1,158 @@ +0.20.1 Release Notes +==================== + +Bitcoin Core version 0.20.1 is now available from: + + <https://bitcoincore.org/bin/bitcoin-core-0.20.1/> + +This minor release includes various bug fixes and performance +improvements, as well as updated translations. + +Please report bugs using the issue tracker at GitHub: + + <https://github.com/bitcoin/bitcoin/issues> + +To receive security and update notifications, please subscribe to: + + <https://bitcoincore.org/en/list/announcements/join/> + +How to Upgrade +============== + +If you are running an older version, shut it down. Wait until it has completely +shut down (which might take a few minutes in some cases), then run the +installer (on Windows) or just copy over `/Applications/Bitcoin-Qt` (on Mac) +or `bitcoind`/`bitcoin-qt` (on Linux). + +Upgrading directly from a version of Bitcoin Core that has reached its EOL is +possible, but it might take some time if the data directory needs to be migrated. Old +wallet versions of Bitcoin Core are generally supported. + +Compatibility +============== + +Bitcoin Core is supported and extensively tested on operating systems +using the Linux kernel, macOS 10.12+, and Windows 7 and newer. Bitcoin +Core should also work on most other Unix-like systems but is not as +frequently tested on them. It is not recommended to use Bitcoin Core on +unsupported systems. + +From Bitcoin Core 0.20.0 onwards, macOS versions earlier than 10.12 are no +longer supported. Additionally, Bitcoin Core does not yet change appearance +when macOS "dark mode" is activated. + +Known Bugs +========== + +The process for generating the source code release ("tarball") has changed in an +effort to make it more complete, however, there are a few regressions in +this release: + +- The generated `configure` script is currently missing, and you will need to + install autotools and run `./autogen.sh` before you can run + `./configure`. This is the same as when checking out from git. + +- Instead of running `make` simply, you should instead run + `BITCOIN_GENBUILD_NO_GIT=1 make`. + +Notable changes +=============== + +Changes regarding misbehaving peers +----------------------------------- + +Peers that misbehave (e.g. send us invalid blocks) are now referred to as +discouraged nodes in log output, as they're not (and weren't) strictly banned: +incoming connections are still allowed from them, but they're preferred for +eviction. + +Furthermore, a few additional changes are introduced to how discouraged +addresses are treated: + +- Discouraging an address does not time out automatically after 24 hours + (or the `-bantime` setting). Depending on traffic from other peers, + discouragement may time out at an indeterminate time. + +- Discouragement is not persisted over restarts. + +- There is no method to list discouraged addresses. They are not returned by + the `listbanned` RPC. That RPC also no longer reports the `ban_reason` + field, as `"manually added"` is the only remaining option. + +- Discouragement cannot be removed with the `setban remove` RPC command. 
+ If you need to remove a discouragement, you can remove all discouragements by + stop-starting your node. + +Notification changes +-------------------- + +`-walletnotify` notifications are now sent for wallet transactions that are +removed from the mempool because they conflict with a new block. These +notifications were sent previously before the v0.19 release, but had been +broken since that release (bug +[#18325](https://github.com/bitcoin/bitcoin/issues/18325)). + +PSBT changes +------------ + +PSBTs will contain both the non-witness utxo and the witness utxo for segwit +inputs in order to restore compatibility with wallet software that are now +requiring the full previous transaction for segwit inputs. The witness utxo +is still provided to maintain compatibility with software which relied on its +existence to determine whether an input was segwit. + +0.20.1 change log +================= + +### Mining +- #19019 Fix GBT: Restore "!segwit" and "csv" to "rules" key (luke-jr) + +### P2P protocol and network code +- #19219 Replace automatic bans with discouragement filter (sipa) + +### Wallet +- #19300 Handle concurrent wallet loading (promag) +- #18982 Minimal fix to restore conflicted transaction notifications (ryanofsky) + +### RPC and other APIs +- #19524 Increment input value sum only once per UTXO in decodepsbt (fanquake) +- #19517 psbt: Increment input value sum only once per UTXO in decodepsbt (achow101) +- #19215 psbt: Include and allow both non_witness_utxo and witness_utxo for segwit inputs (achow101) + +### GUI +- #19097 Add missing QPainterPath include (achow101) +- #19059 update Qt base translations for macOS release (fanquake) + +### Build system +- #19152 improve build OS configure output (skmcontrib) +- #19536 qt, build: Fix QFileDialog for static builds (hebasto) + +### Tests and QA +- #19444 Remove cached directories and associated script blocks from appveyor config (sipsorcery) +- #18640 appveyor: Remove clcache (MarcoFalke) + +### Miscellaneous +- #19194 util: Don't reference errno when pthread fails (miztake) +- #18700 Fix locking on WSL using flock instead of fcntl (meshcollider) + +Credits +======= + +Thanks to everyone who directly contributed to this release: + +- Aaron Clauson +- Andrew Chow +- fanquake +- Hennadii Stepanov +- João Barbosa +- Luke Dashjr +- MarcoFalke +- MIZUTA Takeshi +- Pieter Wuille +- Russell Yanofsky +- sachinkm77 +- Samuel Dobson +- Wladimir J. van der Laan + +As well as to everyone that helped with translations on +[Transifex](https://www.transifex.com/bitcoin/bitcoin/). diff --git a/doc/tor.md b/doc/tor.md index 2c54e32f84..17807856e5 100644 --- a/doc/tor.md +++ b/doc/tor.md @@ -1,6 +1,6 @@ # TOR SUPPORT IN BITCOIN -It is possible to run Bitcoin Core as a Tor hidden service, and connect to such services. +It is possible to run Bitcoin Core as a Tor onion service, and connect to such services. The following directions assume you have a Tor proxy running on port 9050. Many distributions default to having a SOCKS proxy listening on port 9050, but others may not. In particular, the Tor Browser Bundle defaults to listening on port 9150. See [Tor Project FAQ:TBBSocksPort](https://www.torproject.org/docs/faq.html.en#TBBSocksPort) for how to properly configure Tor. @@ -14,12 +14,12 @@ outgoing connections, but more is possible. -proxy=ip:port Set the proxy server. If SOCKS5 is selected (default), this proxy server will be used to try to reach .onion addresses as well. - -onion=ip:port Set the proxy server to use for Tor hidden services. 
You do not + -onion=ip:port Set the proxy server to use for Tor onion services. You do not need to set this if it's the same as -proxy. You can use -noonion - to explicitly disable access to hidden services. + to explicitly disable access to onion services. -listen When using -proxy, listening is disabled by default. If you want - to run a hidden service (see next section), you'll need to enable + to run an onion service (see next section), you'll need to enable it explicitly. -connect=X When behind a Tor proxy, you can specify .onion addresses instead @@ -94,11 +94,11 @@ for normal IPv4/IPv6 communication, use: ## 3. Automatically listen on Tor Starting with Tor version 0.2.7.1 it is possible, through Tor's control socket -API, to create and destroy 'ephemeral' hidden services programmatically. +API, to create and destroy 'ephemeral' onion services programmatically. Bitcoin Core has been updated to make use of this. This means that if Tor is running (and proper authentication has been configured), -Bitcoin Core automatically creates a hidden service to listen on. This will positively +Bitcoin Core automatically creates an onion service to listen on. This will positively affect the number of available .onion nodes. This new feature is enabled by default if Bitcoin Core is listening (`-listen`), and @@ -110,7 +110,7 @@ Connecting to Tor's control socket API requires one of two authentication method configured. It also requires the control socket to be enabled, e.g. put `ControlPort 9051` in `torrc` config file. For cookie authentication the user running bitcoind must have read access to the `CookieAuthFile` specified in Tor configuration. In some cases this is -preconfigured and the creation of a hidden service is automatic. If permission problems +preconfigured and the creation of an onion service is automatic. If permission problems are seen with `-debug=tor` they can be resolved by adding both the user running Tor and the user running bitcoind to the same group and setting permissions appropriately. On Debian-based systems the user running bitcoind can be added to the debian-tor group, @@ -127,8 +127,8 @@ in the tor configuration file. The hashed password can be obtained with the comm ## 4. Privacy recommendations -- Do not add anything but Bitcoin Core ports to the hidden service created in section 2. - If you run a web service too, create a new hidden service for that. +- Do not add anything but Bitcoin Core ports to the onion service created in section 2. + If you run a web service too, create a new onion service for that. Otherwise it is trivial to link them, which may reduce privacy. Hidden services created automatically (as in section 3) always have only one port open. 
diff --git a/src/Makefile.test.include b/src/Makefile.test.include index b961f8d5b9..c3e46c0def 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -265,6 +265,7 @@ BITCOIN_TESTS =\ test/skiplist_tests.cpp \ test/streams_tests.cpp \ test/sync_tests.cpp \ + test/system_tests.cpp \ test/util_threadnames_tests.cpp \ test/timedata_tests.cpp \ test/torcontrol_tests.cpp \ @@ -1207,7 +1208,7 @@ nodist_test_test_bitcoin_SOURCES = $(GENERATED_TEST_FILES) $(BITCOIN_TESTS): $(GENERATED_TEST_FILES) -CLEAN_BITCOIN_TEST = test/*.gcda test/*.gcno $(GENERATED_TEST_FILES) $(BITCOIN_TESTS:=.log) +CLEAN_BITCOIN_TEST = test/*.gcda test/*.gcno test/fuzz/*.gcda test/fuzz/*.gcno $(GENERATED_TEST_FILES) $(BITCOIN_TESTS:=.log) CLEANFILES += $(CLEAN_BITCOIN_TEST) diff --git a/src/addrman.cpp b/src/addrman.cpp index 7aba340d9d..7636c6bad2 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -479,11 +479,15 @@ int CAddrMan::Check_() } #endif -void CAddrMan::GetAddr_(std::vector<CAddress>& vAddr) +void CAddrMan::GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size_t max_pct) { - unsigned int nNodes = ADDRMAN_GETADDR_MAX_PCT * vRandom.size() / 100; - if (nNodes > ADDRMAN_GETADDR_MAX) - nNodes = ADDRMAN_GETADDR_MAX; + size_t nNodes = vRandom.size(); + if (max_pct != 0) { + nNodes = max_pct * nNodes / 100; + } + if (max_addresses != 0) { + nNodes = std::min(nNodes, max_addresses); + } // gather a list of random nodes, skipping those of low quality for (unsigned int n = 0; n < vRandom.size(); n++) { diff --git a/src/addrman.h b/src/addrman.h index 8e82020df0..ca045b91cd 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -153,12 +153,6 @@ public: //! how recent a successful connection should be before we allow an address to be evicted from tried #define ADDRMAN_REPLACEMENT_HOURS 4 -//! the maximum percentage of nodes to return in a getaddr call -#define ADDRMAN_GETADDR_MAX_PCT 23 - -//! the maximum number of nodes to return in a getaddr call -#define ADDRMAN_GETADDR_MAX 2500 - //! Convenience #define ADDRMAN_TRIED_BUCKET_COUNT (1 << ADDRMAN_TRIED_BUCKET_COUNT_LOG2) #define ADDRMAN_NEW_BUCKET_COUNT (1 << ADDRMAN_NEW_BUCKET_COUNT_LOG2) @@ -261,7 +255,7 @@ protected: #endif //! Select several addresses at once. - void GetAddr_(std::vector<CAddress> &vAddr) EXCLUSIVE_LOCKS_REQUIRED(cs); + void GetAddr_(std::vector<CAddress> &vAddr, size_t max_addresses, size_t max_pct) EXCLUSIVE_LOCKS_REQUIRED(cs); //! Mark an entry as currently-connected-to. void Connected_(const CService &addr, int64_t nTime) EXCLUSIVE_LOCKS_REQUIRED(cs); @@ -638,13 +632,13 @@ public: } //! Return a bunch of addresses, selected at random. 
- std::vector<CAddress> GetAddr() + std::vector<CAddress> GetAddr(size_t max_addresses, size_t max_pct) { Check(); std::vector<CAddress> vAddr; { LOCK(cs); - GetAddr_(vAddr); + GetAddr_(vAddr, max_addresses, max_pct); } Check(); return vAddr; diff --git a/src/base58.cpp b/src/base58.cpp index 6a9e21ffc2..9b2946e7a9 100644 --- a/src/base58.cpp +++ b/src/base58.cpp @@ -141,7 +141,7 @@ std::string EncodeBase58Check(const std::vector<unsigned char>& vchIn) { // add 4-byte hash check to the end std::vector<unsigned char> vch(vchIn); - uint256 hash = Hash(vch.begin(), vch.end()); + uint256 hash = Hash(vch); vch.insert(vch.end(), (unsigned char*)&hash, (unsigned char*)&hash + 4); return EncodeBase58(vch); } @@ -154,7 +154,7 @@ bool DecodeBase58Check(const char* psz, std::vector<unsigned char>& vchRet, int return false; } // re-calculate the checksum, ensure it matches the included 4-byte checksum - uint256 hash = Hash(vchRet.begin(), vchRet.end() - 4); + uint256 hash = Hash(MakeSpan(vchRet).first(vchRet.size() - 4)); if (memcmp(&hash, &vchRet[vchRet.size() - 4], 4) != 0) { vchRet.clear(); return false; diff --git a/src/bench/addrman.cpp b/src/bench/addrman.cpp index 26d9340768..ebdad5a4b8 100644 --- a/src/bench/addrman.cpp +++ b/src/bench/addrman.cpp @@ -98,7 +98,7 @@ static void AddrManGetAddr(benchmark::Bench& bench) FillAddrMan(addrman); bench.run([&] { - const auto& addresses = addrman.GetAddr(); + const auto& addresses = addrman.GetAddr(2500, 23); assert(addresses.size() > 0); }); } diff --git a/src/bench/chacha_poly_aead.cpp b/src/bench/chacha_poly_aead.cpp index 30d7851b7f..3b1d3e697a 100644 --- a/src/bench/chacha_poly_aead.cpp +++ b/src/bench/chacha_poly_aead.cpp @@ -93,7 +93,7 @@ static void HASH(benchmark::Bench& bench, size_t buffersize) uint8_t hash[CHash256::OUTPUT_SIZE]; std::vector<uint8_t> in(buffersize,0); bench.batch(in.size()).unit("byte").run([&] { - CHash256().Write(in.data(), in.size()).Finalize(hash); + CHash256().Write(in).Finalize(hash); }); } diff --git a/src/bench/verify_script.cpp b/src/bench/verify_script.cpp index c489bae4f6..9af0b502eb 100644 --- a/src/bench/verify_script.cpp +++ b/src/bench/verify_script.cpp @@ -34,7 +34,7 @@ static void VerifyScriptBench(benchmark::Bench& bench) key.Set(vchKey.begin(), vchKey.end(), false); CPubKey pubkey = key.GetPubKey(); uint160 pubkeyHash; - CHash160().Write(pubkey.begin(), pubkey.size()).Finalize(pubkeyHash.begin()); + CHash160().Write(pubkey).Finalize(pubkeyHash); // Script. 
CScript scriptPubKey = CScript() << witnessversion << ToByteVector(pubkeyHash); diff --git a/src/blockfilter.cpp b/src/blockfilter.cpp index 5f5bed5bda..9a6fb4abd0 100644 --- a/src/blockfilter.cpp +++ b/src/blockfilter.cpp @@ -291,7 +291,7 @@ uint256 BlockFilter::GetHash() const const std::vector<unsigned char>& data = GetEncodedFilter(); uint256 result; - CHash256().Write(data.data(), data.size()).Finalize(result.begin()); + CHash256().Write(data).Finalize(result); return result; } @@ -301,8 +301,8 @@ uint256 BlockFilter::ComputeHeader(const uint256& prev_header) const uint256 result; CHash256() - .Write(filter_hash.begin(), filter_hash.size()) - .Write(prev_header.begin(), prev_header.size()) - .Finalize(result.begin()); + .Write(filter_hash) + .Write(prev_header) + .Finalize(result); return result; } diff --git a/src/chainparams.cpp b/src/chainparams.cpp index a7c9e33f07..ffd2076c9a 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -110,7 +110,7 @@ public: // Note that of those which support the service bits prefix, most only support a subset of // possible options. - // This is fine at runtime as we'll fall back to using them as a oneshot if they don't support the + // This is fine at runtime as we'll fall back to using them as an addrfetch if they don't support the // service bits we want, but we should get them updated to support all service bits wanted by any // release ASAP to avoid it where possible. vSeeds.emplace_back("seed.bitcoin.sipa.be"); // Pieter Wuille, only supports x1, x5, x9, and xd diff --git a/src/consensus/validation.h b/src/consensus/validation.h index 8de7a8f2d8..2a93a090d6 100644 --- a/src/consensus/validation.h +++ b/src/consensus/validation.h @@ -26,7 +26,8 @@ enum class TxValidationResult { * is uninteresting. 
*/ TX_RECENT_CONSENSUS_CHANGE, - TX_NOT_STANDARD, //!< didn't meet our local policy rules + TX_INPUTS_NOT_STANDARD, //!< inputs (covered by txid) failed policy rules + TX_NOT_STANDARD, //!< otherwise didn't meet our local policy rules TX_MISSING_INPUTS, //!< transaction was missing some of its inputs TX_PREMATURE_SPEND, //!< transaction spends a coinbase too early, or violates locktime/sequence locks /** diff --git a/src/core_write.cpp b/src/core_write.cpp index 34cfeecc6f..f9d918cb6d 100644 --- a/src/core_write.cpp +++ b/src/core_write.cpp @@ -48,13 +48,14 @@ std::string FormatScript(const CScript& script) } } if (vch.size() > 0) { - ret += strprintf("0x%x 0x%x ", HexStr(it2, it - vch.size()), HexStr(it - vch.size(), it)); + ret += strprintf("0x%x 0x%x ", HexStr(std::vector<uint8_t>(it2, it - vch.size())), + HexStr(std::vector<uint8_t>(it - vch.size(), it))); } else { - ret += strprintf("0x%x ", HexStr(it2, it)); + ret += strprintf("0x%x ", HexStr(std::vector<uint8_t>(it2, it))); } continue; } - ret += strprintf("0x%x ", HexStr(it2, script.end())); + ret += strprintf("0x%x ", HexStr(std::vector<uint8_t>(it2, script.end()))); break; } return ret.substr(0, ret.size() - 1); diff --git a/src/hash.cpp b/src/hash.cpp index 26150e5ca8..4c09f5f646 100644 --- a/src/hash.cpp +++ b/src/hash.cpp @@ -12,7 +12,7 @@ inline uint32_t ROTL32(uint32_t x, int8_t r) return (x << r) | (x >> (32 - r)); } -unsigned int MurmurHash3(unsigned int nHashSeed, const std::vector<unsigned char>& vDataToHash) +unsigned int MurmurHash3(unsigned int nHashSeed, Span<const unsigned char> vDataToHash) { // The following is MurmurHash3 (x86_32), see http://code.google.com/p/smhasher/source/browse/trunk/MurmurHash3.cpp uint32_t h1 = nHashSeed; diff --git a/src/hash.h b/src/hash.h index c295568a3e..71806483ff 100644 --- a/src/hash.h +++ b/src/hash.h @@ -25,14 +25,15 @@ private: public: static const size_t OUTPUT_SIZE = CSHA256::OUTPUT_SIZE; - void Finalize(unsigned char hash[OUTPUT_SIZE]) { + void Finalize(Span<unsigned char> output) { + assert(output.size() == OUTPUT_SIZE); unsigned char buf[CSHA256::OUTPUT_SIZE]; sha.Finalize(buf); - sha.Reset().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(hash); + sha.Reset().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(output.data()); } - CHash256& Write(const unsigned char *data, size_t len) { - sha.Write(data, len); + CHash256& Write(Span<const unsigned char> input) { + sha.Write(input.data(), input.size()); return *this; } @@ -49,14 +50,15 @@ private: public: static const size_t OUTPUT_SIZE = CRIPEMD160::OUTPUT_SIZE; - void Finalize(unsigned char hash[OUTPUT_SIZE]) { + void Finalize(Span<unsigned char> output) { + assert(output.size() == OUTPUT_SIZE); unsigned char buf[CSHA256::OUTPUT_SIZE]; sha.Finalize(buf); - CRIPEMD160().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(hash); + CRIPEMD160().Write(buf, CSHA256::OUTPUT_SIZE).Finalize(output.data()); } - CHash160& Write(const unsigned char *data, size_t len) { - sha.Write(data, len); + CHash160& Write(Span<const unsigned char> input) { + sha.Write(input.data(), input.size()); return *this; } @@ -67,52 +69,31 @@ public: }; /** Compute the 256-bit hash of an object. */ -template<typename T1> -inline uint256 Hash(const T1 pbegin, const T1 pend) +template<typename T> +inline uint256 Hash(const T& in1) { - static const unsigned char pblank[1] = {}; uint256 result; - CHash256().Write(pbegin == pend ? 
pblank : (const unsigned char*)&pbegin[0], (pend - pbegin) * sizeof(pbegin[0])) - .Finalize((unsigned char*)&result); + CHash256().Write(MakeUCharSpan(in1)).Finalize(result); return result; } /** Compute the 256-bit hash of the concatenation of two objects. */ template<typename T1, typename T2> -inline uint256 Hash(const T1 p1begin, const T1 p1end, - const T2 p2begin, const T2 p2end) { - static const unsigned char pblank[1] = {}; +inline uint256 Hash(const T1& in1, const T2& in2) { uint256 result; - CHash256().Write(p1begin == p1end ? pblank : (const unsigned char*)&p1begin[0], (p1end - p1begin) * sizeof(p1begin[0])) - .Write(p2begin == p2end ? pblank : (const unsigned char*)&p2begin[0], (p2end - p2begin) * sizeof(p2begin[0])) - .Finalize((unsigned char*)&result); + CHash256().Write(MakeUCharSpan(in1)).Write(MakeUCharSpan(in2)).Finalize(result); return result; } /** Compute the 160-bit hash an object. */ template<typename T1> -inline uint160 Hash160(const T1 pbegin, const T1 pend) +inline uint160 Hash160(const T1& in1) { - static unsigned char pblank[1] = {}; uint160 result; - CHash160().Write(pbegin == pend ? pblank : (const unsigned char*)&pbegin[0], (pend - pbegin) * sizeof(pbegin[0])) - .Finalize((unsigned char*)&result); + CHash160().Write(MakeUCharSpan(in1)).Finalize(result); return result; } -/** Compute the 160-bit hash of a vector. */ -inline uint160 Hash160(const std::vector<unsigned char>& vch) -{ - return Hash160(vch.begin(), vch.end()); -} - -/** Compute the 160-bit hash of a vector. */ -template<unsigned int N> -inline uint160 Hash160(const prevector<N, unsigned char>& vch) -{ - return Hash160(vch.begin(), vch.end()); -} - /** A writer stream (for serialization) that computes a 256-bit hash. */ class CHashWriter { @@ -129,13 +110,13 @@ public: int GetVersion() const { return nVersion; } void write(const char *pch, size_t size) { - ctx.Write((const unsigned char*)pch, size); + ctx.Write({(const unsigned char*)pch, size}); } // invalidates the object uint256 GetHash() { uint256 result; - ctx.Finalize((unsigned char*)&result); + ctx.Finalize(result); return result; } @@ -200,7 +181,7 @@ uint256 SerializeHash(const T& obj, int nType=SER_GETHASH, int nVersion=PROTOCOL return ss.GetHash(); } -unsigned int MurmurHash3(unsigned int nHashSeed, const std::vector<unsigned char>& vDataToHash); +unsigned int MurmurHash3(unsigned int nHashSeed, Span<const unsigned char> vDataToHash); void BIP32Hash(const ChainCode &chainCode, unsigned int nChild, unsigned char header, const unsigned char data[32], unsigned char output[64]); diff --git a/src/init.cpp b/src/init.cpp index 6cca21f375..f5aef08211 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -24,6 +24,7 @@ #include <index/blockfilterindex.h> #include <index/txindex.h> #include <interfaces/chain.h> +#include <interfaces/node.h> #include <key.h> #include <miner.h> #include <net.h> @@ -442,13 +443,13 @@ void SetupServerArgs(NodeContext& node) argsman.AddArg("-externalip=<ip>", "Specify your own public address", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-listen", "Accept connections from outside (default: 1 if no -proxy or -connect)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - argsman.AddArg("-listenonion", strprintf("Automatically create Tor hidden service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, 
OptionsCategory::CONNECTION); + argsman.AddArg("-listenonion", strprintf("Automatically create Tor onion service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-maxconnections=<n>", strprintf("Maintain at most <n> connections to peers (default: %u)", DEFAULT_MAX_PEER_CONNECTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-maxreceivebuffer=<n>", strprintf("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXRECEIVEBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-maxsendbuffer=<n>", strprintf("Maximum per-connection send buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXSENDBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-maxtimeadjustment", strprintf("Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by peers forward or backward by this amount. (default: %u seconds)", DEFAULT_MAX_TIME_ADJUSTMENT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-maxuploadtarget=<n>", strprintf("Tries to keep outbound traffic under the given target (in MiB per 24h). Limit does not apply to peers with 'download' permission. 0 = no limit (default: %d)", DEFAULT_MAX_UPLOAD_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - argsman.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor hidden services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor onion services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-onlynet=<net>", "Make outgoing connections only through network <net> (ipv4, ipv6 or onion). Incoming connections are not affected by this option. This option can be specified multiple times to allow multiple networks.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); @@ -999,11 +1000,13 @@ bool AppInitParameterInteraction() } } - // Basic filters are the only supported filters. The basic filters index must be enabled - // to serve compact filters - if (gArgs.GetBoolArg("-peerblockfilters", DEFAULT_PEERBLOCKFILTERS) && - g_enabled_filter_types.count(BlockFilterType::BASIC) != 1) { - return InitError(_("Cannot set -peerblockfilters without -blockfilterindex.")); + // Signal NODE_COMPACT_FILTERS if peerblockfilters and basic filters index are both enabled. 
+ if (gArgs.GetBoolArg("-peerblockfilters", DEFAULT_PEERBLOCKFILTERS)) { + if (g_enabled_filter_types.count(BlockFilterType::BASIC) != 1) { + return InitError(_("Cannot set -peerblockfilters without -blockfilterindex.")); + } + + nLocalServices = ServiceFlags(nLocalServices | NODE_COMPACT_FILTERS); } // if using block pruning, then disallow txindex @@ -1242,7 +1245,7 @@ bool AppInitLockDataDirectory() return true; } -bool AppInitMain(const util::Ref& context, NodeContext& node) +bool AppInitMain(const util::Ref& context, NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) { const CChainParams& chainparams = Params(); // ********************************************************* Step 4a: application initialization @@ -1877,6 +1880,15 @@ bool AppInitMain(const util::Ref& context, NodeContext& node) LOCK(cs_main); LogPrintf("block tree size = %u\n", chainman.BlockIndex().size()); chain_active_height = chainman.ActiveChain().Height(); + if (tip_info) { + tip_info->block_height = chain_active_height; + tip_info->block_time = chainman.ActiveChain().Tip() ? chainman.ActiveChain().Tip()->GetBlockTime() : Params().GenesisBlock().GetBlockTime(); + tip_info->verification_progress = GuessVerificationProgress(Params().TxData(), chainman.ActiveChain().Tip()); + } + if (tip_info && ::pindexBestHeader) { + tip_info->header_height = ::pindexBestHeader->nHeight; + tip_info->header_time = ::pindexBestHeader->GetBlockTime(); + } } LogPrintf("nBestHeight = %d\n", chain_active_height); diff --git a/src/init.h b/src/init.h index 33fe96e8ea..20008ba5be 100644 --- a/src/init.h +++ b/src/init.h @@ -11,6 +11,9 @@ #include <util/system.h> struct NodeContext; +namespace interfaces { +struct BlockAndHeaderTipInfo; +} namespace boost { class thread_group; } // namespace boost @@ -54,7 +57,7 @@ bool AppInitLockDataDirectory(); * @note This should only be done after daemonization. Call Shutdown() if this function fails. * @pre Parameters should be parsed and config file should be read, AppInitLockDataDirectory should have been called. 
*/ -bool AppInitMain(const util::Ref& context, NodeContext& node); +bool AppInitMain(const util::Ref& context, NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info = nullptr); /** * Register all arguments with the ArgsManager diff --git a/src/interfaces/node.cpp b/src/interfaces/node.cpp index 33f0dac263..21400d00f8 100644 --- a/src/interfaces/node.cpp +++ b/src/interfaces/node.cpp @@ -56,6 +56,7 @@ namespace { class NodeImpl : public Node { public: + NodeImpl(NodeContext* context) { setContext(context); } void initError(const bilingual_str& message) override { InitError(message); } bool parseParameters(int argc, const char* const argv[], std::string& error) override { @@ -79,15 +80,15 @@ public: return AppInitBasicSetup() && AppInitParameterInteraction() && AppInitSanityChecks() && AppInitLockDataDirectory(); } - bool appInitMain() override + bool appInitMain(interfaces::BlockAndHeaderTipInfo* tip_info) override { - m_context.chain = MakeChain(m_context); - return AppInitMain(m_context_ref, m_context); + m_context->chain = MakeChain(*m_context); + return AppInitMain(m_context_ref, *m_context, tip_info); } void appShutdown() override { - Interrupt(m_context); - Shutdown(m_context); + Interrupt(*m_context); + Shutdown(*m_context); } void startShutdown() override { @@ -108,19 +109,19 @@ public: StopMapPort(); } } - void setupServerArgs() override { return SetupServerArgs(m_context); } + void setupServerArgs() override { return SetupServerArgs(*m_context); } bool getProxy(Network net, proxyType& proxy_info) override { return GetProxy(net, proxy_info); } size_t getNodeCount(CConnman::NumConnections flags) override { - return m_context.connman ? m_context.connman->GetNodeCount(flags) : 0; + return m_context->connman ? m_context->connman->GetNodeCount(flags) : 0; } bool getNodesStats(NodesStats& stats) override { stats.clear(); - if (m_context.connman) { + if (m_context->connman) { std::vector<CNodeStats> stats_temp; - m_context.connman->GetNodeStats(stats_temp); + m_context->connman->GetNodeStats(stats_temp); stats.reserve(stats_temp.size()); for (auto& node_stats_temp : stats_temp) { @@ -141,46 +142,46 @@ public: } bool getBanned(banmap_t& banmap) override { - if (m_context.banman) { - m_context.banman->GetBanned(banmap); + if (m_context->banman) { + m_context->banman->GetBanned(banmap); return true; } return false; } bool ban(const CNetAddr& net_addr, int64_t ban_time_offset) override { - if (m_context.banman) { - m_context.banman->Ban(net_addr, ban_time_offset); + if (m_context->banman) { + m_context->banman->Ban(net_addr, ban_time_offset); return true; } return false; } bool unban(const CSubNet& ip) override { - if (m_context.banman) { - m_context.banman->Unban(ip); + if (m_context->banman) { + m_context->banman->Unban(ip); return true; } return false; } bool disconnectByAddress(const CNetAddr& net_addr) override { - if (m_context.connman) { - return m_context.connman->DisconnectNode(net_addr); + if (m_context->connman) { + return m_context->connman->DisconnectNode(net_addr); } return false; } bool disconnectById(NodeId id) override { - if (m_context.connman) { - return m_context.connman->DisconnectNode(id); + if (m_context->connman) { + return m_context->connman->DisconnectNode(id); } return false; } - int64_t getTotalBytesRecv() override { return m_context.connman ? m_context.connman->GetTotalBytesRecv() : 0; } - int64_t getTotalBytesSent() override { return m_context.connman ? 
m_context.connman->GetTotalBytesSent() : 0; } - size_t getMempoolSize() override { return m_context.mempool ? m_context.mempool->size() : 0; } - size_t getMempoolDynamicUsage() override { return m_context.mempool ? m_context.mempool->DynamicMemoryUsage() : 0; } + int64_t getTotalBytesRecv() override { return m_context->connman ? m_context->connman->GetTotalBytesRecv() : 0; } + int64_t getTotalBytesSent() override { return m_context->connman ? m_context->connman->GetTotalBytesSent() : 0; } + size_t getMempoolSize() override { return m_context->mempool ? m_context->mempool->size() : 0; } + size_t getMempoolDynamicUsage() override { return m_context->mempool ? m_context->mempool->DynamicMemoryUsage() : 0; } bool getHeaderTip(int& height, int64_t& block_time) override { LOCK(::cs_main); @@ -223,11 +224,11 @@ public: bool getImporting() override { return ::fImporting; } void setNetworkActive(bool active) override { - if (m_context.connman) { - m_context.connman->SetNetworkActive(active); + if (m_context->connman) { + m_context->connman->SetNetworkActive(active); } } - bool getNetworkActive() override { return m_context.connman && m_context.connman->GetNetworkActive(); } + bool getNetworkActive() override { return m_context->connman && m_context->connman->GetNetworkActive(); } CFeeRate estimateSmartFee(int num_blocks, bool conservative, int* returned_target = nullptr) override { FeeCalculation fee_calc; @@ -269,7 +270,7 @@ public: std::vector<std::unique_ptr<Wallet>> getWallets() override { std::vector<std::unique_ptr<Wallet>> wallets; - for (auto& client : m_context.chain_clients) { + for (auto& client : m_context->chain_clients) { auto client_wallets = client->getWallets(); std::move(client_wallets.begin(), client_wallets.end(), std::back_inserter(wallets)); } @@ -277,12 +278,12 @@ public: } std::unique_ptr<Wallet> loadWallet(const std::string& name, bilingual_str& error, std::vector<bilingual_str>& warnings) override { - return MakeWallet(LoadWallet(*m_context.chain, name, error, warnings)); + return MakeWallet(LoadWallet(*m_context->chain, name, error, warnings)); } std::unique_ptr<Wallet> createWallet(const SecureString& passphrase, uint64_t wallet_creation_flags, const std::string& name, bilingual_str& error, std::vector<bilingual_str>& warnings, WalletCreationStatus& status) override { std::shared_ptr<CWallet> wallet; - status = CreateWallet(*m_context.chain, passphrase, wallet_creation_flags, name, error, warnings, wallet); + status = CreateWallet(*m_context->chain, passphrase, wallet_creation_flags, name, error, warnings, wallet); return MakeWallet(wallet); } std::unique_ptr<Handler> handleInitMessage(InitMessageFn fn) override @@ -336,13 +337,22 @@ public: /* verification progress is unused when a header was received */ 0); })); } - NodeContext* context() override { return &m_context; } - NodeContext m_context; - util::Ref m_context_ref{m_context}; + NodeContext* context() override { return m_context; } + void setContext(NodeContext* context) override + { + m_context = context; + if (context) { + m_context_ref.Set(*context); + } else { + m_context_ref.Clear(); + } + } + NodeContext* m_context{nullptr}; + util::Ref m_context_ref; }; } // namespace -std::unique_ptr<Node> MakeNode() { return MakeUnique<NodeImpl>(); } +std::unique_ptr<Node> MakeNode(NodeContext* context) { return MakeUnique<NodeImpl>(context); } } // namespace interfaces diff --git a/src/interfaces/node.h b/src/interfaces/node.h index a9680c42b5..753f3e6b13 100644 --- a/src/interfaces/node.h +++ b/src/interfaces/node.h @@ 
-39,6 +39,16 @@ class Handler; class Wallet; struct BlockTip; +//! Block and header tip information +struct BlockAndHeaderTipInfo +{ + int block_height; + int64_t block_time; + int header_height; + int64_t header_time; + double verification_progress; +}; + //! Top-level interface for a bitcoin node (bitcoind process). class Node { @@ -96,7 +106,7 @@ public: virtual bool baseInitialize() = 0; //! Start node. - virtual bool appInitMain() = 0; + virtual bool appInitMain(interfaces::BlockAndHeaderTipInfo* tip_info = nullptr) = 0; //! Stop node. virtual void appShutdown() = 0; @@ -268,12 +278,14 @@ public: std::function<void(SynchronizationState, interfaces::BlockTip tip, double verification_progress)>; virtual std::unique_ptr<Handler> handleNotifyHeaderTip(NotifyHeaderTipFn fn) = 0; - //! Return pointer to internal chain interface, useful for testing. + //! Get and set internal node context. Useful for testing, but not + //! accessible across processes. virtual NodeContext* context() { return nullptr; } + virtual void setContext(NodeContext* context) { } }; //! Return implementation of Node interface. -std::unique_ptr<Node> MakeNode(); +std::unique_ptr<Node> MakeNode(NodeContext* context = nullptr); //! Block tip (could be a header or not, depends on the subscribed signal). struct BlockTip { diff --git a/src/key.cpp b/src/key.cpp index 7eecc6e083..4ed74a39b1 100644 --- a/src/key.cpp +++ b/src/key.cpp @@ -237,7 +237,7 @@ bool CKey::VerifyPubKey(const CPubKey& pubkey) const { std::string str = "Bitcoin key verification\n"; GetRandBytes(rnd, sizeof(rnd)); uint256 hash; - CHash256().Write((unsigned char*)str.data(), str.size()).Write(rnd, sizeof(rnd)).Finalize(hash.begin()); + CHash256().Write(MakeUCharSpan(str)).Write(rnd).Finalize(hash); std::vector<unsigned char> vchSig; Sign(hash, vchSig); return pubkey.Verify(hash, vchSig); diff --git a/src/merkleblock.cpp b/src/merkleblock.cpp index 8072b12119..b571d463c9 100644 --- a/src/merkleblock.cpp +++ b/src/merkleblock.cpp @@ -70,7 +70,7 @@ uint256 CPartialMerkleTree::CalcHash(int height, unsigned int pos, const std::ve else right = left; // combine subhashes - return Hash(left.begin(), left.end(), right.begin(), right.end()); + return Hash(left, right); } } @@ -126,7 +126,7 @@ uint256 CPartialMerkleTree::TraverseAndExtract(int height, unsigned int pos, uns right = left; } // and combine them before returning - return Hash(left.begin(), left.end(), right.begin(), right.end()); + return Hash(left, right); } } diff --git a/src/net.cpp b/src/net.cpp index 0c56cddbdc..6c1980735c 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -105,10 +105,10 @@ std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(cs_mapLocalHost); static bool vfLimited[NET_MAX] GUARDED_BY(cs_mapLocalHost) = {}; std::string strSubVersion; -void CConnman::AddOneShot(const std::string& strDest) +void CConnman::AddAddrFetch(const std::string& strDest) { - LOCK(cs_vOneShots); - vOneShots.push_back(strDest); + LOCK(m_addr_fetches_mutex); + m_addr_fetches.push_back(strDest); } uint16_t GetListenPort() @@ -346,7 +346,7 @@ bool CConnman::CheckIncomingNonce(uint64_t nonce) { LOCK(cs_vNodes); for (const CNode* pnode : vNodes) { - if (!pnode->fSuccessfullyConnected && !pnode->fInbound && pnode->GetLocalNonce() == nonce) + if (!pnode->fSuccessfullyConnected && !pnode->IsInboundConn() && pnode->GetLocalNonce() == nonce) return false; } return true; @@ -368,8 +368,10 @@ static CAddress GetBindAddress(SOCKET sock) return addr_bind; } -CNode* CConnman::ConnectNode(CAddress addrConnect, const char 
*pszDest, bool fCountFailure, bool manual_connection, bool block_relay_only) +CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type) { + assert(conn_type != ConnectionType::INBOUND); + if (pszDest == nullptr) { if (IsLocal(addrConnect)) return nullptr; @@ -432,7 +434,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo if (hSocket == INVALID_SOCKET) { return nullptr; } - connected = ConnectSocketDirectly(addrConnect, hSocket, nConnectTimeout, manual_connection); + connected = ConnectSocketDirectly(addrConnect, hSocket, nConnectTimeout, conn_type == ConnectionType::MANUAL); } if (!proxyConnectionFailed) { // If a connection to the node was attempted, and failure (if any) is not caused by a problem connecting to @@ -459,7 +461,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize(); CAddress addr_bind = GetBindAddress(hSocket); - CNode* pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? pszDest : "", false, block_relay_only); + CNode* pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? pszDest : "", conn_type); pnode->AddRef(); // We're making a new connection, harvest entropy from the time (and our peer count) @@ -536,8 +538,8 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap) LOCK(cs_SubVer); X(cleanSubVer); } - X(fInbound); - X(m_manual_connection); + stats.fInbound = IsInboundConn(); + stats.m_manual_connection = IsManualConn(); X(nStartingHeight); { LOCK(cs_vSend); @@ -685,7 +687,7 @@ int V1TransportDeserializer::readData(const char *pch, unsigned int nBytes) vRecv.resize(std::min(hdr.nMessageSize, nDataPos + nCopy + 256 * 1024)); } - hasher.Write((const unsigned char*)pch, nCopy); + hasher.Write({(const unsigned char*)pch, nCopy}); memcpy(&vRecv[nDataPos], pch, nCopy); nDataPos += nCopy; @@ -696,7 +698,7 @@ const uint256& V1TransportDeserializer::GetMessageHash() const { assert(Complete()); if (data_hash.IsNull()) - hasher.Finalize(data_hash.begin()); + hasher.Finalize(data_hash); return data_hash; } @@ -722,8 +724,8 @@ CNetMessage V1TransportDeserializer::GetMessage(const CMessageHeader::MessageSta if (!msg.m_valid_checksum) { LogPrint(BCLog::NET, "CHECKSUM ERROR (%s, %u bytes), expected %s was %s\n", SanitizeString(msg.m_command), msg.m_message_size, - HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE), - HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE)); + HexStr(Span<uint8_t>(hash.begin(), hash.begin() + CMessageHeader::CHECKSUM_SIZE)), + HexStr(hdr.pchChecksum)); } // store receive time @@ -736,7 +738,7 @@ CNetMessage V1TransportDeserializer::GetMessage(const CMessageHeader::MessageSta void V1TransportSerializer::prepareForTransport(CSerializedNetMsg& msg, std::vector<unsigned char>& header) { // create dbl-sha256 checksum - uint256 hash = Hash(msg.data.begin(), msg.data.end()); + uint256 hash = Hash(msg.data); // create header CMessageHeader hdr(Params().MessageStart(), msg.m_type.c_str(), msg.data.size()); @@ -872,7 +874,7 @@ bool CConnman::AttemptToEvictConnection() for (const CNode* node : vNodes) { if (node->HasPermission(PF_NOBAN)) continue; - if (!node->fInbound) + if 
(!node->IsInboundConn()) continue; if (node->fDisconnect) continue; @@ -983,7 +985,7 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) { { LOCK(cs_vNodes); for (const CNode* pnode : vNodes) { - if (pnode->fInbound) nInbound++; + if (pnode->IsInboundConn()) nInbound++; } } @@ -1048,7 +1050,7 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) { if (NetPermissions::HasFlag(permissionFlags, PF_BLOOMFILTER)) { nodeServices = static_cast<ServiceFlags>(nodeServices | NODE_BLOOM); } - CNode* pnode = new CNode(id, nodeServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, addr_bind, "", true); + CNode* pnode = new CNode(id, nodeServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, addr_bind, "", ConnectionType::INBOUND); pnode->AddRef(); pnode->m_permissionFlags = permissionFlags; // If this flag is present, the user probably expect that RPC and QT report it as whitelisted (backward compatibility) @@ -1646,7 +1648,7 @@ void CConnman::ThreadDNSAddressSeed() { LOCK(cs_vNodes); for (const CNode* pnode : vNodes) { - nRelevant += pnode->fSuccessfullyConnected && !pnode->fFeeler && !pnode->fOneShot && !pnode->m_manual_connection && !pnode->fInbound; + if (pnode->fSuccessfullyConnected && pnode->IsOutboundOrBlockRelayConn()) ++nRelevant; } } if (nRelevant >= 2) { @@ -1674,7 +1676,7 @@ void CConnman::ThreadDNSAddressSeed() LogPrintf("Loading addresses from DNS seed %s\n", seed); if (HaveNameProxy()) { - AddOneShot(seed); + AddAddrFetch(seed); } else { std::vector<CNetAddr> vIPs; std::vector<CAddress> vAdd; @@ -1696,8 +1698,8 @@ void CConnman::ThreadDNSAddressSeed() addrman.Add(vAdd, resolveSource); } else { // We now avoid directly using results from DNS Seeds which do not support service bit filtering, - // instead using them as a oneshot to get nodes with our desired service bits. - AddOneShot(seed); + // instead using them as a addrfetch to get nodes with our desired service bits. 
+ AddAddrFetch(seed); } } --seeds_right_now; @@ -1705,17 +1707,6 @@ void CConnman::ThreadDNSAddressSeed() LogPrintf("%d addresses found from DNS seeds\n", found); } - - - - - - - - - - - void CConnman::DumpAddresses() { int64_t nStart = GetTimeMillis(); @@ -1727,20 +1718,20 @@ void CConnman::DumpAddresses() addrman.size(), GetTimeMillis() - nStart); } -void CConnman::ProcessOneShot() +void CConnman::ProcessAddrFetch() { std::string strDest; { - LOCK(cs_vOneShots); - if (vOneShots.empty()) + LOCK(m_addr_fetches_mutex); + if (m_addr_fetches.empty()) return; - strDest = vOneShots.front(); - vOneShots.pop_front(); + strDest = m_addr_fetches.front(); + m_addr_fetches.pop_front(); } CAddress addr; CSemaphoreGrant grant(*semOutbound, true); if (grant) { - OpenNetworkConnection(addr, false, &grant, strDest.c_str(), true); + OpenNetworkConnection(addr, false, &grant, strDest.c_str(), ConnectionType::ADDR_FETCH); } } @@ -1767,7 +1758,7 @@ int CConnman::GetExtraOutboundCount() { LOCK(cs_vNodes); for (const CNode* pnode : vNodes) { - if (!pnode->fInbound && !pnode->m_manual_connection && !pnode->fFeeler && !pnode->fDisconnect && !pnode->fOneShot && pnode->fSuccessfullyConnected) { + if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsOutboundOrBlockRelayConn()) { ++nOutbound; } } @@ -1782,11 +1773,11 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) { for (int64_t nLoop = 0;; nLoop++) { - ProcessOneShot(); + ProcessAddrFetch(); for (const std::string& strAddr : connect) { CAddress addr(CService(), NODE_NONE); - OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(), false, false, true); + OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(), ConnectionType::MANUAL); for (int i = 0; i < 10 && i < nLoop; i++) { if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) @@ -1805,7 +1796,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) int64_t nNextFeeler = PoissonNextSend(nStart*1000*1000, FEELER_INTERVAL); while (!interruptNet) { - ProcessOneShot(); + ProcessAddrFetch(); if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) return; @@ -1838,21 +1829,27 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) int nOutboundFullRelay = 0; int nOutboundBlockRelay = 0; std::set<std::vector<unsigned char> > setConnected; + { LOCK(cs_vNodes); for (const CNode* pnode : vNodes) { - if (!pnode->fInbound && !pnode->m_manual_connection) { - // Netgroups for inbound and addnode peers are not excluded because our goal here - // is to not use multiple of our limited outbound slots on a single netgroup - // but inbound and addnode peers do not use our outbound slots. Inbound peers - // also have the added issue that they're attacker controlled and could be used - // to prevent us from connecting to particular hosts if we used them here. - setConnected.insert(pnode->addr.GetGroup(addrman.m_asmap)); - if (pnode->m_tx_relay == nullptr) { - nOutboundBlockRelay++; - } else if (!pnode->fFeeler) { - nOutboundFullRelay++; - } + if (pnode->IsFullOutboundConn()) nOutboundFullRelay++; + if (pnode->IsBlockOnlyConn()) nOutboundBlockRelay++; + + // Netgroups for inbound and manual peers are not excluded because our goal here + // is to not use multiple of our limited outbound slots on a single netgroup + // but inbound and manual peers do not use our outbound slots. 
Inbound peers + // also have the added issue that they could be attacker controlled and used + // to prevent us from connecting to particular hosts if we used them here. + switch(pnode->m_conn_type){ + case ConnectionType::INBOUND: + case ConnectionType::MANUAL: + break; + case ConnectionType::OUTBOUND: + case ConnectionType::BLOCK_RELAY: + case ConnectionType::ADDR_FETCH: + case ConnectionType::FEELER: + setConnected.insert(pnode->addr.GetGroup(addrman.m_asmap)); } } } @@ -1945,14 +1942,24 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) LogPrint(BCLog::NET, "Making feeler connection to %s\n", addrConnect.ToString()); } - // Open this connection as block-relay-only if we're already at our - // full-relay capacity, but not yet at our block-relay peer limit. - // (It should not be possible for fFeeler to be set if we're not - // also at our block-relay peer limit, but check against that as - // well for sanity.) - bool block_relay_only = nOutboundBlockRelay < m_max_outbound_block_relay && !fFeeler && nOutboundFullRelay >= m_max_outbound_full_relay; + ConnectionType conn_type; + // Determine what type of connection to open. If fFeeler is not + // set, open OUTBOUND connections until we meet our full-relay + // capacity. Then open BLOCK_RELAY connections until we hit our + // block-relay peer limit. Otherwise, default to opening an + // OUTBOUND connection. + if (fFeeler) { + conn_type = ConnectionType::FEELER; + } else if (nOutboundFullRelay < m_max_outbound_full_relay) { + conn_type = ConnectionType::OUTBOUND; + } else if (nOutboundBlockRelay < m_max_outbound_block_relay) { + conn_type = ConnectionType::BLOCK_RELAY; + } else { + // GetTryNewOutboundPeer() is true + conn_type = ConnectionType::OUTBOUND; + } - OpenNetworkConnection(addrConnect, (int)setConnected.size() >= std::min(nMaxConnections - 1, 2), &grant, nullptr, false, fFeeler, false, block_relay_only); + OpenNetworkConnection(addrConnect, (int)setConnected.size() >= std::min(nMaxConnections - 1, 2), &grant, nullptr, conn_type); } } } @@ -1976,11 +1983,11 @@ std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo() LOCK(cs_vNodes); for (const CNode* pnode : vNodes) { if (pnode->addr.IsValid()) { - mapConnected[pnode->addr] = pnode->fInbound; + mapConnected[pnode->addr] = pnode->IsInboundConn(); } std::string addrName = pnode->GetAddrName(); if (!addrName.empty()) { - mapConnectedByName[std::move(addrName)] = std::make_pair(pnode->fInbound, static_cast<const CService&>(pnode->addr)); + mapConnectedByName[std::move(addrName)] = std::make_pair(pnode->IsInboundConn(), static_cast<const CService&>(pnode->addr)); } } } @@ -2027,7 +2034,7 @@ void CConnman::ThreadOpenAddedConnections() } tried = true; CAddress addr(CService(), NODE_NONE); - OpenNetworkConnection(addr, false, &grant, info.strAddedNode.c_str(), false, false, true); + OpenNetworkConnection(addr, false, &grant, info.strAddedNode.c_str(), ConnectionType::MANUAL); if (!interruptNet.sleep_for(std::chrono::milliseconds(500))) return; } @@ -2039,8 +2046,10 @@ void CConnman::ThreadOpenAddedConnections() } // if successful, this moves the passed grant to the constructed node -void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, bool fOneShot, bool fFeeler, bool manual_connection, bool block_relay_only) +void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, ConnectionType conn_type) { + 
assert(conn_type != ConnectionType::INBOUND); + // // Initiate outbound network connection // @@ -2058,18 +2067,12 @@ void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai } else if (FindNode(std::string(pszDest))) return; - CNode* pnode = ConnectNode(addrConnect, pszDest, fCountFailure, manual_connection, block_relay_only); + CNode* pnode = ConnectNode(addrConnect, pszDest, fCountFailure, conn_type); if (!pnode) return; if (grantOutbound) grantOutbound->MoveTo(pnode->grantOutbound); - if (fOneShot) - pnode->fOneShot = true; - if (fFeeler) - pnode->fFeeler = true; - if (manual_connection) - pnode->m_manual_connection = true; m_msgproc->InitializeNode(pnode); { @@ -2127,11 +2130,6 @@ void CConnman::ThreadMessageHandler() } } - - - - - bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError, NetPermissionFlags permissions) { int nOne = 1; @@ -2337,7 +2335,7 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions) } for (const auto& strDest : connOptions.vSeedNodes) { - AddOneShot(strDest); + AddAddrFetch(strDest); } if (clientInterface) { @@ -2390,7 +2388,7 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions) else threadDNSAddressSeed = std::thread(&TraceThread<std::function<void()> >, "dnsseed", std::function<void()>(std::bind(&CConnman::ThreadDNSAddressSeed, this))); - // Initiate outbound connections from -addnode + // Initiate manual connections threadOpenAddedConnections = std::thread(&TraceThread<std::function<void()> >, "addcon", std::function<void()>(std::bind(&CConnman::ThreadOpenAddedConnections, this))); if (connOptions.m_use_addrman_outgoing && !connOptions.m_specified_outgoing.empty()) { @@ -2523,14 +2521,31 @@ void CConnman::MarkAddressGood(const CAddress& addr) addrman.Good(addr); } -void CConnman::AddNewAddresses(const std::vector<CAddress>& vAddr, const CAddress& addrFrom, int64_t nTimePenalty) +bool CConnman::AddNewAddresses(const std::vector<CAddress>& vAddr, const CAddress& addrFrom, int64_t nTimePenalty) { - addrman.Add(vAddr, addrFrom, nTimePenalty); + return addrman.Add(vAddr, addrFrom, nTimePenalty); } -std::vector<CAddress> CConnman::GetAddresses() +std::vector<CAddress> CConnman::GetAddresses(size_t max_addresses, size_t max_pct) { - return addrman.GetAddr(); + std::vector<CAddress> addresses = addrman.GetAddr(max_addresses, max_pct); + if (m_banman) { + addresses.erase(std::remove_if(addresses.begin(), addresses.end(), + [this](const CAddress& addr){return m_banman->IsDiscouraged(addr) || m_banman->IsBanned(addr);}), + addresses.end()); + } + return addresses; +} + +std::vector<CAddress> CConnman::GetAddresses(Network requestor_network, size_t max_addresses, size_t max_pct) +{ + const auto current_time = GetTime<std::chrono::microseconds>(); + if (m_addr_response_caches.find(requestor_network) == m_addr_response_caches.end() || + m_addr_response_caches[requestor_network].m_update_addr_response < current_time) { + m_addr_response_caches[requestor_network].m_addrs_response_cache = GetAddresses(max_addresses, max_pct); + m_addr_response_caches[requestor_network].m_update_addr_response = current_time + std::chrono::hours(21) + GetRandMillis(std::chrono::hours(6)); + } + return m_addr_response_caches[requestor_network].m_addrs_response_cache; } bool CConnman::AddNode(const std::string& strNode) @@ -2564,7 +2579,7 @@ size_t CConnman::GetNodeCount(NumConnections flags) int nNum = 0; for (const auto& pnode : vNodes) { - if (flags & (pnode->fInbound ? 
CONNECTIONS_IN : CONNECTIONS_OUT)) { + if (flags & (pnode->IsInboundConn() ? CONNECTIONS_IN : CONNECTIONS_OUT)) { nNum++; } } @@ -2748,26 +2763,26 @@ int CConnman::GetBestHeight() const unsigned int CConnman::GetReceiveFloodSize() const { return nReceiveFloodSize; } -CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress& addrBindIn, const std::string& addrNameIn, bool fInboundIn, bool block_relay_only) +CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress& addrBindIn, const std::string& addrNameIn, ConnectionType conn_type_in) : nTimeConnected(GetSystemTimeInSeconds()), addr(addrIn), addrBind(addrBindIn), - fInbound(fInboundIn), nKeyedNetGroup(nKeyedNetGroupIn), // Don't relay addr messages to peers that we connect to as block-relay-only // peers (to prevent adversaries from inferring these links from addr // traffic). - m_addr_known{block_relay_only ? nullptr : MakeUnique<CRollingBloomFilter>(5000, 0.001)}, id(idIn), nLocalHostNonce(nLocalHostNonceIn), + m_conn_type(conn_type_in), nLocalServices(nLocalServicesIn), nMyStartingHeight(nMyStartingHeightIn) { hSocket = hSocketIn; addrName = addrNameIn == "" ? addr.ToStringIPPort() : addrNameIn; hashContinue = uint256(); - if (!block_relay_only) { + if (conn_type_in != ConnectionType::BLOCK_RELAY) { m_tx_relay = MakeUnique<TxRelay>(); + m_addr_known = MakeUnique<CRollingBloomFilter>(5000, 0.001); } for (const std::string &msg : getAllNetMessageTypes()) @@ -27,6 +27,7 @@ #include <atomic> #include <cstdint> #include <deque> +#include <map> #include <thread> #include <memory> #include <condition_variable> @@ -50,8 +51,8 @@ static const bool DEFAULT_WHITELISTFORCERELAY = false; static const int TIMEOUT_INTERVAL = 20 * 60; /** Run the feeler connection loop once every 2 minutes or 120 seconds. **/ static const int FEELER_INTERVAL = 120; -/** The maximum number of new addresses to accumulate before announcing. */ -static const unsigned int MAX_ADDR_TO_SEND = 1000; +/** The maximum number of addresses from our addrman to return in response to a getaddr message. */ +static constexpr size_t MAX_ADDR_TO_SEND = 1000; /** Maximum length of incoming protocol messages (no message over 4 MB is currently acceptable). */ static const unsigned int MAX_PROTOCOL_MESSAGE_LENGTH = 4 * 1000 * 1000; /** Maximum length of the user agent string in `version` message */ @@ -113,6 +114,17 @@ struct CSerializedNetMsg std::string m_type; }; +/** Different types of connections to a peer. This enum encapsulates the + * information we have available at the time of opening or accepting the + * connection. Aside from INBOUND, all types are initiated by us. */ +enum class ConnectionType { + INBOUND, /**< peer initiated connections */ + OUTBOUND, /**< full relay connections (blocks, addrs, txns) made automatically. Addresses selected from AddrMan. */ + MANUAL, /**< connections to addresses added via addnode or the connect command line argument */ + FEELER, /**< short lived connections used to test address validity */ + BLOCK_RELAY, /**< only relay blocks to these automatic outbound connections. Addresses selected from AddrMan. 
*/ + ADDR_FETCH, /**< short lived connections used to solicit addrs when starting the node without a populated AddrMan */ +}; class NetEventsInterface; class CConnman @@ -197,7 +209,7 @@ public: bool GetNetworkActive() const { return fNetworkActive; }; bool GetUseAddrmanOutgoing() const { return m_use_addrman_outgoing; }; void SetNetworkActive(bool active); - void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = nullptr, const char *strDest = nullptr, bool fOneShot = false, bool fFeeler = false, bool manual_connection = false, bool block_relay_only = false); + void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = nullptr, const char *strDest = nullptr, ConnectionType conn_type = ConnectionType::OUTBOUND); bool CheckIncomingNonce(uint64_t nonce); bool ForNode(NodeId id, std::function<bool(CNode* pnode)> func); @@ -249,8 +261,15 @@ public: // Addrman functions void SetServices(const CService &addr, ServiceFlags nServices); void MarkAddressGood(const CAddress& addr); - void AddNewAddresses(const std::vector<CAddress>& vAddr, const CAddress& addrFrom, int64_t nTimePenalty = 0); - std::vector<CAddress> GetAddresses(); + bool AddNewAddresses(const std::vector<CAddress>& vAddr, const CAddress& addrFrom, int64_t nTimePenalty = 0); + std::vector<CAddress> GetAddresses(size_t max_addresses, size_t max_pct); + /** + * Cache is used to minimize topology leaks, so it should + * be used for all non-trusted calls, for example, p2p. + * A non-malicious call (from RPC or a peer with addr permission) should + * call the function without a parameter to avoid using the cache. + */ + std::vector<CAddress> GetAddresses(Network requestor_network, size_t max_addresses, size_t max_pct); // This allows temporarily exceeding m_max_outbound_full_relay, with the goal of finding // a peer that is better than all our current peers. 
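
The GetAddresses(Network, ...) overload documented above serves addr requests from a per-network cache that is refreshed only after roughly 21 to 27 hours (21h plus a random offset of up to 6h), so repeated GETADDR scraping returns the same answer within that window. Below is a minimal, self-contained sketch of that caching pattern; the names (CachedResponse, FetchFreshAddrs, GetCachedAddrs) are illustrative stand-ins, not CConnman's actual members.

    // Sketch of a per-network addr-response cache with randomized expiry.
    #include <chrono>
    #include <cstdio>
    #include <map>
    #include <random>
    #include <string>
    #include <vector>

    using Clock = std::chrono::steady_clock;

    struct CachedResponse {
        std::vector<std::string> addrs;   // stand-in for the cached CAddress records
        Clock::time_point valid_until{};  // refresh once this point has passed
    };

    // One cache entry per requestor network, so peers reached over different
    // networks cannot correlate the node by comparing identical responses.
    std::map<int, CachedResponse> g_caches;

    std::vector<std::string> FetchFreshAddrs()
    {
        // Stand-in for pulling a fresh, bounded sample out of addrman.
        return {"203.0.113.1:8333", "198.51.100.2:8333"};
    }

    std::vector<std::string> GetCachedAddrs(int network)
    {
        const auto now = Clock::now();
        auto& entry = g_caches[network];           // default entry is already expired
        if (entry.valid_until <= now) {
            static std::mt19937 rng{std::random_device{}()};
            std::uniform_int_distribution<int> jitter_minutes(0, 6 * 60); // up to ~6h
            entry.addrs = FetchFreshAddrs();
            entry.valid_until = now + std::chrono::hours(21)
                                    + std::chrono::minutes(jitter_minutes(rng));
        }
        return entry.addrs; // same answer until the entry expires
    }

    int main()
    {
        for (const auto& a : GetCachedAddrs(/*network=*/0)) std::printf("%s\n", a.c_str());
    }
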
@@ -340,8 +359,8 @@ private: bool Bind(const CService& addr, unsigned int flags, NetPermissionFlags permissions); bool InitBinds(const std::vector<CService>& binds, const std::vector<NetWhitebindPermissions>& whiteBinds); void ThreadOpenAddedConnections(); - void AddOneShot(const std::string& strDest); - void ProcessOneShot(); + void AddAddrFetch(const std::string& strDest); + void ProcessAddrFetch(); void ThreadOpenConnections(std::vector<std::string> connect); void ThreadMessageHandler(); void AcceptConnection(const ListenSocket& hListenSocket); @@ -362,7 +381,7 @@ private: CNode* FindNode(const CService& addr); bool AttemptToEvictConnection(); - CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, bool manual_connection, bool block_relay_only); + CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type); void AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr) const; void DeleteNode(CNode* pnode); @@ -405,8 +424,8 @@ private: std::atomic<bool> fNetworkActive{true}; bool fAddressesInitialized{false}; CAddrMan addrman; - std::deque<std::string> vOneShots GUARDED_BY(cs_vOneShots); - RecursiveMutex cs_vOneShots; + std::deque<std::string> m_addr_fetches GUARDED_BY(m_addr_fetches_mutex); + RecursiveMutex m_addr_fetches_mutex; std::vector<std::string> vAddedNodes GUARDED_BY(cs_vAddedNodes); RecursiveMutex cs_vAddedNodes; std::vector<CNode*> vNodes GUARDED_BY(cs_vNodes); @@ -416,6 +435,29 @@ private: unsigned int nPrevNodeCount{0}; /** + * Cache responses to addr requests to minimize privacy leak. + * Attack example: scraping addrs in real-time may allow an attacker + * to infer new connections of the victim by detecting new records + * with fresh timestamps (per self-announcement). + */ + struct CachedAddrResponse { + std::vector<CAddress> m_addrs_response_cache; + std::chrono::microseconds m_update_addr_response{0}; + }; + + /** + * Addr responses stored in different caches + * per network prevent cross-network node identification. + * If a node for example is multi-homed under Tor and IPv6, + * a single cache (or no cache at all) would let an attacker + * to easily detect that it is the same node by comparing responses. + * The used memory equals to 1000 CAddress records (or around 32 bytes) per + * distinct Network (up to 5) we have/had an inbound peer from, + * resulting in at most ~160 KB. + */ + std::map<Network, CachedAddrResponse> m_addr_response_caches; + + /** * Services this instance offers. * * This data is replicated in each CNode instance we create during peer @@ -764,12 +806,8 @@ public: } // This boolean is unusued in actual processing, only present for backward compatibility at RPC/QT level bool m_legacyWhitelisted{false}; - bool fFeeler{false}; // If true this node is being used as a short lived feeler. 
- bool fOneShot{false}; - bool m_manual_connection{false}; bool fClient{false}; // set by version message bool m_limited_node{false}; //after BIP159, set by version message - const bool fInbound; std::atomic_bool fSuccessfullyConnected{false}; // Setting fDisconnect to true will cause the node to be disconnected the // next time DisconnectNodes() runs @@ -782,6 +820,60 @@ public: std::atomic_bool fPauseRecv{false}; std::atomic_bool fPauseSend{false}; + bool IsOutboundOrBlockRelayConn() const { + switch(m_conn_type) { + case ConnectionType::OUTBOUND: + case ConnectionType::BLOCK_RELAY: + return true; + case ConnectionType::INBOUND: + case ConnectionType::MANUAL: + case ConnectionType::ADDR_FETCH: + case ConnectionType::FEELER: + return false; + } + + assert(false); + } + + bool IsFullOutboundConn() const { + return m_conn_type == ConnectionType::OUTBOUND; + } + + bool IsManualConn() const { + return m_conn_type == ConnectionType::MANUAL; + } + + bool IsBlockOnlyConn() const { + return m_conn_type == ConnectionType::BLOCK_RELAY; + } + + bool IsFeelerConn() const { + return m_conn_type == ConnectionType::FEELER; + } + + bool IsAddrFetchConn() const { + return m_conn_type == ConnectionType::ADDR_FETCH; + } + + bool IsInboundConn() const { + return m_conn_type == ConnectionType::INBOUND; + } + + bool ExpectServicesFromConn() const { + switch(m_conn_type) { + case ConnectionType::INBOUND: + case ConnectionType::MANUAL: + case ConnectionType::FEELER: + return false; + case ConnectionType::OUTBOUND: + case ConnectionType::BLOCK_RELAY: + case ConnectionType::ADDR_FETCH: + return true; + } + + assert(false); + } + protected: mapMsgCmdSize mapSendBytesPerMsgCmd; mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv); @@ -792,7 +884,7 @@ public: // flood relay std::vector<CAddress> vAddrToSend; - const std::unique_ptr<CRollingBloomFilter> m_addr_known; + std::unique_ptr<CRollingBloomFilter> m_addr_known = nullptr; bool fGetAddr{false}; std::chrono::microseconds m_next_addr_send GUARDED_BY(cs_sendProcessing){0}; std::chrono::microseconds m_next_local_addr_send GUARDED_BY(cs_sendProcessing){0}; @@ -856,7 +948,7 @@ public: std::set<uint256> orphan_work_set; - CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn = "", bool fInboundIn = false, bool block_relay_only = false); + CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn, ConnectionType conn_type_in); ~CNode(); CNode(const CNode&) = delete; CNode& operator=(const CNode&) = delete; @@ -864,6 +956,7 @@ public: private: const NodeId id; const uint64_t nLocalHostNonce; + const ConnectionType m_conn_type; //! Services offered to this peer. //! 
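
The CNode helpers introduced above replace the fInbound/fFeeler/fOneShot/m_manual_connection booleans with queries over a single m_conn_type, using a switch with no default so the compiler can flag any helper that is not updated when a ConnectionType value is added. A compressed, self-contained sketch of that pattern follows; the free-function form (rather than CNode member functions) is for illustration only, but the case groupings match the helpers in the hunk above.

    #include <cassert>
    #include <cstdio>

    // Mirrors the ConnectionType enum introduced above.
    enum class ConnectionType { INBOUND, OUTBOUND, MANUAL, FEELER, BLOCK_RELAY, ADDR_FETCH };

    // Exhaustive switch, no default: a new ConnectionType value makes the
    // compiler point at every helper that still needs a decision.
    bool IsOutboundOrBlockRelayConn(ConnectionType type)
    {
        switch (type) {
        case ConnectionType::OUTBOUND:
        case ConnectionType::BLOCK_RELAY:
            return true;
        case ConnectionType::INBOUND:
        case ConnectionType::MANUAL:
        case ConnectionType::FEELER:
        case ConnectionType::ADDR_FETCH:
            return false;
        }
        assert(false);  // unreachable while the switch stays exhaustive
        return false;   // keeps -Wreturn-type quiet in NDEBUG builds
    }

    bool ExpectServicesFromConn(ConnectionType type)
    {
        switch (type) {
        case ConnectionType::INBOUND:
        case ConnectionType::MANUAL:
        case ConnectionType::FEELER:
            return false;
        case ConnectionType::OUTBOUND:
        case ConnectionType::BLOCK_RELAY:
        case ConnectionType::ADDR_FETCH:
            return true;
        }
        assert(false);
        return false;
    }

    int main()
    {
        const ConnectionType feeler = ConnectionType::FEELER;
        std::printf("counts as outbound slot: %d, expect full services: %d\n",
                    IsOutboundOrBlockRelayConn(feeler), ExpectServicesFromConn(feeler));
    }
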
diff --git a/src/net_permissions.cpp b/src/net_permissions.cpp index a75838307c..53648deb40 100644 --- a/src/net_permissions.cpp +++ b/src/net_permissions.cpp @@ -15,6 +15,7 @@ const std::vector<std::string> NET_PERMISSIONS_DOC{ "relay (relay even in -blocksonly mode)", "mempool (allow requesting BIP35 mempool contents)", "download (allow getheaders during IBD, no disconnect after maxuploadtarget limit)", + "addr (responses to GETADDR avoid hitting the cache and contain random records with the most up-to-date info)" }; namespace { @@ -50,6 +51,7 @@ bool TryParsePermissionFlags(const std::string str, NetPermissionFlags& output, else if (permission == "download") NetPermissions::AddFlag(flags, PF_DOWNLOAD); else if (permission == "all") NetPermissions::AddFlag(flags, PF_ALL); else if (permission == "relay") NetPermissions::AddFlag(flags, PF_RELAY); + else if (permission == "addr") NetPermissions::AddFlag(flags, PF_ADDR); else if (permission.length() == 0); // Allow empty entries else { error = strprintf(_("Invalid P2P permission: '%s'"), permission); @@ -75,6 +77,7 @@ std::vector<std::string> NetPermissions::ToStrings(NetPermissionFlags flags) if (NetPermissions::HasFlag(flags, PF_RELAY)) strings.push_back("relay"); if (NetPermissions::HasFlag(flags, PF_MEMPOOL)) strings.push_back("mempool"); if (NetPermissions::HasFlag(flags, PF_DOWNLOAD)) strings.push_back("download"); + if (NetPermissions::HasFlag(flags, PF_ADDR)) strings.push_back("addr"); return strings; } diff --git a/src/net_permissions.h b/src/net_permissions.h index a9633ee2ae..5b68f635a7 100644 --- a/src/net_permissions.h +++ b/src/net_permissions.h @@ -29,10 +29,12 @@ enum NetPermissionFlags { PF_NOBAN = (1U << 4) | PF_DOWNLOAD, // Can query the mempool PF_MEMPOOL = (1U << 5), + // Can request addrs without hitting a privacy-preserving cache + PF_ADDR = (1U << 7), // True if the user did not specifically set fine grained permissions PF_ISIMPLICIT = (1U << 31), - PF_ALL = PF_BLOOMFILTER | PF_FORCERELAY | PF_RELAY | PF_NOBAN | PF_MEMPOOL | PF_DOWNLOAD, + PF_ALL = PF_BLOOMFILTER | PF_FORCERELAY | PF_RELAY | PF_NOBAN | PF_MEMPOOL | PF_DOWNLOAD | PF_ADDR, }; class NetPermissions diff --git a/src/net_processing.cpp b/src/net_processing.cpp index fc5b4a8e7f..7b83583e41 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -143,6 +143,8 @@ static constexpr unsigned int MAX_FEEFILTER_CHANGE_DELAY = 5 * 60; static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000; /** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */ static constexpr uint32_t MAX_GETCFHEADERS_SIZE = 2000; +/** the maximum percentage of addresses from our addrman to return in response to a getaddr message. */ +static constexpr size_t MAX_PCT_ADDR_TO_SEND = 23; struct COrphanTx { // When modifying, adapt the copy of this definition in tests/DoS_tests. @@ -187,7 +189,7 @@ namespace { * million to make it highly unlikely for users to have issues with this * filter. * - * We only need to add wtxids to this filter. For non-segwit + * We typically only add wtxids to this filter. For non-segwit * transactions, the txid == wtxid, so this only prevents us from * re-downloading non-segwit transactions when communicating with * non-wtxidrelay peers -- which is important for avoiding malleation @@ -196,6 +198,12 @@ namespace { * the reject filter store wtxids is exactly what we want to avoid * redownload of a rejected transaction. 
* + * In cases where we can tell that a segwit transaction will fail + * validation no matter the witness, we may add the txid of such + * transaction to the filter as well. This can be helpful when + * communicating with txid-relay peers or if we were to otherwise fetch a + * transaction via txid (eg in our orphan handling). + * * Memory used: 1.3 MB */ std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main); @@ -236,7 +244,7 @@ namespace { /** When our tip was last updated. */ std::atomic<int64_t> g_last_tip_update(0); - /** Relay map */ + /** Relay map (txid or wtxid -> CTransactionRef) */ typedef std::map<uint256, CTransactionRef> MapRelay; MapRelay mapRelay GUARDED_BY(cs_main); /** Expiration-time ordered list of (expire time, relay map entry) pairs. */ @@ -398,7 +406,7 @@ struct CNodeState { /* Track when to attempt download of announced transactions (process * time in micros -> txid) */ - std::multimap<std::chrono::microseconds, uint256> m_tx_process_time; + std::multimap<std::chrono::microseconds, GenTxid> m_tx_process_time; //! Store all the transactions a peer has recently announced std::set<uint256> m_tx_announced; @@ -473,7 +481,7 @@ static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUS nPreferredDownload -= state->fPreferredDownload; // Whether this node should be marked as a preferred download node. - state->fPreferredDownload = (!node.fInbound || node.HasPermission(PF_NOBAN)) && !node.fOneShot && !node.fClient; + state->fPreferredDownload = (!node.IsInboundConn() || node.HasPermission(PF_NOBAN)) && !node.IsAddrFetchConn() && !node.fClient; nPreferredDownload += state->fPreferredDownload; } @@ -751,34 +759,34 @@ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vec } } -void EraseTxRequest(const uint256& txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +void EraseTxRequest(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { - g_already_asked_for.erase(txid); + g_already_asked_for.erase(gtxid.GetHash()); } -std::chrono::microseconds GetTxRequestTime(const uint256& txid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +std::chrono::microseconds GetTxRequestTime(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { - auto it = g_already_asked_for.find(txid); + auto it = g_already_asked_for.find(gtxid.GetHash()); if (it != g_already_asked_for.end()) { return it->second; } return {}; } -void UpdateTxRequestTime(const uint256& txid, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +void UpdateTxRequestTime(const GenTxid& gtxid, std::chrono::microseconds request_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { - auto it = g_already_asked_for.find(txid); + auto it = g_already_asked_for.find(gtxid.GetHash()); if (it == g_already_asked_for.end()) { - g_already_asked_for.insert(std::make_pair(txid, request_time)); + g_already_asked_for.insert(std::make_pair(gtxid.GetHash(), request_time)); } else { g_already_asked_for.update(it, request_time); } } -std::chrono::microseconds CalculateTxGetDataTime(const uint256& txid, std::chrono::microseconds current_time, bool use_inbound_delay, bool use_txid_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +std::chrono::microseconds CalculateTxGetDataTime(const GenTxid& gtxid, std::chrono::microseconds current_time, bool use_inbound_delay, bool use_txid_delay) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { std::chrono::microseconds process_time; - const auto last_request_time = GetTxRequestTime(txid); + const auto last_request_time = GetTxRequestTime(gtxid); // First time requesting this tx if 
(last_request_time.count() == 0) { process_time = current_time; @@ -797,23 +805,23 @@ std::chrono::microseconds CalculateTxGetDataTime(const uint256& txid, std::chron return process_time; } -void RequestTx(CNodeState* state, const uint256& txid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +void RequestTx(CNodeState* state, const GenTxid& gtxid, std::chrono::microseconds current_time) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { CNodeState::TxDownloadState& peer_download_state = state->m_tx_download; if (peer_download_state.m_tx_announced.size() >= MAX_PEER_TX_ANNOUNCEMENTS || peer_download_state.m_tx_process_time.size() >= MAX_PEER_TX_ANNOUNCEMENTS || - peer_download_state.m_tx_announced.count(txid)) { + peer_download_state.m_tx_announced.count(gtxid.GetHash())) { // Too many queued announcements from this peer, or we already have // this announcement return; } - peer_download_state.m_tx_announced.insert(txid); + peer_download_state.m_tx_announced.insert(gtxid.GetHash()); // Calculate the time to try requesting this transaction. Use // fPreferredDownload as a proxy for outbound peers. - const auto process_time = CalculateTxGetDataTime(txid, current_time, !state->fPreferredDownload, !state->m_wtxid_relay && g_wtxid_relay_peers > 0); + const auto process_time = CalculateTxGetDataTime(gtxid, current_time, !state->fPreferredDownload, !state->m_wtxid_relay && g_wtxid_relay_peers > 0); - peer_download_state.m_tx_process_time.emplace(process_time, txid); + peer_download_state.m_tx_process_time.emplace(process_time, gtxid); } } // namespace @@ -827,22 +835,15 @@ void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds) if (state) state->m_last_block_announcement = time_in_seconds; } -// Returns true for outbound peers, excluding manual connections, feelers, and -// one-shots. 
-static bool IsOutboundDisconnectionCandidate(const CNode& node) -{ - return !(node.fInbound || node.m_manual_connection || node.fFeeler || node.fOneShot); -} - void PeerLogicValidation::InitializeNode(CNode *pnode) { CAddress addr = pnode->addr; std::string addrName = pnode->GetAddrName(); NodeId nodeid = pnode->GetId(); { LOCK(cs_main); - mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName), pnode->fInbound, pnode->m_manual_connection)); + mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName), pnode->IsInboundConn(), pnode->IsManualConn())); } - if(!pnode->fInbound) + if(!pnode->IsInboundConn()) PushNodeVersion(*pnode, *connman, GetTime()); } @@ -1161,6 +1162,7 @@ static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state, } // Conflicting (but not necessarily invalid) data or different policy: case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE: + case TxValidationResult::TX_INPUTS_NOT_STANDARD: case TxValidationResult::TX_NOT_STANDARD: case TxValidationResult::TX_MISSING_INPUTS: case TxValidationResult::TX_PREMATURE_SPEND: @@ -1453,7 +1455,7 @@ bool static AlreadyHave(const CInv& inv, const CTxMemPool& mempool) EXCLUSIVE_LO if (g_recent_confirmed_transactions->contains(inv.hash)) return true; } - return recentRejects->contains(inv.hash) || mempool.exists(inv.hash, inv.IsMsgWtx()); + return recentRejects->contains(inv.hash) || mempool.exists(ToGenTxid(inv)); } case MSG_BLOCK: case MSG_WITNESS_BLOCK: @@ -1671,9 +1673,9 @@ void static ProcessGetBlockData(CNode& pfrom, const CChainParams& chainparams, c } //! Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). -CTransactionRef static FindTxForGetData(const CNode& peer, const uint256& txid_or_wtxid, bool use_wtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main) +CTransactionRef static FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main) { - auto txinfo = mempool.info(txid_or_wtxid, use_wtxid); + auto txinfo = mempool.info(gtxid); if (txinfo.tx) { // If a TX could have been INVed in reply to a MEMPOOL request, // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request @@ -1686,11 +1688,11 @@ CTransactionRef static FindTxForGetData(const CNode& peer, const uint256& txid_o { LOCK(cs_main); // Otherwise, the transaction must have been announced recently. - if (State(peer.GetId())->m_recently_announced_invs.contains(txid_or_wtxid)) { + if (State(peer.GetId())->m_recently_announced_invs.contains(gtxid.GetHash())) { // If it was, it can be relayed from either the mempool... if (txinfo.tx) return std::move(txinfo.tx); // ... or the relay pool. - auto mi = mapRelay.find(txid_or_wtxid); + auto mi = mapRelay.find(gtxid.GetHash()); if (mi != mapRelay.end()) return mi->second; } } @@ -1727,23 +1729,34 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm continue; } - CTransactionRef tx = FindTxForGetData(pfrom, inv.hash, inv.IsMsgWtx(), mempool_req, now); + CTransactionRef tx = FindTxForGetData(pfrom, ToGenTxid(inv), mempool_req, now); if (tx) { // WTX and WITNESS_TX imply we serialize with witness int nSendFlags = (inv.IsMsgTx() ? 
SERIALIZE_TRANSACTION_NO_WITNESS : 0); connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx)); mempool.RemoveUnbroadcastTx(tx->GetHash()); // As we're going to send tx, make sure its unconfirmed parents are made requestable. - for (const auto& txin : tx->vin) { - auto txinfo = mempool.info(txin.prevout.hash); - if (txinfo.tx && txinfo.m_time > now - UNCONDITIONAL_RELAY_DELAY) { - // Relaying a transaction with a recent but unconfirmed parent. - if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory, return !pfrom.m_tx_relay->filterInventoryKnown.contains(txin.prevout.hash))) { - LOCK(cs_main); - State(pfrom.GetId())->m_recently_announced_invs.insert(txin.prevout.hash); + std::vector<uint256> parent_ids_to_add; + { + LOCK(mempool.cs); + auto txiter = mempool.GetIter(tx->GetHash()); + if (txiter) { + const CTxMemPool::setEntries& parents = mempool.GetMemPoolParents(*txiter); + parent_ids_to_add.reserve(parents.size()); + for (CTxMemPool::txiter parent_iter : parents) { + if (parent_iter->GetTime() > now - UNCONDITIONAL_RELAY_DELAY) { + parent_ids_to_add.push_back(parent_iter->GetTx().GetHash()); + } } } } + for (const uint256& parent_txid : parent_ids_to_add) { + // Relaying a transaction with a recent but unconfirmed parent. + if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory, return !pfrom.m_tx_relay->filterInventoryKnown.contains(parent_txid))) { + LOCK(cs_main); + State(pfrom.GetId())->m_recently_announced_invs.insert(parent_txid); + } + } } else { vNotFound.push_back(inv); } @@ -1964,14 +1977,14 @@ static void ProcessHeadersMessage(CNode& pfrom, CConnman& connman, ChainstateMan // until we have a headers chain that has at least // nMinimumChainWork, even if a peer has a chain past our tip, // as an anti-DoS measure. - if (IsOutboundDisconnectionCandidate(pfrom)) { + if (pfrom.IsOutboundOrBlockRelayConn()) { LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId()); pfrom.fDisconnect = true; } } } - if (!pfrom.fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr && pfrom.m_tx_relay != nullptr) { + if (!pfrom.fDisconnect && pfrom.IsOutboundOrBlockRelayConn() && nodestate->pindexBestKnownBlock != nullptr && pfrom.m_tx_relay != nullptr) { // If this is an outbound full-relay peer, check to see if we should protect // it from the bad/lagging chain logic. // Note that block-relay-only peers are already implicitly protected, so we @@ -2052,6 +2065,19 @@ void static ProcessOrphanTx(CConnman& connman, CTxMemPool& mempool, std::set<uin // if we start doing this too early. assert(recentRejects); recentRejects->insert(orphanTx.GetWitnessHash()); + // If the transaction failed for TX_INPUTS_NOT_STANDARD, + // then we know that the witness was irrelevant to the policy + // failure, since this check depends only on the txid + // (the scriptPubKey being spent is covered by the txid). + // Add the txid to the reject filter to prevent repeated + // processing of this transaction in the event that child + // transactions are later received (resulting in + // parent-fetching by txid via the orphan-handling logic). + if (orphan_state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && orphanTx.GetWitnessHash() != orphanTx.GetHash()) { + // We only add the txid if it differs from the wtxid, to + // avoid wasting entries in the rolling bloom filter. 
+ recentRejects->insert(orphanTx.GetHash()); + } } EraseOrphanTx(orphanHash); done = true; @@ -2065,7 +2091,7 @@ void static ProcessOrphanTx(CConnman& connman, CTxMemPool& mempool, std::set<uin * * May disconnect from the peer in the case of a bad request. * - * @param[in] pfrom The peer that we received the request from + * @param[in] peer The peer that we received the request from * @param[in] chain_params Chain parameters * @param[in] filter_type The filter type the request is for. Must be basic filters. * @param[in] start_height The start height for the request @@ -2075,7 +2101,7 @@ void static ProcessOrphanTx(CConnman& connman, CTxMemPool& mempool, std::set<uin * @param[out] filter_index The filter index, if the request can be serviced. * @return True if the request can be serviced. */ -static bool PrepareBlockFilterRequest(CNode& pfrom, const CChainParams& chain_params, +static bool PrepareBlockFilterRequest(CNode& peer, const CChainParams& chain_params, BlockFilterType filter_type, uint32_t start_height, const uint256& stop_hash, uint32_t max_height_diff, const CBlockIndex*& stop_index, @@ -2083,11 +2109,11 @@ static bool PrepareBlockFilterRequest(CNode& pfrom, const CChainParams& chain_pa { const bool supported_filter_type = (filter_type == BlockFilterType::BASIC && - gArgs.GetBoolArg("-peerblockfilters", DEFAULT_PEERBLOCKFILTERS)); + (peer.GetLocalServices() & NODE_COMPACT_FILTERS)); if (!supported_filter_type) { LogPrint(BCLog::NET, "peer %d requested unsupported block filter type: %d\n", - pfrom.GetId(), static_cast<uint8_t>(filter_type)); - pfrom.fDisconnect = true; + peer.GetId(), static_cast<uint8_t>(filter_type)); + peer.fDisconnect = true; return false; } @@ -2098,8 +2124,8 @@ static bool PrepareBlockFilterRequest(CNode& pfrom, const CChainParams& chain_pa // Check that the stop block exists and the peer would be allowed to fetch it. if (!stop_index || !BlockRequestAllowed(stop_index, chain_params.GetConsensus())) { LogPrint(BCLog::NET, "peer %d requested invalid block hash: %s\n", - pfrom.GetId(), stop_hash.ToString()); - pfrom.fDisconnect = true; + peer.GetId(), stop_hash.ToString()); + peer.fDisconnect = true; return false; } } @@ -2108,14 +2134,14 @@ static bool PrepareBlockFilterRequest(CNode& pfrom, const CChainParams& chain_pa if (start_height > stop_height) { LogPrint(BCLog::NET, "peer %d sent invalid getcfilters/getcfheaders with " /* Continued */ "start height %d and stop height %d\n", - pfrom.GetId(), start_height, stop_height); - pfrom.fDisconnect = true; + peer.GetId(), start_height, stop_height); + peer.fDisconnect = true; return false; } if (stop_height - start_height >= max_height_diff) { LogPrint(BCLog::NET, "peer %d requested too many cfilters/cfheaders: %d / %d\n", - pfrom.GetId(), stop_height - start_height + 1, max_height_diff); - pfrom.fDisconnect = true; + peer.GetId(), stop_height - start_height + 1, max_height_diff); + peer.fDisconnect = true; return false; } @@ -2133,12 +2159,12 @@ static bool PrepareBlockFilterRequest(CNode& pfrom, const CChainParams& chain_pa * * May disconnect from the peer in the case of a bad request. 
* - * @param[in] pfrom The peer that we received the request from + * @param[in] peer The peer that we received the request from * @param[in] vRecv The raw message received * @param[in] chain_params Chain parameters * @param[in] connman Pointer to the connection manager */ -static void ProcessGetCFilters(CNode& pfrom, CDataStream& vRecv, const CChainParams& chain_params, +static void ProcessGetCFilters(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params, CConnman& connman) { uint8_t filter_type_ser; @@ -2151,13 +2177,12 @@ static void ProcessGetCFilters(CNode& pfrom, CDataStream& vRecv, const CChainPar const CBlockIndex* stop_index; BlockFilterIndex* filter_index; - if (!PrepareBlockFilterRequest(pfrom, chain_params, filter_type, start_height, stop_hash, + if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, start_height, stop_hash, MAX_GETCFILTERS_SIZE, stop_index, filter_index)) { return; } std::vector<BlockFilter> filters; - if (!filter_index->LookupFilterRange(start_height, stop_index, filters)) { LogPrint(BCLog::NET, "Failed to find block filter in index: filter_type=%s, start_height=%d, stop_hash=%s\n", BlockFilterTypeName(filter_type), start_height, stop_hash.ToString()); @@ -2165,9 +2190,9 @@ static void ProcessGetCFilters(CNode& pfrom, CDataStream& vRecv, const CChainPar } for (const auto& filter : filters) { - CSerializedNetMsg msg = CNetMsgMaker(pfrom.GetSendVersion()) + CSerializedNetMsg msg = CNetMsgMaker(peer.GetSendVersion()) .Make(NetMsgType::CFILTER, filter); - connman.PushMessage(&pfrom, std::move(msg)); + connman.PushMessage(&peer, std::move(msg)); } } @@ -2176,12 +2201,12 @@ static void ProcessGetCFilters(CNode& pfrom, CDataStream& vRecv, const CChainPar * * May disconnect from the peer in the case of a bad request. * - * @param[in] pfrom The peer that we received the request from + * @param[in] peer The peer that we received the request from * @param[in] vRecv The raw message received * @param[in] chain_params Chain parameters * @param[in] connman Pointer to the connection manager */ -static void ProcessGetCFHeaders(CNode& pfrom, CDataStream& vRecv, const CChainParams& chain_params, +static void ProcessGetCFHeaders(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params, CConnman& connman) { uint8_t filter_type_ser; @@ -2194,7 +2219,7 @@ static void ProcessGetCFHeaders(CNode& pfrom, CDataStream& vRecv, const CChainPa const CBlockIndex* stop_index; BlockFilterIndex* filter_index; - if (!PrepareBlockFilterRequest(pfrom, chain_params, filter_type, start_height, stop_hash, + if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, start_height, stop_hash, MAX_GETCFHEADERS_SIZE, stop_index, filter_index)) { return; } @@ -2217,13 +2242,13 @@ static void ProcessGetCFHeaders(CNode& pfrom, CDataStream& vRecv, const CChainPa return; } - CSerializedNetMsg msg = CNetMsgMaker(pfrom.GetSendVersion()) + CSerializedNetMsg msg = CNetMsgMaker(peer.GetSendVersion()) .Make(NetMsgType::CFHEADERS, filter_type_ser, stop_index->GetBlockHash(), prev_header, filter_hashes); - connman.PushMessage(&pfrom, std::move(msg)); + connman.PushMessage(&peer, std::move(msg)); } /** @@ -2231,12 +2256,12 @@ static void ProcessGetCFHeaders(CNode& pfrom, CDataStream& vRecv, const CChainPa * * May disconnect from the peer in the case of a bad request. 
* - * @param[in] pfrom The peer that we received the request from + * @param[in] peer The peer that we received the request from * @param[in] vRecv The raw message received * @param[in] chain_params Chain parameters * @param[in] connman Pointer to the connection manager */ -static void ProcessGetCFCheckPt(CNode& pfrom, CDataStream& vRecv, const CChainParams& chain_params, +static void ProcessGetCFCheckPt(CNode& peer, CDataStream& vRecv, const CChainParams& chain_params, CConnman& connman) { uint8_t filter_type_ser; @@ -2248,7 +2273,7 @@ static void ProcessGetCFCheckPt(CNode& pfrom, CDataStream& vRecv, const CChainPa const CBlockIndex* stop_index; BlockFilterIndex* filter_index; - if (!PrepareBlockFilterRequest(pfrom, chain_params, filter_type, /*start_height=*/0, stop_hash, + if (!PrepareBlockFilterRequest(peer, chain_params, filter_type, /*start_height=*/0, stop_hash, /*max_height_diff=*/std::numeric_limits<uint32_t>::max(), stop_index, filter_index)) { return; @@ -2269,12 +2294,12 @@ static void ProcessGetCFCheckPt(CNode& pfrom, CDataStream& vRecv, const CChainPa } } - CSerializedNetMsg msg = CNetMsgMaker(pfrom.GetSendVersion()) + CSerializedNetMsg msg = CNetMsgMaker(peer.GetSendVersion()) .Make(NetMsgType::CFCHECKPT, filter_type_ser, stop_index->GetBlockHash(), headers); - connman.PushMessage(&pfrom, std::move(msg)); + connman.PushMessage(&peer, std::move(msg)); } void ProcessMessage( @@ -2321,11 +2346,11 @@ void ProcessMessage( vRecv >> nVersion >> nServiceInt >> nTime >> addrMe; nSendVersion = std::min(nVersion, PROTOCOL_VERSION); nServices = ServiceFlags(nServiceInt); - if (!pfrom.fInbound) + if (!pfrom.IsInboundConn()) { connman.SetServices(pfrom.addr, nServices); } - if (!pfrom.fInbound && !pfrom.fFeeler && !pfrom.m_manual_connection && !HasAllDesirableServiceFlags(nServices)) + if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices)) { LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices)); pfrom.fDisconnect = true; @@ -2352,20 +2377,20 @@ void ProcessMessage( if (!vRecv.empty()) vRecv >> fRelay; // Disconnect if we connected to ourself - if (pfrom.fInbound && !connman.CheckIncomingNonce(nNonce)) + if (pfrom.IsInboundConn() && !connman.CheckIncomingNonce(nNonce)) { LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToString()); pfrom.fDisconnect = true; return; } - if (pfrom.fInbound && addrMe.IsRoutable()) + if (pfrom.IsInboundConn() && addrMe.IsRoutable()) { SeenLocal(addrMe); } // Be shy and don't send version until we hear - if (pfrom.fInbound) + if (pfrom.IsInboundConn()) PushNodeVersion(pfrom, connman, GetAdjustedTime()); if (nVersion >= WTXID_RELAY_VERSION) { @@ -2409,7 +2434,7 @@ void ProcessMessage( UpdatePreferredDownload(pfrom, State(pfrom.GetId())); } - if (!pfrom.fInbound && pfrom.IsAddrRelayPeer()) + if (!pfrom.IsInboundConn() && pfrom.IsAddrRelayPeer()) { // Advertise our address if (fListen && !::ChainstateActive().IsInitialBlockDownload()) @@ -2453,8 +2478,7 @@ void ProcessMessage( } // Feeler connections exist only to verify if address is online. 
- if (pfrom.fFeeler) { - assert(pfrom.fInbound == false); + if (pfrom.IsFeelerConn()) { pfrom.fDisconnect = true; } return; @@ -2474,7 +2498,7 @@ void ProcessMessage( { pfrom.SetRecvVersion(std::min(pfrom.nVersion.load(), PROTOCOL_VERSION)); - if (!pfrom.fInbound) { + if (!pfrom.IsInboundConn()) { // Mark this node as currently connected, so we update its timestamp later. LOCK(cs_main); State(pfrom.GetId())->fCurrentlyConnected = true; @@ -2541,7 +2565,7 @@ void ProcessMessage( if (!pfrom.IsAddrRelayPeer()) { return; } - if (vAddr.size() > 1000) + if (vAddr.size() > MAX_ADDR_TO_SEND) { LOCK(cs_main); Misbehaving(pfrom.GetId(), 20, strprintf("addr message size = %u", vAddr.size())); @@ -2583,7 +2607,7 @@ void ProcessMessage( connman.AddNewAddresses(vAddrOk, pfrom.addr, 2 * 60 * 60); if (vAddr.size() < 1000) pfrom.fGetAddr = false; - if (pfrom.fOneShot) + if (pfrom.IsAddrFetchConn()) pfrom.fDisconnect = true; return; } @@ -2647,7 +2671,9 @@ void ProcessMessage( if (interruptMsgProc) return; - // ignore INVs that don't match wtxidrelay setting + // Ignore INVs that don't match wtxidrelay setting. + // Note that orphan parent fetching always uses MSG_TX GETDATAs regardless of the wtxidrelay setting. + // This is fine as no INV messages are involved in that process. if (State(pfrom.GetId())->m_wtxid_relay) { if (inv.IsMsgTx()) continue; } else { @@ -2678,7 +2704,7 @@ void ProcessMessage( pfrom.fDisconnect = true; return; } else if (!fAlreadyHave && !chainman.ActiveChainstate().IsInitialBlockDownload()) { - RequestTx(State(pfrom.GetId()), inv.hash, current_time); + RequestTx(State(pfrom.GetId()), ToGenTxid(inv), current_time); } } } @@ -2931,15 +2957,17 @@ void ProcessMessage( TxValidationState state; - nodestate->m_tx_download.m_tx_announced.erase(hash); - nodestate->m_tx_download.m_tx_in_flight.erase(hash); - EraseTxRequest(hash); + for (const GenTxid& gtxid : {GenTxid(false, txid), GenTxid(true, wtxid)}) { + nodestate->m_tx_download.m_tx_announced.erase(gtxid.GetHash()); + nodestate->m_tx_download.m_tx_in_flight.erase(gtxid.GetHash()); + EraseTxRequest(gtxid); + } std::list<CTransactionRef> lRemovedTxn; // We do the AlreadyHave() check using wtxid, rather than txid - in the // absence of witness malleation, this is strictly better, because the - // recent rejects filter may contain the wtxid but will never contain + // recent rejects filter may contain the wtxid but rarely contains // the txid of a segwit transaction that has been rejected. // In the presence of witness malleation, it's possible that by only // doing the check with wtxid, we could overlook a transaction which @@ -2975,8 +3003,19 @@ void ProcessMessage( else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { bool fRejectedParents = false; // It may be the case that the orphans parents have all been rejected + + // Deduplicate parent txids, so that we don't have to loop over + // the same parent txid more than once down below. + std::vector<uint256> unique_parents; + unique_parents.reserve(tx.vin.size()); for (const CTxIn& txin : tx.vin) { - if (recentRejects->contains(txin.prevout.hash)) { + // We start with all parents, and then remove duplicates below. 
+ unique_parents.push_back(txin.prevout.hash); + } + std::sort(unique_parents.begin(), unique_parents.end()); + unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end()); + for (const uint256& parent_txid : unique_parents) { + if (recentRejects->contains(parent_txid)) { fRejectedParents = true; break; } @@ -2985,17 +3024,15 @@ void ProcessMessage( uint32_t nFetchFlags = GetFetchFlags(pfrom); const auto current_time = GetTime<std::chrono::microseconds>(); - if (!State(pfrom.GetId())->m_wtxid_relay) { - for (const CTxIn& txin : tx.vin) { - // Here, we only have the txid (and not wtxid) of the - // inputs, so we only request parents from - // non-wtxid-relay peers. - // Eventually we should replace this with an improved - // protocol for getting all unconfirmed parents. - CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash); - pfrom.AddKnownTx(txin.prevout.hash); - if (!AlreadyHave(_inv, mempool)) RequestTx(State(pfrom.GetId()), _inv.hash, current_time); - } + for (const uint256& parent_txid : unique_parents) { + // Here, we only have the txid (and not wtxid) of the + // inputs, so we only request in txid mode, even for + // wtxidrelay peers. + // Eventually we should replace this with an improved + // protocol for getting all unconfirmed parents. + CInv _inv(MSG_TX | nFetchFlags, parent_txid); + pfrom.AddKnownTx(parent_txid); + if (!AlreadyHave(_inv, mempool)) RequestTx(State(pfrom.GetId()), ToGenTxid(_inv), current_time); } AddOrphanTx(ptx, pfrom.GetId()); @@ -3033,6 +3070,17 @@ void ProcessMessage( // if we start doing this too early. assert(recentRejects); recentRejects->insert(tx.GetWitnessHash()); + // If the transaction failed for TX_INPUTS_NOT_STANDARD, + // then we know that the witness was irrelevant to the policy + // failure, since this check depends only on the txid + // (the scriptPubKey being spent is covered by the txid). + // Add the txid to the reject filter to prevent repeated + // processing of this transaction in the event that child + // transactions are later received (resulting in + // parent-fetching by txid via the orphan-handling logic). + if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) { + recentRejects->insert(tx.GetHash()); + } if (RecursiveDynamicUsage(*ptx) < 100000) { AddToCompactExtraTransactions(ptx); } @@ -3454,7 +3502,7 @@ void ProcessMessage( // to users' AddrMan and later request them by sending getaddr messages. // Making nodes which are behind NAT and can only make outgoing connections ignore // the getaddr message mitigates the attack. - if (!pfrom.fInbound) { + if (!pfrom.IsInboundConn()) { LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. 
peer=%d\n", pfrom.GetId()); return; } @@ -3472,13 +3520,15 @@ void ProcessMessage( pfrom.fSentAddr = true; pfrom.vAddrToSend.clear(); - std::vector<CAddress> vAddr = connman.GetAddresses(); + std::vector<CAddress> vAddr; + if (pfrom.HasPermission(PF_ADDR)) { + vAddr = connman.GetAddresses(MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND); + } else { + vAddr = connman.GetAddresses(pfrom.addr.GetNetwork(), MAX_ADDR_TO_SEND, MAX_PCT_ADDR_TO_SEND); + } FastRandomContext insecure_rand; for (const CAddress &addr : vAddr) { - bool banned_or_discouraged = banman && (banman->IsDiscouraged(addr) || banman->IsBanned(addr)); - if (!banned_or_discouraged) { - pfrom.PushAddress(addr, insecure_rand); - } + pfrom.PushAddress(addr, insecure_rand); } return; } @@ -3735,7 +3785,7 @@ bool PeerLogicValidation::MaybeDiscourageAndDisconnect(CNode& pnode) return false; } - if (pnode.m_manual_connection) { + if (pnode.IsManualConn()) { // We never disconnect or discourage manual peers for bad behavior LogPrintf("Warning: not punishing manually connected peer %d!\n", peer_id); return false; @@ -3856,7 +3906,7 @@ void PeerLogicValidation::ConsiderEviction(CNode& pto, int64_t time_in_seconds) CNodeState &state = *State(pto.GetId()); const CNetMsgMaker msgMaker(pto.GetSendVersion()); - if (!state.m_chain_sync.m_protect && IsOutboundDisconnectionCandidate(pto) && state.fSyncStarted) { + if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) { // This is an outbound peer subject to disconnection if they don't // announce a block with as much work as the current tip within // CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if @@ -3918,7 +3968,7 @@ void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds) AssertLockHeld(cs_main); // Ignore non-outbound peers, or nodes marked for disconnect already - if (!IsOutboundDisconnectionCandidate(*pnode) || pnode->fDisconnect) return; + if (!pnode->IsOutboundOrBlockRelayConn() || pnode->fDisconnect) return; CNodeState *state = State(pnode->GetId()); if (state == nullptr) return; // shouldn't be possible, but just in case // Don't evict our protected peers @@ -4077,8 +4127,8 @@ bool PeerLogicValidation::SendMessages(CNode* pto) { pto->m_addr_known->insert(addr.GetKey()); vAddr.push_back(addr); - // receiver rejects addr messages larger than 1000 - if (vAddr.size() >= 1000) + // receiver rejects addr messages larger than MAX_ADDR_TO_SEND + if (vAddr.size() >= MAX_ADDR_TO_SEND) { connman->PushMessage(pto, msgMaker.Make(NetMsgType::ADDR, vAddr)); vAddr.clear(); @@ -4096,7 +4146,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) // Start block sync if (pindexBestHeader == nullptr) pindexBestHeader = ::ChainActive().Tip(); - bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do. + bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->IsAddrFetchConn()); // Download if this is a nice peer, or we have no nice peers and this one might do. if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) { // Only actively request headers from a single peer, unless we're close to today. 
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) { @@ -4281,7 +4331,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) bool fSendTrickle = pto->HasPermission(PF_NOBAN); if (pto->m_tx_relay->nNextInvSend < current_time) { fSendTrickle = true; - if (pto->fInbound) { + if (pto->IsInboundConn()) { pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{connman->PoissonNextSendInbound(nNow, INVENTORY_BROADCAST_INTERVAL)}; } else { // Use half the delay for outbound peers, as there is less privacy concern for them. @@ -4356,6 +4406,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) std::set<uint256>::iterator it = vInvTx.back(); vInvTx.pop_back(); uint256 hash = *it; + CInv inv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash); // Remove it from the to-be-sent set pto->m_tx_relay->setInventoryTxToSend.erase(it); // Check if not in the filter already @@ -4363,7 +4414,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) continue; } // Not in the mempool anymore? don't bother sending it. - auto txinfo = m_mempool.info(hash, state.m_wtxid_relay); + auto txinfo = m_mempool.info(ToGenTxid(inv)); if (!txinfo.tx) { continue; } @@ -4376,7 +4427,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto) if (pto->m_tx_relay->pfilter && !pto->m_tx_relay->pfilter->IsRelevantAndUpdate(*txinfo.tx)) continue; // Send State(pto->GetId())->m_recently_announced_invs.insert(hash); - vInv.push_back(CInv(state.m_wtxid_relay ? MSG_WTX : MSG_TX, hash)); + vInv.push_back(inv); nRelayedTransactions++; { // Expire old relay messages @@ -4529,15 +4580,15 @@ bool PeerLogicValidation::SendMessages(CNode* pto) auto& tx_process_time = state.m_tx_download.m_tx_process_time; while (!tx_process_time.empty() && tx_process_time.begin()->first <= current_time && state.m_tx_download.m_tx_in_flight.size() < MAX_PEER_TX_IN_FLIGHT) { - const uint256 txid = tx_process_time.begin()->second; + const GenTxid gtxid = tx_process_time.begin()->second; // Erase this entry from tx_process_time (it may be added back for // processing at a later time, see below) tx_process_time.erase(tx_process_time.begin()); - CInv inv(state.m_wtxid_relay ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), txid); + CInv inv(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash()); if (!AlreadyHave(inv, m_mempool)) { // If this transaction was last requested more than 1 minute ago, // then request. - const auto last_request_time = GetTxRequestTime(inv.hash); + const auto last_request_time = GetTxRequestTime(gtxid); if (last_request_time <= current_time - GETDATA_TX_INTERVAL) { LogPrint(BCLog::NET, "Requesting %s peer=%d\n", inv.ToString(), pto->GetId()); vGetData.push_back(inv); @@ -4545,8 +4596,8 @@ bool PeerLogicValidation::SendMessages(CNode* pto) connman->PushMessage(pto, msgMaker.Make(NetMsgType::GETDATA, vGetData)); vGetData.clear(); } - UpdateTxRequestTime(inv.hash, current_time); - state.m_tx_download.m_tx_in_flight.emplace(inv.hash, current_time); + UpdateTxRequestTime(gtxid, current_time); + state.m_tx_download.m_tx_in_flight.emplace(gtxid.GetHash(), current_time); } else { // This transaction is in flight from someone else; queue // up processing to happen after the download times out @@ -4560,13 +4611,13 @@ bool PeerLogicValidation::SendMessages(CNode* pto) // would open us up to an attacker using inbound // wtxid-relay to prevent us from requesting transactions // from outbound txid-relay peers). 
- const auto next_process_time = CalculateTxGetDataTime(txid, current_time, !state.fPreferredDownload, false); - tx_process_time.emplace(next_process_time, txid); + const auto next_process_time = CalculateTxGetDataTime(gtxid, current_time, !state.fPreferredDownload, false); + tx_process_time.emplace(next_process_time, gtxid); } } else { // We have already seen this transaction, no need to download. - state.m_tx_download.m_tx_announced.erase(inv.hash); - state.m_tx_download.m_tx_in_flight.erase(inv.hash); + state.m_tx_download.m_tx_announced.erase(gtxid.GetHash()); + state.m_tx_download.m_tx_in_flight.erase(gtxid.GetHash()); } } diff --git a/src/netaddress.cpp b/src/netaddress.cpp index 0874b8dcea..d29aed6c8b 100644 --- a/src/netaddress.cpp +++ b/src/netaddress.cpp @@ -553,7 +553,7 @@ std::vector<unsigned char> CNetAddr::GetGroup(const std::vector<bool> &asmap) co uint64_t CNetAddr::GetHash() const { - uint256 hash = Hash(&ip[0], &ip[16]); + uint256 hash = Hash(ip); uint64_t nRet; memcpy(&nRet, &hash, sizeof(nRet)); return nRet; diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp index c56abaf6c9..0e9820da1e 100644 --- a/src/policy/policy.cpp +++ b/src/policy/policy.cpp @@ -152,6 +152,8 @@ bool IsStandardTx(const CTransaction& tx, bool permit_bare_multisig, const CFeeR * script can be anything; an attacker could use a very * expensive-to-check-upon-redemption script like: * DUP CHECKSIG DROP ... repeated 100 times... OP_1 + * + * Note that only the non-witness portion of the transaction is checked here. */ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) { @@ -164,7 +166,11 @@ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs) std::vector<std::vector<unsigned char> > vSolutions; TxoutType whichType = Solver(prev.scriptPubKey, vSolutions); - if (whichType == TxoutType::NONSTANDARD) { + if (whichType == TxoutType::NONSTANDARD || whichType == TxoutType::WITNESS_UNKNOWN) { + // WITNESS_UNKNOWN failures are typically also caught with a policy + // flag in the script interpreter, but it can be helpful to catch + // this type of NONSTANDARD transaction earlier in transaction + // validation. return false; } else if (whichType == TxoutType::SCRIPTHASH) { std::vector<std::vector<unsigned char> > stack; diff --git a/src/primitives/transaction.h b/src/primitives/transaction.h index 4514db578a..544bab6d9b 100644 --- a/src/primitives/transaction.h +++ b/src/primitives/transaction.h @@ -12,6 +12,8 @@ #include <serialize.h> #include <uint256.h> +#include <tuple> + static const int SERIALIZE_TRANSACTION_NO_WITNESS = 0x40000000; /** An outpoint - a combination of a transaction hash and an index n into its vout */ @@ -388,4 +390,17 @@ typedef std::shared_ptr<const CTransaction> CTransactionRef; static inline CTransactionRef MakeTransactionRef() { return std::make_shared<const CTransaction>(); } template <typename Tx> static inline CTransactionRef MakeTransactionRef(Tx&& txIn) { return std::make_shared<const CTransaction>(std::forward<Tx>(txIn)); } +/** A generic txid reference (txid or wtxid). 
*/ +class GenTxid +{ + const bool m_is_wtxid; + const uint256 m_hash; +public: + GenTxid(bool is_wtxid, const uint256& hash) : m_is_wtxid(is_wtxid), m_hash(hash) {} + bool IsWtxid() const { return m_is_wtxid; } + const uint256& GetHash() const { return m_hash; } + friend bool operator==(const GenTxid& a, const GenTxid& b) { return a.m_is_wtxid == b.m_is_wtxid && a.m_hash == b.m_hash; } + friend bool operator<(const GenTxid& a, const GenTxid& b) { return std::tie(a.m_is_wtxid, a.m_hash) < std::tie(b.m_is_wtxid, b.m_hash); } +}; + #endif // BITCOIN_PRIMITIVES_TRANSACTION_H diff --git a/src/protocol.cpp b/src/protocol.cpp index ee77ca3b94..c989aa3902 100644 --- a/src/protocol.cpp +++ b/src/protocol.cpp @@ -217,6 +217,7 @@ static std::string serviceFlagToStr(size_t bit) case NODE_GETUTXO: return "GETUTXO"; case NODE_BLOOM: return "BLOOM"; case NODE_WITNESS: return "WITNESS"; + case NODE_COMPACT_FILTERS: return "COMPACT_FILTERS"; case NODE_NETWORK_LIMITED: return "NETWORK_LIMITED"; // Not using default, so we get warned when a case is missing } @@ -241,3 +242,9 @@ std::vector<std::string> serviceFlagsToStr(uint64_t flags) return str_flags; } + +GenTxid ToGenTxid(const CInv& inv) +{ + assert(inv.IsGenTxMsg()); + return {inv.IsMsgWtx(), inv.hash}; +} diff --git a/src/protocol.h b/src/protocol.h index 26e64b0009..0bef12ee62 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -11,6 +11,7 @@ #define BITCOIN_PROTOCOL_H #include <netaddress.h> +#include <primitives/transaction.h> #include <serialize.h> #include <uint256.h> #include <version.h> @@ -63,100 +64,84 @@ namespace NetMsgType { /** * The version message provides information about the transmitting node to the * receiving node at the beginning of a connection. - * @see https://bitcoin.org/en/developer-reference#version */ extern const char* VERSION; /** * The verack message acknowledges a previously-received version message, * informing the connecting node that it can begin to send other messages. - * @see https://bitcoin.org/en/developer-reference#verack */ extern const char* VERACK; /** * The addr (IP address) message relays connection information for peers on the * network. - * @see https://bitcoin.org/en/developer-reference#addr */ extern const char* ADDR; /** * The inv message (inventory message) transmits one or more inventories of * objects known to the transmitting peer. - * @see https://bitcoin.org/en/developer-reference#inv */ extern const char* INV; /** * The getdata message requests one or more data objects from another node. - * @see https://bitcoin.org/en/developer-reference#getdata */ extern const char* GETDATA; /** * The merkleblock message is a reply to a getdata message which requested a * block using the inventory type MSG_MERKLEBLOCK. * @since protocol version 70001 as described by BIP37. - * @see https://bitcoin.org/en/developer-reference#merkleblock */ extern const char* MERKLEBLOCK; /** * The getblocks message requests an inv message that provides block header * hashes starting from a particular point in the block chain. - * @see https://bitcoin.org/en/developer-reference#getblocks */ extern const char* GETBLOCKS; /** * The getheaders message requests a headers message that provides block * headers starting from a particular point in the block chain. * @since protocol version 31800. - * @see https://bitcoin.org/en/developer-reference#getheaders */ extern const char* GETHEADERS; /** * The tx message transmits a single transaction. 
- * @see https://bitcoin.org/en/developer-reference#tx */ extern const char* TX; /** * The headers message sends one or more block headers to a node which * previously requested certain headers with a getheaders message. * @since protocol version 31800. - * @see https://bitcoin.org/en/developer-reference#headers */ extern const char* HEADERS; /** * The block message transmits a single serialized block. - * @see https://bitcoin.org/en/developer-reference#block */ extern const char* BLOCK; /** * The getaddr message requests an addr message from the receiving node, * preferably one with lots of IP addresses of other receiving nodes. - * @see https://bitcoin.org/en/developer-reference#getaddr */ extern const char* GETADDR; /** * The mempool message requests the TXIDs of transactions that the receiving * node has verified as valid but which have not yet appeared in a block. * @since protocol version 60002. - * @see https://bitcoin.org/en/developer-reference#mempool */ extern const char* MEMPOOL; /** * The ping message is sent periodically to help confirm that the receiving * peer is still connected. - * @see https://bitcoin.org/en/developer-reference#ping */ extern const char* PING; /** * The pong message replies to a ping message, proving to the pinging node that * the ponging node is still alive. * @since protocol version 60001 as described by BIP31. - * @see https://bitcoin.org/en/developer-reference#pong */ extern const char* PONG; /** * The notfound message is a reply to a getdata message which requested an * object the receiving node does not have available for relay. * @since protocol version 70001. - * @see https://bitcoin.org/en/developer-reference#notfound */ extern const char* NOTFOUND; /** @@ -165,7 +150,6 @@ extern const char* NOTFOUND; * @since protocol version 70001 as described by BIP37. * Only available with service bit NODE_BLOOM since protocol version * 70011 as described by BIP111. - * @see https://bitcoin.org/en/developer-reference#filterload */ extern const char* FILTERLOAD; /** @@ -174,7 +158,6 @@ extern const char* FILTERLOAD; * @since protocol version 70001 as described by BIP37. * Only available with service bit NODE_BLOOM since protocol version * 70011 as described by BIP111. - * @see https://bitcoin.org/en/developer-reference#filteradd */ extern const char* FILTERADD; /** @@ -183,14 +166,12 @@ extern const char* FILTERADD; * @since protocol version 70001 as described by BIP37. * Only available with service bit NODE_BLOOM since protocol version * 70011 as described by BIP111. - * @see https://bitcoin.org/en/developer-reference#filterclear */ extern const char* FILTERCLEAR; /** * Indicates that a node prefers to receive new block announcements via a * "headers" message rather than an "inv". * @since protocol version 70012 as described by BIP130. - * @see https://bitcoin.org/en/developer-reference#sendheaders */ extern const char* SENDHEADERS; /** @@ -291,6 +272,9 @@ enum ServiceFlags : uint64_t { // NODE_WITNESS indicates that a node can be asked for blocks and transactions including // witness data. NODE_WITNESS = (1 << 3), + // NODE_COMPACT_FILTERS means the node will service basic block filter requests. + // See BIP157 and BIP158 for details on how this is implemented. + NODE_COMPACT_FILTERS = (1 << 6), // NODE_NETWORK_LIMITED means the same as NODE_NETWORK with the limitation of only // serving the last 288 (2 day) blocks // See BIP159 for details on how this is implemented. 
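The NODE_COMPACT_FILTERS bit added above is what the earlier PrepareBlockFilterRequest change consumes: instead of re-reading -peerblockfilters from gArgs, the handler now tests the local service mask. A minimal sketch of that bitmask test, using only the enum values shown in the ServiceFlags hunk (the helper function and its name are illustrative, not part of the patch):

// Sketch: testing a service bitmask for BIP157/158 filter support.
// ExampleServesFilters is a hypothetical helper, not code from this patch.
static bool ExampleServesFilters(ServiceFlags services)
{
    // NODE_COMPACT_FILTERS is bit 6, so a plain bitwise AND is sufficient.
    return (services & NODE_COMPACT_FILTERS) != 0;
}

// Usage: a node advertising NODE_NETWORK | NODE_COMPACT_FILTERS passes the check,
// mirroring the peer.GetLocalServices() & NODE_COMPACT_FILTERS test above.
// ExampleServesFilters(ServiceFlags(NODE_NETWORK | NODE_COMPACT_FILTERS)) == true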
@@ -442,4 +426,7 @@ public: uint256 hash; }; +/** Convert a TX/WITNESS_TX/WTX CInv to a GenTxid. */ +GenTxid ToGenTxid(const CInv& inv); + #endif // BITCOIN_PROTOCOL_H diff --git a/src/pubkey.h b/src/pubkey.h index 4c28af4a4d..fcbc7e8416 100644 --- a/src/pubkey.h +++ b/src/pubkey.h @@ -157,13 +157,13 @@ public: //! Get the KeyID of this public key (hash of its serialization) CKeyID GetID() const { - return CKeyID(Hash160(vch, vch + size())); + return CKeyID(Hash160(MakeSpan(vch).first(size()))); } //! Get the 256-bit hash of this public key. uint256 GetHash() const { - return Hash(vch, vch + size()); + return Hash(MakeSpan(vch).first(size())); } /* diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp index 523f5c429b..f53fcc41f3 100644 --- a/src/qt/bitcoin.cpp +++ b/src/qt/bitcoin.cpp @@ -29,6 +29,7 @@ #include <interfaces/handler.h> #include <interfaces/node.h> +#include <node/context.h> #include <noui.h> #include <uint256.h> #include <util/system.h> @@ -80,6 +81,7 @@ static void RegisterMetaTypes() qRegisterMetaType<std::function<void()>>("std::function<void()>"); qRegisterMetaType<QMessageBox::Icon>("QMessageBox::Icon"); + qRegisterMetaType<interfaces::BlockAndHeaderTipInfo>("interfaces::BlockAndHeaderTipInfo"); } static QString GetLangTerritory() @@ -163,8 +165,9 @@ void BitcoinCore::initialize() { qDebug() << __func__ << ": Running initialization in thread"; util::ThreadRename("qt-init"); - bool rv = m_node.appInitMain(); - Q_EMIT initializeResult(rv); + interfaces::BlockAndHeaderTipInfo tip_info; + bool rv = m_node.appInitMain(&tip_info); + Q_EMIT initializeResult(rv, tip_info); } catch (const std::exception& e) { handleRunawayException(&e); } catch (...) { @@ -341,7 +344,7 @@ void BitcoinApplication::requestShutdown() Q_EMIT requestedShutdown(); } -void BitcoinApplication::initializeResult(bool success) +void BitcoinApplication::initializeResult(bool success, interfaces::BlockAndHeaderTipInfo tip_info) { qDebug() << __func__ << ": Initialization result: " << success; // Set exit result. @@ -351,7 +354,7 @@ void BitcoinApplication::initializeResult(bool success) // Log this only after AppInitMain finishes, as then logging setup is guaranteed complete qInfo() << "Platform customization:" << platformStyle->getName(); clientModel = new ClientModel(m_node, optionsModel); - window->setClientModel(clientModel); + window->setClientModel(clientModel, &tip_info); #ifdef ENABLE_WALLET if (WalletModel::isWalletEnabled()) { m_wallet_controller = new WalletController(*clientModel, platformStyle, this); @@ -430,7 +433,8 @@ int GuiMain(int argc, char* argv[]) SetupEnvironment(); util::ThreadSetInternalName("main"); - std::unique_ptr<interfaces::Node> node = interfaces::MakeNode(); + NodeContext node_context; + std::unique_ptr<interfaces::Node> node = interfaces::MakeNode(&node_context); // Subscribe to global signals from core std::unique_ptr<interfaces::Handler> handler_message_box = node->handleMessageBox(noui_ThreadSafeMessageBox); diff --git a/src/qt/bitcoin.h b/src/qt/bitcoin.h index 077a37fde5..20c6dfc047 100644 --- a/src/qt/bitcoin.h +++ b/src/qt/bitcoin.h @@ -12,6 +12,8 @@ #include <QApplication> #include <memory> +#include <interfaces/node.h> + class BitcoinGUI; class ClientModel; class NetworkStyle; @@ -21,10 +23,6 @@ class PlatformStyle; class WalletController; class WalletModel; -namespace interfaces { -class Handler; -class Node; -} // namespace interfaces /** Class encapsulating Bitcoin Core startup and shutdown. 
* Allows running startup and shutdown in a different thread from the UI thread. @@ -40,7 +38,7 @@ public Q_SLOTS: void shutdown(); Q_SIGNALS: - void initializeResult(bool success); + void initializeResult(bool success, interfaces::BlockAndHeaderTipInfo tip_info); void shutdownResult(); void runawayException(const QString &message); @@ -91,7 +89,7 @@ public: void setupPlatformStyle(); public Q_SLOTS: - void initializeResult(bool success); + void initializeResult(bool success, interfaces::BlockAndHeaderTipInfo tip_info); void shutdownResult(); /// Handle runaway exceptions. Shows a message box with the problem and quits the program. void handleRunawayException(const QString &message); diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index ebcc04a5eb..56adbf249a 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -574,7 +574,7 @@ void BitcoinGUI::createToolBars() } } -void BitcoinGUI::setClientModel(ClientModel *_clientModel) +void BitcoinGUI::setClientModel(ClientModel *_clientModel, interfaces::BlockAndHeaderTipInfo* tip_info) { this->clientModel = _clientModel; if(_clientModel) @@ -588,8 +588,8 @@ void BitcoinGUI::setClientModel(ClientModel *_clientModel) connect(_clientModel, &ClientModel::numConnectionsChanged, this, &BitcoinGUI::setNumConnections); connect(_clientModel, &ClientModel::networkActiveChanged, this, &BitcoinGUI::setNetworkActive); - modalOverlay->setKnownBestHeight(_clientModel->getHeaderTipHeight(), QDateTime::fromTime_t(_clientModel->getHeaderTipTime())); - setNumBlocks(m_node.getNumBlocks(), QDateTime::fromTime_t(m_node.getLastBlockTime()), m_node.getVerificationProgress(), false, SynchronizationState::INIT_DOWNLOAD); + modalOverlay->setKnownBestHeight(tip_info->header_height, QDateTime::fromTime_t(tip_info->header_time)); + setNumBlocks(tip_info->block_height, QDateTime::fromTime_t(tip_info->block_time), tip_info->verification_progress, false, SynchronizationState::INIT_DOWNLOAD); connect(_clientModel, &ClientModel::numBlocksChanged, this, &BitcoinGUI::setNumBlocks); // Receive and report messages from client model @@ -600,7 +600,7 @@ void BitcoinGUI::setClientModel(ClientModel *_clientModel) // Show progress dialog connect(_clientModel, &ClientModel::showProgress, this, &BitcoinGUI::showProgress); - rpcConsole->setClientModel(_clientModel); + rpcConsole->setClientModel(_clientModel, tip_info->block_height, tip_info->block_time, tip_info->verification_progress); updateProxyIcon(); diff --git a/src/qt/bitcoingui.h b/src/qt/bitcoingui.h index 697e83e772..4c55f28693 100644 --- a/src/qt/bitcoingui.h +++ b/src/qt/bitcoingui.h @@ -43,6 +43,7 @@ enum class SynchronizationState; namespace interfaces { class Handler; class Node; +struct BlockAndHeaderTipInfo; } QT_BEGIN_NAMESPACE @@ -75,7 +76,7 @@ public: /** Set the client model. The client model represents the part of the core that communicates with the P2P network, and is wallet-agnostic. 
*/ - void setClientModel(ClientModel *clientModel); + void setClientModel(ClientModel *clientModel = nullptr, interfaces::BlockAndHeaderTipInfo* tip_info = nullptr); #ifdef ENABLE_WALLET void setWalletController(WalletController* wallet_controller); #endif diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui index fea759dee0..0016fb9739 100644 --- a/src/qt/forms/optionsdialog.ui +++ b/src/qt/forms/optionsdialog.ui @@ -459,10 +459,10 @@ <item> <widget class="QCheckBox" name="connectSocksTor"> <property name="toolTip"> - <string>Connect to the Bitcoin network through a separate SOCKS5 proxy for Tor hidden services.</string> + <string>Connect to the Bitcoin network through a separate SOCKS5 proxy for Tor onion services.</string> </property> <property name="text"> - <string>Use separate SOCKS&5 proxy to reach peers via Tor hidden services:</string> + <string>Use separate SOCKS&5 proxy to reach peers via Tor onion services:</string> </property> </widget> </item> diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index 821a337a62..a14fae6460 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -556,7 +556,7 @@ bool RPCConsole::eventFilter(QObject* obj, QEvent *event) return QWidget::eventFilter(obj, event); } -void RPCConsole::setClientModel(ClientModel *model) +void RPCConsole::setClientModel(ClientModel *model, int bestblock_height, int64_t bestblock_date, double verification_progress) { clientModel = model; @@ -576,13 +576,13 @@ void RPCConsole::setClientModel(ClientModel *model) setNumConnections(model->getNumConnections()); connect(model, &ClientModel::numConnectionsChanged, this, &RPCConsole::setNumConnections); - interfaces::Node& node = clientModel->node(); - setNumBlocks(node.getNumBlocks(), QDateTime::fromTime_t(node.getLastBlockTime()), node.getVerificationProgress(), false); + setNumBlocks(bestblock_height, QDateTime::fromTime_t(bestblock_date), verification_progress, false); connect(model, &ClientModel::numBlocksChanged, this, &RPCConsole::setNumBlocks); updateNetworkState(); connect(model, &ClientModel::networkActiveChanged, this, &RPCConsole::setNetworkActive); + interfaces::Node& node = clientModel->node(); updateTrafficStats(node.getTotalBytesRecv(), node.getTotalBytesSent()); connect(model, &ClientModel::bytesChanged, this, &RPCConsole::updateTrafficStats); diff --git a/src/qt/rpcconsole.h b/src/qt/rpcconsole.h index de8e37cca2..280c5bd71a 100644 --- a/src/qt/rpcconsole.h +++ b/src/qt/rpcconsole.h @@ -46,7 +46,7 @@ public: return RPCParseCommandLine(&node, strResult, strCommand, true, pstrFilteredOut, wallet_model); } - void setClientModel(ClientModel *model); + void setClientModel(ClientModel *model = nullptr, int bestblock_height = 0, int64_t bestblock_date = 0, double verification_progress = 0.0); void addWallet(WalletModel * const walletModel); void removeWallet(WalletModel* const walletModel); diff --git a/src/qt/test/addressbooktests.cpp b/src/qt/test/addressbooktests.cpp index 9347ff9e42..035c8196bc 100644 --- a/src/qt/test/addressbooktests.cpp +++ b/src/qt/test/addressbooktests.cpp @@ -18,6 +18,7 @@ #include <key.h> #include <key_io.h> #include <wallet/wallet.h> +#include <walletinitinterface.h> #include <QApplication> #include <QTimer> @@ -59,6 +60,7 @@ void EditAddressAndSubmit( void TestAddAddressesToSendBook(interfaces::Node& node) { TestChain100Setup test; + node.setContext(&test.m_node); std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), WalletLocation(), CreateMockWalletDatabase()); 
wallet->SetupLegacyScriptPubKeyMan(); bool firstRun; diff --git a/src/qt/test/apptests.cpp b/src/qt/test/apptests.cpp index b880a99baf..0b5c341548 100644 --- a/src/qt/test/apptests.cpp +++ b/src/qt/test/apptests.cpp @@ -67,6 +67,7 @@ void AppTests::appTests() return GetDataDir() / "blocks"; }()); + qRegisterMetaType<interfaces::BlockAndHeaderTipInfo>("interfaces::BlockAndHeaderTipInfo"); m_app.parameterSetup(); m_app.createOptionsModel(true /* reset settings */); QScopedPointer<const NetworkStyle> style(NetworkStyle::instantiate(Params().NetworkIDString())); diff --git a/src/qt/test/test_main.cpp b/src/qt/test/test_main.cpp index 12efca2503..031913bd02 100644 --- a/src/qt/test/test_main.cpp +++ b/src/qt/test/test_main.cpp @@ -52,7 +52,8 @@ int main(int argc, char* argv[]) BasicTestingSetup dummy{CBaseChainParams::REGTEST}; } - std::unique_ptr<interfaces::Node> node = interfaces::MakeNode(); + NodeContext node_context; + std::unique_ptr<interfaces::Node> node = interfaces::MakeNode(&node_context); bool fInvalid = false; diff --git a/src/qt/test/wallettests.cpp b/src/qt/test/wallettests.cpp index 6648029bae..475fd589af 100644 --- a/src/qt/test/wallettests.cpp +++ b/src/qt/test/wallettests.cpp @@ -138,8 +138,7 @@ void TestGUI(interfaces::Node& node) for (int i = 0; i < 5; ++i) { test.CreateAndProcessBlock({}, GetScriptForRawPubKey(test.coinbaseKey.GetPubKey())); } - node.context()->connman = std::move(test.m_node.connman); - node.context()->mempool = std::move(test.m_node.mempool); + node.setContext(&test.m_node); std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), WalletLocation(), CreateMockWalletDatabase()); bool firstRun; wallet->LoadWallet(firstRun); diff --git a/src/random.cpp b/src/random.cpp index 9c9a35709a..af9504e0ce 100644 --- a/src/random.cpp +++ b/src/random.cpp @@ -315,12 +315,16 @@ void GetOSRand(unsigned char *ent32) if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) { RandFailure(); } + // Silence a compiler warning about unused function. + (void)GetDevURandom; #elif defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX) /* getentropy() is available on macOS 10.12 and later. */ if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) { RandFailure(); } + // Silence a compiler warning about unused function. + (void)GetDevURandom; #elif defined(HAVE_SYSCTL_ARND) /* FreeBSD, NetBSD and similar. It is possible for the call to return less * bytes than requested, so need to read in a loop. @@ -334,6 +338,8 @@ void GetOSRand(unsigned char *ent32) } have += len; } while (have < NUM_OS_RANDOM_BYTES); + // Silence a compiler warning about unused function. + (void)GetDevURandom; #else /* Fall back to /dev/urandom if there is no specific method implemented to * get system entropy for this OS. 
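The (void)GetDevURandom statements added to random.cpp above only reference the fallback function on platforms whose entropy path never calls it, which keeps -Wunused-function quiet without #ifdef-ing the definition away. A small stand-alone sketch of the same idiom, with purely illustrative names that are not from the patch:

// Sketch of the warning-suppression idiom used in GetOSRand above.
// ExampleFallback stands in for GetDevURandom; both names here are hypothetical.
static int ExampleFallback() { return 0; }

void ExampleGetEntropy()
{
    // On this code path the fallback is never called, but naming it in a
    // discarded-value expression counts as a use, so the compiler does not
    // emit an "unused function" warning for the static definition.
    (void)ExampleFallback;
}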
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 2afc9a3d4a..868ff88d08 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -525,9 +525,9 @@ static UniValue getrawmempool(const JSONRPCRequest& request) {RPCResult::Type::STR_HEX, "", "The transaction id"}, }}, RPCResult{"for verbose = true", - RPCResult::Type::OBJ, "", "", + RPCResult::Type::OBJ_DYN, "", "", { - {RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()}, + {RPCResult::Type::OBJ, "transactionid", "", MempoolEntryDescription()}, }}, }, RPCExamples{ @@ -556,7 +556,7 @@ static UniValue getmempoolancestors(const JSONRPCRequest& request) RPCResult::Type::ARR, "", "", {{RPCResult::Type::STR_HEX, "", "The transaction id of an in-mempool ancestor transaction"}}}, RPCResult{"for verbose = true", - RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()}, + RPCResult::Type::OBJ, "transactionid", "", MempoolEntryDescription()}, }, RPCExamples{ HelpExampleCli("getmempoolancestors", "\"mytxid\"") @@ -616,9 +616,9 @@ static UniValue getmempooldescendants(const JSONRPCRequest& request) RPCResult::Type::ARR, "", "", {{RPCResult::Type::STR_HEX, "", "The transaction id of an in-mempool descendant transaction"}}}, RPCResult{"for verbose = true", - RPCResult::Type::OBJ, "", "", + RPCResult::Type::OBJ_DYN, "", "", { - {RPCResult::Type::OBJ_DYN, "transactionid", "", MempoolEntryDescription()}, + {RPCResult::Type::OBJ, "transactionid", "", MempoolEntryDescription()}, }}, }, RPCExamples{ @@ -674,7 +674,7 @@ static UniValue getmempoolentry(const JSONRPCRequest& request) {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The transaction id (must be in mempool)"}, }, RPCResult{ - RPCResult::Type::OBJ_DYN, "", "", MempoolEntryDescription()}, + RPCResult::Type::OBJ, "", "", MempoolEntryDescription()}, RPCExamples{ HelpExampleCli("getmempoolentry", "\"mytxid\"") + HelpExampleRpc("getmempoolentry", "\"mytxid\"") @@ -2407,7 +2407,7 @@ static const CRPCCommand commands[] = { "hidden", "dumptxoutset", &dumptxoutset, {"path"} }, }; // clang-format on - - for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) - t.appendCommand(commands[vcidx].name, &commands[vcidx]); + for (const auto& c : commands) { + t.appendCommand(c.name, &c); + } } diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp index 66ace7263a..d61e02aee2 100644 --- a/src/rpc/client.cpp +++ b/src/rpc/client.cpp @@ -151,6 +151,7 @@ static const CRPCConvertParam vRPCConvertParams[] = { "getmempoolancestors", 1, "verbose" }, { "getmempooldescendants", 1, "verbose" }, { "bumpfee", 1, "options" }, + { "psbtbumpfee", 1, "options" }, { "logging", 0, "include" }, { "logging", 1, "exclude" }, { "disconnectnode", 1, "nodeid" }, @@ -173,6 +174,7 @@ static const CRPCConvertParam vRPCConvertParams[] = { "createwallet", 4, "avoid_reuse"}, { "createwallet", 5, "descriptors"}, { "getnodeaddresses", 0, "count"}, + { "addpeeraddress", 1, "port"}, { "stop", 0, "wait" }, }; // clang-format on diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index fee6a893eb..76aa9dbfc1 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -236,6 +236,17 @@ static UniValue generatetodescriptor(const JSONRPCRequest& request) return generateBlocks(chainman, mempool, coinbase_script, num_blocks, max_tries); } +static UniValue generate(const JSONRPCRequest& request) +{ + const std::string help_str{"generate ( nblocks maxtries ) has been replaced by the -generate cli option. 
Refer to -help for more information."}; + + if (request.fHelp) { + throw std::runtime_error(help_str); + } else { + throw JSONRPCError(RPC_METHOD_NOT_FOUND, help_str); + } +} + static UniValue generatetoaddress(const JSONRPCRequest& request) { RPCHelpMan{"generatetoaddress", @@ -1019,7 +1030,7 @@ static UniValue estimatesmartfee(const JSONRPCRequest& request) RPCResult::Type::OBJ, "", "", { {RPCResult::Type::NUM, "feerate", /* optional */ true, "estimate fee rate in " + CURRENCY_UNIT + "/kB (only present if no errors were encountered)"}, - {RPCResult::Type::ARR, "errors", "Errors encountered during processing", + {RPCResult::Type::ARR, "errors", /* optional */ true, "Errors encountered during processing (if there are any)", { {RPCResult::Type::STR, "", "error"}, }}, @@ -1098,7 +1109,7 @@ static UniValue estimaterawfee(const JSONRPCRequest& request) { {RPCResult::Type::ELISION, "", ""}, }}, - {RPCResult::Type::ARR, "errors", /* optional */ true, "Errors encountered during processing", + {RPCResult::Type::ARR, "errors", /* optional */ true, "Errors encountered during processing (if there are any)", { {RPCResult::Type::STR, "error", ""}, }}, @@ -1198,9 +1209,10 @@ static const CRPCCommand commands[] = { "util", "estimatesmartfee", &estimatesmartfee, {"conf_target", "estimate_mode"} }, { "hidden", "estimaterawfee", &estimaterawfee, {"conf_target", "threshold"} }, + { "hidden", "generate", &generate, {} }, }; // clang-format on - - for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) - t.appendCommand(commands[vcidx].name, &commands[vcidx]); + for (const auto& c : commands) { + t.appendCommand(c.name, &c); + } } diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp index 53d38f4e11..ff31bee1e3 100644 --- a/src/rpc/misc.cpp +++ b/src/rpc/misc.cpp @@ -27,9 +27,9 @@ #include <univalue.h> -static UniValue validateaddress(const JSONRPCRequest& request) +static RPCHelpMan validateaddress() { - RPCHelpMan{"validateaddress", + return RPCHelpMan{"validateaddress", "\nReturn information about the given bitcoin address.\n", { {"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The bitcoin address to validate"}, @@ -50,8 +50,8 @@ static UniValue validateaddress(const JSONRPCRequest& request) HelpExampleCli("validateaddress", "\"" + EXAMPLE_ADDRESS[0] + "\"") + HelpExampleRpc("validateaddress", "\"" + EXAMPLE_ADDRESS[0] + "\"") }, - }.Check(request); - + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ CTxDestination dest = DecodeDestination(request.params[0].get_str()); bool isValid = IsValidDestination(dest); @@ -69,11 +69,13 @@ static UniValue validateaddress(const JSONRPCRequest& request) ret.pushKVs(detail); } return ret; +}, + }; } -static UniValue createmultisig(const JSONRPCRequest& request) +static RPCHelpMan createmultisig() { - RPCHelpMan{"createmultisig", + return RPCHelpMan{"createmultisig", "\nCreates a multi-signature address with n signature of m keys required.\n" "It returns a json object with the address and redeemScript.\n", { @@ -98,8 +100,8 @@ static UniValue createmultisig(const JSONRPCRequest& request) "\nAs a JSON-RPC call\n" + HelpExampleRpc("createmultisig", "2, \"[\\\"03789ed0bb717d88f7d321a368d905e7430207ebbd82bd342cf11ae157a7ace5fd\\\",\\\"03dbc6764b8884a92e871274b87583e6d5c2a58819473e17e107ef3f6aa5a61626\\\"]\"") }, - }.Check(request); - + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ int required = request.params[0].get_int(); // Get the public keys @@ -135,11 +137,13 @@ static UniValue createmultisig(const 
JSONRPCRequest& request) result.pushKV("descriptor", descriptor->ToString()); return result; +}, + }; } -UniValue getdescriptorinfo(const JSONRPCRequest& request) +static RPCHelpMan getdescriptorinfo() { - RPCHelpMan{"getdescriptorinfo", + return RPCHelpMan{"getdescriptorinfo", {"\nAnalyses a descriptor.\n"}, { {"descriptor", RPCArg::Type::STR, RPCArg::Optional::NO, "The descriptor."}, @@ -157,8 +161,9 @@ UniValue getdescriptorinfo(const JSONRPCRequest& request) RPCExamples{ "Analyse a descriptor\n" + HelpExampleCli("getdescriptorinfo", "\"wpkh([d34db33f/84h/0h/0h]0279be667ef9dcbbac55a06295Ce870b07029Bfcdb2dce28d959f2815b16f81798)\"") - }}.Check(request); - + }, + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ RPCTypeCheck(request.params, {UniValue::VSTR}); FlatSigningProvider provider; @@ -175,11 +180,13 @@ UniValue getdescriptorinfo(const JSONRPCRequest& request) result.pushKV("issolvable", desc->IsSolvable()); result.pushKV("hasprivatekeys", provider.keys.size() > 0); return result; +}, + }; } -UniValue deriveaddresses(const JSONRPCRequest& request) +static RPCHelpMan deriveaddresses() { - RPCHelpMan{"deriveaddresses", + return RPCHelpMan{"deriveaddresses", {"\nDerives one or more addresses corresponding to an output descriptor.\n" "Examples of output descriptors are:\n" " pkh(<pubkey>) P2PKH outputs for the given pubkey\n" @@ -202,8 +209,9 @@ UniValue deriveaddresses(const JSONRPCRequest& request) RPCExamples{ "First three native segwit receive addresses\n" + HelpExampleCli("deriveaddresses", "\"wpkh([d34db33f/84h/0h/0h]xpub6DJ2dNUysrn5Vt36jH2KLBT2i1auw1tTSSomg8PhqNiUtx8QX2SvC9nrHu81fT41fvDUnhMjEzQgXnQjKEu3oaqMSzhSrHMxyyoEAmUHQbY/0/*)#cjjspncu\" \"[0,2]\"") - }}.Check(request); - + }, + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ RPCTypeCheck(request.params, {UniValue::VSTR, UniValueType()}); // Range argument is checked later const std::string desc_str = request.params[0].get_str(); @@ -254,11 +262,13 @@ UniValue deriveaddresses(const JSONRPCRequest& request) } return addresses; +}, + }; } -static UniValue verifymessage(const JSONRPCRequest& request) +static RPCHelpMan verifymessage() { - RPCHelpMan{"verifymessage", + return RPCHelpMan{"verifymessage", "\nVerify a signed message\n", { {"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The bitcoin address to use for the signature."}, @@ -278,8 +288,8 @@ static UniValue verifymessage(const JSONRPCRequest& request) "\nAs a JSON-RPC call\n" + HelpExampleRpc("verifymessage", "\"1D1ZrZNe3JUo7ZycKEYQQiQAWd9y54F4XX\", \"signature\", \"my message\"") }, - }.Check(request); - + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ LOCK(cs_main); std::string strAddress = request.params[0].get_str(); @@ -301,11 +311,13 @@ static UniValue verifymessage(const JSONRPCRequest& request) } return false; +}, + }; } -static UniValue signmessagewithprivkey(const JSONRPCRequest& request) +static RPCHelpMan signmessagewithprivkey() { - RPCHelpMan{"signmessagewithprivkey", + return RPCHelpMan{"signmessagewithprivkey", "\nSign a message with the private key of an address\n", { {"privkey", RPCArg::Type::STR, RPCArg::Optional::NO, "The private key to sign the message with."}, @@ -322,8 +334,8 @@ static UniValue signmessagewithprivkey(const JSONRPCRequest& request) "\nAs a JSON-RPC call\n" + HelpExampleRpc("signmessagewithprivkey", "\"privkey\", \"my message\"") }, - }.Check(request); - + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ std::string strPrivkey 
= request.params[0].get_str(); std::string strMessage = request.params[1].get_str(); @@ -339,11 +351,13 @@ static UniValue signmessagewithprivkey(const JSONRPCRequest& request) } return signature; +}, + }; } -static UniValue setmocktime(const JSONRPCRequest& request) +static RPCHelpMan setmocktime() { - RPCHelpMan{"setmocktime", + return RPCHelpMan{"setmocktime", "\nSet the local time to given timestamp (-regtest only)\n", { {"timestamp", RPCArg::Type::NUM, RPCArg::Optional::NO, UNIX_EPOCH_TIME + "\n" @@ -351,8 +365,8 @@ static UniValue setmocktime(const JSONRPCRequest& request) }, RPCResult{RPCResult::Type::NONE, "", ""}, RPCExamples{""}, - }.Check(request); - + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ if (!Params().IsMockableChain()) { throw std::runtime_error("setmocktime is for regression testing (-regtest mode) only"); } @@ -374,19 +388,21 @@ static UniValue setmocktime(const JSONRPCRequest& request) } return NullUniValue; +}, + }; } -static UniValue mockscheduler(const JSONRPCRequest& request) +static RPCHelpMan mockscheduler() { - RPCHelpMan{"mockscheduler", + return RPCHelpMan{"mockscheduler", "\nBump the scheduler into the future (-regtest only)\n", { {"delta_time", RPCArg::Type::NUM, RPCArg::Optional::NO, "Number of seconds to forward the scheduler into the future." }, }, RPCResult{RPCResult::Type::NONE, "", ""}, RPCExamples{""}, - }.Check(request); - + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ if (!Params().IsMockableChain()) { throw std::runtime_error("mockscheduler is for regression testing (-regtest mode) only"); } @@ -405,6 +421,8 @@ static UniValue mockscheduler(const JSONRPCRequest& request) node.scheduler->MockForward(std::chrono::seconds(delta_seconds)); return NullUniValue; +}, + }; } static UniValue RPCLockedMemoryInfo() @@ -439,12 +457,12 @@ static std::string RPCMallocInfo() } #endif -static UniValue getmemoryinfo(const JSONRPCRequest& request) +static RPCHelpMan getmemoryinfo() { /* Please, avoid using the word "pool" here in the RPC interface or help, * as users will undoubtedly confuse it with the other "memory pool" */ - RPCHelpMan{"getmemoryinfo", + return RPCHelpMan{"getmemoryinfo", "Returns an object containing information about memory usage.\n", { {"mode", RPCArg::Type::STR, /* default */ "\"stats\"", "determines what kind of information is returned.\n" @@ -474,8 +492,8 @@ static UniValue getmemoryinfo(const JSONRPCRequest& request) HelpExampleCli("getmemoryinfo", "") + HelpExampleRpc("getmemoryinfo", "") }, - }.Check(request); - + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ std::string mode = request.params[0].isNull() ? 
"stats" : request.params[0].get_str(); if (mode == "stats") { UniValue obj(UniValue::VOBJ); @@ -490,6 +508,8 @@ static UniValue getmemoryinfo(const JSONRPCRequest& request) } else { throw JSONRPCError(RPC_INVALID_PARAMETER, "unknown mode " + mode); } +}, + }; } static void EnableOrDisableLogCategories(UniValue cats, bool enable) { @@ -510,9 +530,9 @@ static void EnableOrDisableLogCategories(UniValue cats, bool enable) { } } -UniValue logging(const JSONRPCRequest& request) +static RPCHelpMan logging() { - RPCHelpMan{"logging", + return RPCHelpMan{"logging", "Gets and sets the logging configuration.\n" "When called without an argument, returns the list of categories with status that are currently being debug logged or not.\n" "When called with arguments, adds or removes categories from debug logging and return the lists above.\n" @@ -543,8 +563,8 @@ UniValue logging(const JSONRPCRequest& request) HelpExampleCli("logging", "\"[\\\"all\\\"]\" \"[\\\"http\\\"]\"") + HelpExampleRpc("logging", "[\"all\"], [\"libevent\"]") }, - }.Check(request); - + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ uint32_t original_log_categories = LogInstance().GetCategoryMask(); if (request.params[0].isArray()) { EnableOrDisableLogCategories(request.params[0], true); @@ -575,28 +595,47 @@ UniValue logging(const JSONRPCRequest& request) } return result; +}, + }; } -static UniValue echo(const JSONRPCRequest& request) +static RPCHelpMan echo(const std::string& name) { - if (request.fHelp) - throw std::runtime_error( - RPCHelpMan{"echo|echojson ...", + return RPCHelpMan{name, "\nSimply echo back the input arguments. This command is for testing.\n" - "\nIt will return an internal bug report when exactly 100 arguments are passed.\n" + "\nIt will return an internal bug report when arg9='trigger_internal_bug' is passed.\n" "\nThe difference between echo and echojson is that echojson has argument conversion enabled in the client-side table in " "bitcoin-cli and the GUI. 
There is no server-side difference.", - {}, + { + {"arg0", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""}, + {"arg1", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""}, + {"arg2", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""}, + {"arg3", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""}, + {"arg4", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""}, + {"arg5", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""}, + {"arg6", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""}, + {"arg7", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""}, + {"arg8", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""}, + {"arg9", RPCArg::Type::STR, RPCArg::Optional::OMITTED_NAMED_ARG, ""}, + }, RPCResult{RPCResult::Type::NONE, "", "Returns whatever was passed in"}, RPCExamples{""}, - }.ToString() - ); + [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue +{ + if (request.fHelp) throw std::runtime_error(self.ToString()); - CHECK_NONFATAL(request.params.size() != 100); + if (request.params[9].isStr()) { + CHECK_NONFATAL(request.params[9].get_str() != "trigger_internal_bug"); + } return request.params; +}, + }; } +static RPCHelpMan echo() { return echo("echo"); } +static RPCHelpMan echojson() { return echo("echojson"); } + void RegisterMiscRPCCommands(CRPCTable &t) { // clang-format off @@ -616,10 +655,10 @@ static const CRPCCommand commands[] = { "hidden", "setmocktime", &setmocktime, {"timestamp"}}, { "hidden", "mockscheduler", &mockscheduler, {"delta_time"}}, { "hidden", "echo", &echo, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}}, - { "hidden", "echojson", &echo, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}}, + { "hidden", "echojson", &echojson, {"arg0","arg1","arg2","arg3","arg4","arg5","arg6","arg7","arg8","arg9"}}, }; // clang-format on - - for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) - t.appendCommand(commands[vcidx].name, &commands[vcidx]); + for (const auto& c : commands) { + t.appendCommand(c.name, &c); + } } diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 9981ea35df..9bd7c15992 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -264,7 +264,7 @@ static UniValue addnode(const JSONRPCRequest& request) if (strCommand == "onetry") { CAddress addr; - node.connman->OpenNetworkConnection(addr, false, nullptr, strNode.c_str(), false, false, true); + node.connman->OpenNetworkConnection(addr, false, nullptr, strNode.c_str(), ConnectionType::MANUAL); return NullUniValue; } @@ -276,7 +276,7 @@ static UniValue addnode(const JSONRPCRequest& request) else if(strCommand == "remove") { if(!node.connman->RemoveAddedNode(strNode)) - throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED, "Error: Node has not been added."); + throw JSONRPCError(RPC_CLIENT_NODE_NOT_ADDED, "Error: Node could not be removed. It has not been added previously."); } return NullUniValue; @@ -727,7 +727,7 @@ static UniValue getnodeaddresses(const JSONRPCRequest& request) RPCHelpMan{"getnodeaddresses", "\nReturn known addresses which can potentially be used to find new nodes in the network\n", { - {"count", RPCArg::Type::NUM, /* default */ "1", "How many addresses to return. Limited to the smaller of " + ToString(ADDRMAN_GETADDR_MAX) + " or " + ToString(ADDRMAN_GETADDR_MAX_PCT) + "% of all known addresses."}, + {"count", RPCArg::Type::NUM, /* default */ "1", "The maximum number of addresses to return. 
Specify 0 to return all known addresses."}, }, RPCResult{ RPCResult::Type::ARR, "", "", @@ -754,18 +754,16 @@ static UniValue getnodeaddresses(const JSONRPCRequest& request) int count = 1; if (!request.params[0].isNull()) { count = request.params[0].get_int(); - if (count <= 0) { + if (count < 0) { throw JSONRPCError(RPC_INVALID_PARAMETER, "Address count out of range"); } } // returns a shuffled list of CAddress - std::vector<CAddress> vAddr = node.connman->GetAddresses(); + std::vector<CAddress> vAddr = node.connman->GetAddresses(count, /* max_pct */ 0); UniValue ret(UniValue::VARR); - int address_return_count = std::min<int>(count, vAddr.size()); - for (int i = 0; i < address_return_count; ++i) { + for (const CAddress& addr : vAddr) { UniValue obj(UniValue::VOBJ); - const CAddress& addr = vAddr[i]; obj.pushKV("time", (int)addr.nTime); obj.pushKV("services", (uint64_t)addr.nServices); obj.pushKV("address", addr.ToStringIP()); @@ -775,6 +773,54 @@ static UniValue getnodeaddresses(const JSONRPCRequest& request) return ret; } +static UniValue addpeeraddress(const JSONRPCRequest& request) +{ + RPCHelpMan{"addpeeraddress", + "\nAdd the address of a potential peer to the address manager. This RPC is for testing only.\n", + { + {"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The IP address of the peer"}, + {"port", RPCArg::Type::NUM, RPCArg::Optional::NO, "The port of the peer"}, + }, + RPCResult{ + RPCResult::Type::OBJ, "", "", + { + {RPCResult::Type::BOOL, "success", "whether the peer address was successfully added to the address manager"}, + }, + }, + RPCExamples{ + HelpExampleCli("addpeeraddress", "\"1.2.3.4\" 8333") + + HelpExampleRpc("addpeeraddress", "\"1.2.3.4\", 8333") + }, + }.Check(request); + + NodeContext& node = EnsureNodeContext(request.context); + if (!node.connman) { + throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); + } + + UniValue obj(UniValue::VOBJ); + + std::string addr_string = request.params[0].get_str(); + uint16_t port = request.params[1].get_int(); + + CNetAddr net_addr; + if (!LookupHost(addr_string, net_addr, false)) { + obj.pushKV("success", false); + return obj; + } + CAddress address = CAddress({net_addr, port}, ServiceFlags(NODE_NETWORK|NODE_WITNESS)); + address.nTime = GetAdjustedTime(); + // The source address is set equal to the address. This is equivalent to the peer + // announcing itself. 
+ if (!node.connman->AddNewAddresses({address}, address)) { + obj.pushKV("success", false); + return obj; + } + + obj.pushKV("success", true); + return obj; +} + void RegisterNetRPCCommands(CRPCTable &t) { // clang-format off @@ -794,9 +840,10 @@ static const CRPCCommand commands[] = { "network", "clearbanned", &clearbanned, {} }, { "network", "setnetworkactive", &setnetworkactive, {"state"} }, { "network", "getnodeaddresses", &getnodeaddresses, {"count"} }, + { "hidden", "addpeeraddress", &addpeeraddress, {"address", "port"} }, }; // clang-format on - - for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) - t.appendCommand(commands[vcidx].name, &commands[vcidx]); + for (const auto& c : commands) { + t.appendCommand(c.name, &c); + } } diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 70caf6009a..abc8168c55 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -601,7 +601,7 @@ static UniValue decodescript(const JSONRPCRequest& request) UniValue sr(UniValue::VOBJ); CScript segwitScr; if (which_type == TxoutType::PUBKEY) { - segwitScr = GetScriptForDestination(WitnessV0KeyHash(Hash160(solutions_data[0].begin(), solutions_data[0].end()))); + segwitScr = GetScriptForDestination(WitnessV0KeyHash(Hash160(solutions_data[0]))); } else if (which_type == TxoutType::PUBKEYHASH) { segwitScr = GetScriptForDestination(WitnessV0KeyHash(uint160{solutions_data[0]})); } else { @@ -744,7 +744,7 @@ static UniValue signrawtransactionwithkey(const JSONRPCRequest& request) { {RPCResult::Type::STR_HEX, "hex", "The hex-encoded raw transaction with signature(s)"}, {RPCResult::Type::BOOL, "complete", "If the transaction has a complete set of signatures"}, - {RPCResult::Type::ARR, "errors", "Script verification errors (if there are any)", + {RPCResult::Type::ARR, "errors", /* optional */ true, "Script verification errors (if there are any)", { {RPCResult::Type::OBJ, "", "", { @@ -1348,7 +1348,7 @@ UniValue finalizepsbt(const JSONRPCRequest& request) if (complete && extract) { ssTx << mtx; - result_str = HexStr(ssTx.str()); + result_str = HexStr(ssTx); result.pushKV("hex", result_str); } else { ssTx << psbtx; @@ -1722,7 +1722,7 @@ UniValue analyzepsbt(const JSONRPCRequest& request) {RPCResult::Type::STR_AMOUNT, "estimated_feerate", /* optional */ true, "Estimated feerate of the final signed transaction in " + CURRENCY_UNIT + "/kB. Shown only if all UTXO slots in the PSBT have been filled"}, {RPCResult::Type::STR_AMOUNT, "fee", /* optional */ true, "The transaction fee paid. 
Shown only if all UTXO slots in the PSBT have been filled"}, {RPCResult::Type::STR, "next", "Role of the next person that this psbt needs to go to"}, - {RPCResult::Type::STR, "error", "Error message if there is one"}, + {RPCResult::Type::STR, "error", /* optional */ true, "Error message (if there is one)"}, } }, RPCExamples { @@ -1821,7 +1821,7 @@ static const CRPCCommand commands[] = { "blockchain", "verifytxoutproof", &verifytxoutproof, {"proof"} }, }; // clang-format on - - for (unsigned int vcidx = 0; vcidx < ARRAYLEN(commands); vcidx++) - t.appendCommand(commands[vcidx].name, &commands[vcidx]); + for (const auto& c : commands) { + t.appendCommand(c.name, &c); + } } diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp index 7fef45f50e..d9ad70fa37 100644 --- a/src/rpc/request.cpp +++ b/src/rpc/request.cpp @@ -78,7 +78,7 @@ bool GenerateAuthCookie(std::string *cookie_out) const size_t COOKIE_SIZE = 32; unsigned char rand_pwd[COOKIE_SIZE]; GetRandBytes(rand_pwd, COOKIE_SIZE); - std::string cookie = COOKIEAUTH_USER + ":" + HexStr(rand_pwd, rand_pwd+COOKIE_SIZE); + std::string cookie = COOKIEAUTH_USER + ":" + HexStr(rand_pwd); /** the umask determines what permissions are used to create this file - * these are set to 077 in init.cpp unless overridden with -sysperms. diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp index e5f6b1b9f1..9c8e7fe04a 100644 --- a/src/rpc/server.cpp +++ b/src/rpc/server.cpp @@ -256,13 +256,8 @@ static const CRPCCommand vRPCCommands[] = CRPCTable::CRPCTable() { - unsigned int vcidx; - for (vcidx = 0; vcidx < (sizeof(vRPCCommands) / sizeof(vRPCCommands[0])); vcidx++) - { - const CRPCCommand *pcmd; - - pcmd = &vRPCCommands[vcidx]; - mapCommands[pcmd->name].push_back(pcmd); + for (const auto& c : vRPCCommands) { + appendCommand(c.name, &c); } } diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp index 9f4c7bee9c..40dfdb587e 100644 --- a/src/rpc/util.cpp +++ b/src/rpc/util.cpp @@ -260,7 +260,7 @@ public: UniValue obj(UniValue::VOBJ); obj.pushKV("iswitness", true); obj.pushKV("witness_version", (int)id.version); - obj.pushKV("witness_program", HexStr(id.program, id.program + id.length)); + obj.pushKV("witness_program", HexStr(Span<const unsigned char>(id.program, id.length))); return obj; } }; @@ -504,7 +504,7 @@ std::string RPCHelpMan::ToString() const ret += m_name; bool was_optional{false}; for (const auto& arg : m_args) { - if (arg.m_hidden) continue; + if (arg.m_hidden) break; // Any arg that follows is also hidden const bool optional = arg.IsOptional(); ret += " "; if (optional) { @@ -526,7 +526,7 @@ std::string RPCHelpMan::ToString() const Sections sections; for (size_t i{0}; i < m_args.size(); ++i) { const auto& arg = m_args.at(i); - if (arg.m_hidden) continue; + if (arg.m_hidden) break; // Any arg that follows is also hidden if (i == 0) ret += "\nArguments:\n"; diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 9978d084d5..6c0a98cca2 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -190,7 +190,7 @@ class OriginPubkeyProvider final : public PubkeyProvider std::string OriginString() const { - return HexStr(std::begin(m_origin.fingerprint), std::end(m_origin.fingerprint)) + FormatHDKeypath(m_origin.path); + return HexStr(m_origin.fingerprint) + FormatHDKeypath(m_origin.path); } public: diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index 9415bba585..39feb4ccc9 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -986,9 +986,9 @@ bool 
EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& else if (opcode == OP_SHA256) CSHA256().Write(vch.data(), vch.size()).Finalize(vchHash.data()); else if (opcode == OP_HASH160) - CHash160().Write(vch.data(), vch.size()).Finalize(vchHash.data()); + CHash160().Write(vch).Finalize(vchHash); else if (opcode == OP_HASH256) - CHash256().Write(vch.data(), vch.size()).Finalize(vchHash.data()); + CHash256().Write(vch).Finalize(vchHash); popstack(stack); stack.push_back(vchHash); } diff --git a/src/script/sign.cpp b/src/script/sign.cpp index f425215549..9b3f94f14d 100644 --- a/src/script/sign.cpp +++ b/src/script/sign.cpp @@ -186,6 +186,8 @@ static CScript PushAll(const std::vector<valtype>& values) result << OP_0; } else if (v.size() == 1 && v[0] >= 1 && v[0] <= 16) { result << CScript::EncodeOP_N(v[0]); + } else if (v.size() == 1 && v[0] == 0x81) { + result << OP_1NEGATE; } else { result << v; } diff --git a/src/script/standard.cpp b/src/script/standard.cpp index 1c4990791c..3a4882f280 100644 --- a/src/script/standard.cpp +++ b/src/script/standard.cpp @@ -16,10 +16,10 @@ typedef std::vector<unsigned char> valtype; bool fAcceptDatacarrier = DEFAULT_ACCEPT_DATACARRIER; unsigned nMaxDatacarrierBytes = MAX_OP_RETURN_RELAY; -CScriptID::CScriptID(const CScript& in) : BaseHash(Hash160(in.begin(), in.end())) {} +CScriptID::CScriptID(const CScript& in) : BaseHash(Hash160(in)) {} CScriptID::CScriptID(const ScriptHash& in) : BaseHash(static_cast<uint160>(in)) {} -ScriptHash::ScriptHash(const CScript& in) : BaseHash(Hash160(in.begin(), in.end())) {} +ScriptHash::ScriptHash(const CScript& in) : BaseHash(Hash160(in)) {} ScriptHash::ScriptHash(const CScriptID& in) : BaseHash(static_cast<uint160>(in)) {} PKHash::PKHash(const CPubKey& pubkey) : BaseHash(pubkey.GetID()) {} @@ -318,7 +318,7 @@ CScript GetScriptForWitness(const CScript& redeemscript) std::vector<std::vector<unsigned char> > vSolutions; TxoutType typ = Solver(redeemscript, vSolutions); if (typ == TxoutType::PUBKEY) { - return GetScriptForDestination(WitnessV0KeyHash(Hash160(vSolutions[0].begin(), vSolutions[0].end()))); + return GetScriptForDestination(WitnessV0KeyHash(Hash160(vSolutions[0]))); } else if (typ == TxoutType::PUBKEYHASH) { return GetScriptForDestination(WitnessV0KeyHash(uint160{vSolutions[0]})); } diff --git a/src/script/standard.h b/src/script/standard.h index fd29353886..992e37675f 100644 --- a/src/script/standard.h +++ b/src/script/standard.h @@ -79,6 +79,9 @@ public: { return m_hash.size(); } + + unsigned char* data() { return m_hash.data(); } + const unsigned char* data() const { return m_hash.data(); } }; /** A reference to a CScript: the Hash160 of its serialization (see script.h) */ diff --git a/src/span.h b/src/span.h index 79f13c9203..4afb383a59 100644 --- a/src/span.h +++ b/src/span.h @@ -207,4 +207,16 @@ T& SpanPopBack(Span<T>& span) return back; } +// Helper functions to safely cast to unsigned char pointers. +inline unsigned char* UCharCast(char* c) { return (unsigned char*)c; } +inline unsigned char* UCharCast(unsigned char* c) { return c; } +inline const unsigned char* UCharCast(const char* c) { return (unsigned char*)c; } +inline const unsigned char* UCharCast(const unsigned char* c) { return c; } + +// Helper function to safely convert a Span to a Span<[const] unsigned char>. 
+template <typename T> constexpr auto UCharSpanCast(Span<T> s) -> Span<typename std::remove_pointer<decltype(UCharCast(s.data()))>::type> { return {UCharCast(s.data()), s.size()}; } + +/** Like MakeSpan, but for (const) unsigned char member types only. Only works for (un)signed char containers. */ +template <typename V> constexpr auto MakeUCharSpan(V&& v) -> decltype(UCharSpanCast(MakeSpan(std::forward<V>(v)))) { return UCharSpanCast(MakeSpan(std::forward<V>(v))); } + #endif diff --git a/src/sync.cpp b/src/sync.cpp index 10f0483189..4be13a3c48 100644 --- a/src/sync.cpp +++ b/src/sync.cpp @@ -149,12 +149,17 @@ static void push_lock(void* c, const CLockLocation& locklocation) const LockPair p1 = std::make_pair(i.first, c); if (lockdata.lockorders.count(p1)) continue; - lockdata.lockorders.emplace(p1, lock_stack); const LockPair p2 = std::make_pair(c, i.first); + if (lockdata.lockorders.count(p2)) { + auto lock_stack_copy = lock_stack; + lock_stack.pop_back(); + potential_deadlock_detected(p1, lockdata.lockorders[p2], lock_stack_copy); + // potential_deadlock_detected() does not return. + } + + lockdata.lockorders.emplace(p1, lock_stack); lockdata.invlockorders.insert(p2); - if (lockdata.lockorders.count(p2)) - potential_deadlock_detected(p1, lockdata.lockorders[p2], lockdata.lockorders[p1]); } } @@ -259,6 +264,17 @@ void DeleteLock(void* cs) } } +bool LockStackEmpty() +{ + LockData& lockdata = GetLockData(); + std::lock_guard<std::mutex> lock(lockdata.dd_mutex); + const auto it = lockdata.m_lock_stacks.find(std::this_thread::get_id()); + if (it == lockdata.m_lock_stacks.end()) { + return true; + } + return it->second.empty(); +} + bool g_debug_lockorder_abort = true; #endif /* DEBUG_LOCKORDER */ diff --git a/src/sync.h b/src/sync.h index 77327d8bfe..05ff2ee8a9 100644 --- a/src/sync.h +++ b/src/sync.h @@ -56,6 +56,7 @@ template <typename MutexType> void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs); void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs); void DeleteLock(void* cs); +bool LockStackEmpty(); /** * Call abort() if a potential lock order deadlock bug is detected, instead of @@ -64,13 +65,14 @@ void DeleteLock(void* cs); */ extern bool g_debug_lockorder_abort; #else -void static inline EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {} -void static inline LeaveCritical() {} -void static inline CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {} +inline void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {} +inline void LeaveCritical() {} +inline void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {} template <typename MutexType> -void static inline AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs) {} -void static inline AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {} -void static inline DeleteLock(void* cs) {} +inline void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs) {} +inline void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {} +inline void DeleteLock(void* cs) {} +inline bool LockStackEmpty() { return true; } #endif #define AssertLockHeld(cs) 
AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs) #define AssertLockNotHeld(cs) AssertLockNotHeldInternal(#cs, __FILE__, __LINE__, &cs) diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp index bc6b38c682..25fdd64568 100644 --- a/src/test/addrman_tests.cpp +++ b/src/test/addrman_tests.cpp @@ -392,7 +392,7 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr) // Test: Sanity check, GetAddr should never return anything if addrman // is empty. BOOST_CHECK_EQUAL(addrman.size(), 0U); - std::vector<CAddress> vAddr1 = addrman.GetAddr(); + std::vector<CAddress> vAddr1 = addrman.GetAddr(/* max_addresses */ 0, /* max_pct */0); BOOST_CHECK_EQUAL(vAddr1.size(), 0U); CAddress addr1 = CAddress(ResolveService("250.250.2.1", 8333), NODE_NONE); @@ -415,13 +415,15 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr) BOOST_CHECK(addrman.Add(addr4, source2)); BOOST_CHECK(addrman.Add(addr5, source1)); - // GetAddr returns 23% of addresses, 23% of 5 is 1 rounded down. - BOOST_CHECK_EQUAL(addrman.GetAddr().size(), 1U); + BOOST_CHECK_EQUAL(addrman.GetAddr(/* max_addresses */ 0, /* max_pct */ 0).size(), 5U); + // Net processing asks for 23% of addresses. 23% of 5 is 1 rounded down. + BOOST_CHECK_EQUAL(addrman.GetAddr(/* max_addresses */ 2500, /* max_pct */ 23).size(), 1U); // Test: Ensure GetAddr works with new and tried addresses. addrman.Good(CAddress(addr1, NODE_NONE)); addrman.Good(CAddress(addr2, NODE_NONE)); - BOOST_CHECK_EQUAL(addrman.GetAddr().size(), 1U); + BOOST_CHECK_EQUAL(addrman.GetAddr(/* max_addresses */ 0, /* max_pct */ 0).size(), 5U); + BOOST_CHECK_EQUAL(addrman.GetAddr(/* max_addresses */ 2500, /* max_pct */ 23).size(), 1U); // Test: Ensure GetAddr still returns 23% when addrman has many addrs. for (unsigned int i = 1; i < (8 * 256); i++) { @@ -436,7 +438,7 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr) if (i % 8 == 0) addrman.Good(addr); } - std::vector<CAddress> vAddr = addrman.GetAddr(); + std::vector<CAddress> vAddr = addrman.GetAddr(/* max_addresses */ 2500, /* max_pct */ 23); size_t percent23 = (addrman.size() * 23) / 100; BOOST_CHECK_EQUAL(vAddr.size(), percent23); diff --git a/src/test/crypto_tests.cpp b/src/test/crypto_tests.cpp index f64251fe32..b3cc8cefd9 100644 --- a/src/test/crypto_tests.cpp +++ b/src/test/crypto_tests.cpp @@ -183,7 +183,7 @@ static void TestHKDF_SHA256_32(const std::string &ikm_hex, const std::string &sa CHKDF_HMAC_SHA256_L32 hkdf32(initial_key_material.data(), initial_key_material.size(), salt_stringified); unsigned char out[32]; hkdf32.Expand32(info_stringified, out); - BOOST_CHECK(HexStr(out, out + 32) == okm_check_hex); + BOOST_CHECK(HexStr(out) == okm_check_hex); } static std::string LongTestString() @@ -743,7 +743,7 @@ BOOST_AUTO_TEST_CASE(sha256d64) in[j] = InsecureRandBits(8); } for (int j = 0; j < i; ++j) { - CHash256().Write(in + 64 * j, 64).Finalize(out1 + 32 * j); + CHash256().Write({in + 64 * j, 64}).Finalize({out1 + 32 * j, 32}); } SHA256D64(out2, in, i); BOOST_CHECK(memcmp(out1, out2, 32 * i) == 0); diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp index b1a635d9da..0115803e58 100644 --- a/src/test/denialofservice_tests.cpp +++ b/src/test/denialofservice_tests.cpp @@ -84,7 +84,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) // Mock an outbound peer CAddress addr1(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", /*fInboundIn=*/ false); + CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr1, 0, 
0, CAddress(), "", ConnectionType::OUTBOUND); dummyNode1.SetSendVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode1); @@ -136,7 +136,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) static void AddRandomOutboundPeer(std::vector<CNode *> &vNodes, PeerLogicValidation &peerLogic, CConnmanTest* connman) { CAddress addr(ip(g_insecure_rand_ctx.randbits(32)), NODE_NONE); - vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr, 0, 0, CAddress(), "", /*fInboundIn=*/ false)); + vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr, 0, 0, CAddress(), "", ConnectionType::OUTBOUND)); CNode &node = *vNodes.back(); node.SetSendVersion(PROTOCOL_VERSION); @@ -227,7 +227,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) banman->ClearBanned(); CAddress addr1(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", true); + CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", ConnectionType::INBOUND); dummyNode1.SetSendVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode1); dummyNode1.nVersion = 1; @@ -244,7 +244,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) BOOST_CHECK(!banman->IsDiscouraged(ip(0xa0b0c001|0x0000ff00))); // Different IP, not discouraged CAddress addr2(ip(0xa0b0c002), NODE_NONE); - CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, 1, CAddress(), "", true); + CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, 1, CAddress(), "", ConnectionType::INBOUND); dummyNode2.SetSendVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode2); dummyNode2.nVersion = 1; @@ -286,7 +286,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) SetMockTime(nStartTime); // Overrides future calls to GetTime() CAddress addr(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, 4, CAddress(), "", true); + CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, 4, CAddress(), "", ConnectionType::INBOUND); dummyNode.SetSendVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode); dummyNode.nVersion = 1; diff --git a/src/test/fuzz/crypto.cpp b/src/test/fuzz/crypto.cpp index 595cdf9abb..3edcf96495 100644 --- a/src/test/fuzz/crypto.cpp +++ b/src/test/fuzz/crypto.cpp @@ -44,8 +44,8 @@ void test_one_input(const std::vector<uint8_t>& buffer) } } - (void)hash160.Write(data.data(), data.size()); - (void)hash256.Write(data.data(), data.size()); + (void)hash160.Write(data); + (void)hash256.Write(data); (void)hmac_sha256.Write(data.data(), data.size()); (void)hmac_sha512.Write(data.data(), data.size()); (void)ripemd160.Write(data.data(), data.size()); @@ -54,9 +54,8 @@ void test_one_input(const std::vector<uint8_t>& buffer) (void)sha512.Write(data.data(), data.size()); (void)sip_hasher.Write(data.data(), data.size()); - (void)Hash(data.begin(), data.end()); + (void)Hash(data); (void)Hash160(data); - (void)Hash160(data.begin(), data.end()); (void)sha512.Size(); break; } @@ -73,12 +72,12 @@ void test_one_input(const std::vector<uint8_t>& buffer) switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 8)) { case 0: { data.resize(CHash160::OUTPUT_SIZE); - hash160.Finalize(data.data()); + hash160.Finalize(data); break; } case 1: { data.resize(CHash256::OUTPUT_SIZE); - hash256.Finalize(data.data()); + hash256.Finalize(data); break; } case 2: { diff --git a/src/test/fuzz/key.cpp b/src/test/fuzz/key.cpp index c746374c61..955b954700 100644 --- 
a/src/test/fuzz/key.cpp +++ b/src/test/fuzz/key.cpp @@ -85,7 +85,7 @@ void test_one_input(const std::vector<uint8_t>& buffer) assert(negated_key == key); } - const uint256 random_uint256 = Hash(buffer.begin(), buffer.end()); + const uint256 random_uint256 = Hash(buffer); { CKey child_key; diff --git a/src/test/fuzz/net_permissions.cpp b/src/test/fuzz/net_permissions.cpp index ae531f4462..8a674ac1e9 100644 --- a/src/test/fuzz/net_permissions.cpp +++ b/src/test/fuzz/net_permissions.cpp @@ -24,6 +24,7 @@ void test_one_input(const std::vector<uint8_t>& buffer) NetPermissionFlags::PF_FORCERELAY, NetPermissionFlags::PF_NOBAN, NetPermissionFlags::PF_MEMPOOL, + NetPermissionFlags::PF_ADDR, NetPermissionFlags::PF_ISIMPLICIT, NetPermissionFlags::PF_ALL, }) : diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp index 9e40d5cd55..677b87a47a 100644 --- a/src/test/fuzz/process_message.cpp +++ b/src/test/fuzz/process_message.cpp @@ -80,7 +80,7 @@ void test_one_input(const std::vector<uint8_t>& buffer) return; } CDataStream random_bytes_data_stream{fuzzed_data_provider.ConsumeRemainingBytes<unsigned char>(), SER_NETWORK, PROTOCOL_VERSION}; - CNode& p2p_node = *MakeUnique<CNode>(0, ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_BLOOM), 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, false).release(); + CNode& p2p_node = *MakeUnique<CNode>(0, ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_BLOOM), 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, ConnectionType::OUTBOUND).release(); p2p_node.fSuccessfullyConnected = true; p2p_node.nVersion = PROTOCOL_VERSION; p2p_node.SetSendVersion(PROTOCOL_VERSION); diff --git a/src/test/fuzz/process_messages.cpp b/src/test/fuzz/process_messages.cpp index 91ebf9fb1b..ef427442e9 100644 --- a/src/test/fuzz/process_messages.cpp +++ b/src/test/fuzz/process_messages.cpp @@ -44,9 +44,8 @@ void test_one_input(const std::vector<uint8_t>& buffer) const auto num_peers_to_add = fuzzed_data_provider.ConsumeIntegralInRange(1, 3); for (int i = 0; i < num_peers_to_add; ++i) { const ServiceFlags service_flags = ServiceFlags(fuzzed_data_provider.ConsumeIntegral<uint64_t>()); - const bool inbound{fuzzed_data_provider.ConsumeBool()}; - const bool block_relay_only{fuzzed_data_provider.ConsumeBool()}; - peers.push_back(MakeUnique<CNode>(i, service_flags, 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, inbound, block_relay_only).release()); + const ConnectionType conn_type = fuzzed_data_provider.PickValueInArray({ConnectionType::INBOUND, ConnectionType::OUTBOUND, ConnectionType::MANUAL, ConnectionType::FEELER, ConnectionType::BLOCK_RELAY, ConnectionType::ADDR_FETCH}); + peers.push_back(MakeUnique<CNode>(i, service_flags, 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, conn_type).release()); CNode& p2p_node = *peers.back(); p2p_node.fSuccessfullyConnected = true; diff --git a/src/test/key_tests.cpp b/src/test/key_tests.cpp index fd35537c77..4e4c44266a 100644 --- a/src/test/key_tests.cpp +++ b/src/test/key_tests.cpp @@ -77,7 +77,7 @@ BOOST_AUTO_TEST_CASE(key_test1) for (int n=0; n<16; n++) { std::string strMsg = strprintf("Very secret message %i: 11", n); - uint256 hashMsg = Hash(strMsg.begin(), strMsg.end()); + uint256 hashMsg = Hash(strMsg); // normal signatures @@ -134,7 +134,7 @@ 
BOOST_AUTO_TEST_CASE(key_test1) std::vector<unsigned char> detsig, detsigc; std::string strMsg = "Very deterministic message"; - uint256 hashMsg = Hash(strMsg.begin(), strMsg.end()); + uint256 hashMsg = Hash(strMsg); BOOST_CHECK(key1.Sign(hashMsg, detsig)); BOOST_CHECK(key1C.Sign(hashMsg, detsigc)); BOOST_CHECK(detsig == detsigc); @@ -158,7 +158,7 @@ BOOST_AUTO_TEST_CASE(key_signature_tests) // When entropy is specified, we should see at least one high R signature within 20 signatures CKey key = DecodeSecret(strSecret1); std::string msg = "A message to be signed"; - uint256 msg_hash = Hash(msg.begin(), msg.end()); + uint256 msg_hash = Hash(msg); std::vector<unsigned char> sig; bool found = false; @@ -179,7 +179,7 @@ BOOST_AUTO_TEST_CASE(key_signature_tests) for (int i = 0; i < 256; ++i) { sig.clear(); std::string msg = "A message to be signed" + ToString(i); - msg_hash = Hash(msg.begin(), msg.end()); + msg_hash = Hash(msg); BOOST_CHECK(key.Sign(msg_hash, sig)); found = sig[3] == 0x20; BOOST_CHECK(sig.size() <= 70); @@ -196,7 +196,7 @@ BOOST_AUTO_TEST_CASE(key_key_negation) std::string str = "Bitcoin key verification\n"; GetRandBytes(rnd, sizeof(rnd)); uint256 hash; - CHash256().Write((unsigned char*)str.data(), str.size()).Write(rnd, sizeof(rnd)).Finalize(hash.begin()); + CHash256().Write(MakeUCharSpan(str)).Write(rnd).Finalize(hash); // import the static test key CKey key = DecodeSecret(strSecret1C); diff --git a/src/test/merkle_tests.cpp b/src/test/merkle_tests.cpp index 03dce552fc..9bc7cc5dab 100644 --- a/src/test/merkle_tests.cpp +++ b/src/test/merkle_tests.cpp @@ -13,9 +13,9 @@ static uint256 ComputeMerkleRootFromBranch(const uint256& leaf, const std::vecto uint256 hash = leaf; for (std::vector<uint256>::const_iterator it = vMerkleBranch.begin(); it != vMerkleBranch.end(); ++it) { if (nIndex & 1) { - hash = Hash(it->begin(), it->end(), hash.begin(), hash.end()); + hash = Hash(*it, hash); } else { - hash = Hash(hash.begin(), hash.end(), it->begin(), it->end()); + hash = Hash(hash, *it); } nIndex >>= 1; } @@ -60,7 +60,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot } } mutated |= (inner[level] == h); - CHash256().Write(inner[level].begin(), 32).Write(h.begin(), 32).Finalize(h.begin()); + CHash256().Write(inner[level]).Write(h).Finalize(h); } // Store the resulting hash at inner position level. inner[level] = h; @@ -86,7 +86,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot if (pbranch && matchh) { pbranch->push_back(h); } - CHash256().Write(h.begin(), 32).Write(h.begin(), 32).Finalize(h.begin()); + CHash256().Write(h).Write(h).Finalize(h); // Increment count to the value it would have if two entries at this // level had existed. count += (((uint32_t)1) << level); @@ -101,7 +101,7 @@ static void MerkleComputation(const std::vector<uint256>& leaves, uint256* proot matchh = true; } } - CHash256().Write(inner[level].begin(), 32).Write(h.begin(), 32).Finalize(h.begin()); + CHash256().Write(inner[level]).Write(h).Finalize(h); level++; } } @@ -144,8 +144,7 @@ static uint256 BlockBuildMerkleTree(const CBlock& block, bool* fMutated, std::ve // Two identical hashes at the end of the list at a particular level. 
mutated = true; } - vMerkleTree.push_back(Hash(vMerkleTree[j+i].begin(), vMerkleTree[j+i].end(), - vMerkleTree[j+i2].begin(), vMerkleTree[j+i2].end())); + vMerkleTree.push_back(Hash(vMerkleTree[j+i], vMerkleTree[j+i2])); } j += nSize; } diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index ab42be21bd..317000c771 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -180,17 +180,12 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) CAddress addr = CAddress(CService(ipv4Addr, 7777), NODE_NETWORK); std::string pszDest; - bool fInboundIn = false; - // Test that fFeeler is false by default. - std::unique_ptr<CNode> pnode1 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 0, 0, CAddress(), pszDest, fInboundIn); - BOOST_CHECK(pnode1->fInbound == false); - BOOST_CHECK(pnode1->fFeeler == false); + std::unique_ptr<CNode> pnode1 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 0, 0, CAddress(), pszDest, ConnectionType::OUTBOUND); + BOOST_CHECK(pnode1->IsInboundConn() == false); - fInboundIn = true; - std::unique_ptr<CNode> pnode2 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 1, 1, CAddress(), pszDest, fInboundIn); - BOOST_CHECK(pnode2->fInbound == true); - BOOST_CHECK(pnode2->fFeeler == false); + std::unique_ptr<CNode> pnode2 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 1, 1, CAddress(), pszDest, ConnectionType::INBOUND); + BOOST_CHECK(pnode2->IsInboundConn() == true); } // prior to PR #14728, this test triggers an undefined behavior @@ -214,7 +209,7 @@ BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test) in_addr ipv4AddrPeer; ipv4AddrPeer.s_addr = 0xa0b0c001; CAddress addr = CAddress(CService(ipv4AddrPeer, 7777), NODE_NETWORK); - std::unique_ptr<CNode> pnode = MakeUnique<CNode>(0, NODE_NETWORK, 0, INVALID_SOCKET, addr, 0, 0, CAddress{}, std::string{}, false); + std::unique_ptr<CNode> pnode = MakeUnique<CNode>(0, NODE_NETWORK, 0, INVALID_SOCKET, addr, 0, 0, CAddress{}, std::string{}, ConnectionType::OUTBOUND); pnode->fSuccessfullyConnected.store(true); // the peer claims to be reaching us via IPv6 diff --git a/src/test/netbase_tests.cpp b/src/test/netbase_tests.cpp index 591b4ce49a..49073ea657 100644 --- a/src/test/netbase_tests.cpp +++ b/src/test/netbase_tests.cpp @@ -406,13 +406,14 @@ BOOST_AUTO_TEST_CASE(netpermissions_test) BOOST_CHECK(NetWhitelistPermissions::TryParse("bloom,forcerelay,noban,relay,mempool@1.2.3.4/32", whitelistPermissions, error)); const auto strings = NetPermissions::ToStrings(PF_ALL); - BOOST_CHECK_EQUAL(strings.size(), 6U); + BOOST_CHECK_EQUAL(strings.size(), 7U); BOOST_CHECK(std::find(strings.begin(), strings.end(), "bloomfilter") != strings.end()); BOOST_CHECK(std::find(strings.begin(), strings.end(), "forcerelay") != strings.end()); BOOST_CHECK(std::find(strings.begin(), strings.end(), "relay") != strings.end()); BOOST_CHECK(std::find(strings.begin(), strings.end(), "noban") != strings.end()); BOOST_CHECK(std::find(strings.begin(), strings.end(), "mempool") != strings.end()); BOOST_CHECK(std::find(strings.begin(), strings.end(), "download") != strings.end()); + BOOST_CHECK(std::find(strings.begin(), strings.end(), "addr") != strings.end()); } BOOST_AUTO_TEST_CASE(netbase_dont_resolve_strings_with_embedded_nul_characters) diff --git a/src/test/script_standard_tests.cpp b/src/test/script_standard_tests.cpp index 77d748241b..87678af4d1 100644 --- a/src/test/script_standard_tests.cpp +++ b/src/test/script_standard_tests.cpp @@ -216,7 +216,7 @@ 
BOOST_AUTO_TEST_CASE(script_standard_ExtractDestination) s << OP_0 << ToByteVector(pubkey.GetID()); BOOST_CHECK(ExtractDestination(s, address)); WitnessV0KeyHash keyhash; - CHash160().Write(pubkey.begin(), pubkey.size()).Finalize(keyhash.begin()); + CHash160().Write(pubkey).Finalize(keyhash); BOOST_CHECK(boost::get<WitnessV0KeyHash>(&address) && *boost::get<WitnessV0KeyHash>(&address) == keyhash); // TxoutType::WITNESS_V0_SCRIPTHASH diff --git a/src/test/script_tests.cpp b/src/test/script_tests.cpp index cb3ae290d1..0830743d61 100644 --- a/src/test/script_tests.cpp +++ b/src/test/script_tests.cpp @@ -282,7 +282,7 @@ public: CScript scriptPubKey = script; if (wm == WitnessMode::PKH) { uint160 hash; - CHash160().Write(&script[1], script.size() - 1).Finalize(hash.begin()); + CHash160().Write(MakeSpan(script).subspan(1)).Finalize(hash); script = CScript() << OP_DUP << OP_HASH160 << ToByteVector(hash) << OP_EQUALVERIFY << OP_CHECKSIG; scriptPubKey = CScript() << witnessversion << ToByteVector(hash); } else if (wm == WitnessMode::SH) { diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index c2328f931c..f625b67c2a 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -145,7 +145,7 @@ BOOST_AUTO_TEST_CASE(floats) for (int i = 0; i < 1000; i++) { ss << float(i); } - BOOST_CHECK(Hash(ss.begin(), ss.end()) == uint256S("8e8b4cf3e4df8b332057e3e23af42ebc663b61e0495d5e7e32d85099d7f3fe0c")); + BOOST_CHECK(Hash(ss) == uint256S("8e8b4cf3e4df8b332057e3e23af42ebc663b61e0495d5e7e32d85099d7f3fe0c")); // decode for (int i = 0; i < 1000; i++) { @@ -162,7 +162,7 @@ BOOST_AUTO_TEST_CASE(doubles) for (int i = 0; i < 1000; i++) { ss << double(i); } - BOOST_CHECK(Hash(ss.begin(), ss.end()) == uint256S("43d0c82591953c4eafe114590d392676a01585d25b25d433557f0d7878b23f96")); + BOOST_CHECK(Hash(ss) == uint256S("43d0c82591953c4eafe114590d392676a01585d25b25d433557f0d7878b23f96")); // decode for (int i = 0; i < 1000; i++) { diff --git a/src/test/settings_tests.cpp b/src/test/settings_tests.cpp index 1a2d775f49..548fd020a6 100644 --- a/src/test/settings_tests.cpp +++ b/src/test/settings_tests.cpp @@ -228,7 +228,7 @@ BOOST_FIXTURE_TEST_CASE(Merge, MergeTestingSetup) if (OnlyHasDefaultSectionSetting(settings, network, name)) desc += " ignored"; desc += "\n"; - out_sha.Write((const unsigned char*)desc.data(), desc.size()); + out_sha.Write(MakeUCharSpan(desc)); if (out_file) { BOOST_REQUIRE(fwrite(desc.data(), 1, desc.size(), out_file) == desc.size()); } @@ -241,7 +241,7 @@ BOOST_FIXTURE_TEST_CASE(Merge, MergeTestingSetup) unsigned char out_sha_bytes[CSHA256::OUTPUT_SIZE]; out_sha.Finalize(out_sha_bytes); - std::string out_sha_hex = HexStr(std::begin(out_sha_bytes), std::end(out_sha_bytes)); + std::string out_sha_hex = HexStr(out_sha_bytes); // If check below fails, should manually dump the results with: // diff --git a/src/test/sync_tests.cpp b/src/test/sync_tests.cpp index 3ea8714f3a..19029ebd3c 100644 --- a/src/test/sync_tests.cpp +++ b/src/test/sync_tests.cpp @@ -14,6 +14,7 @@ void TestPotentialDeadLockDetected(MutexType& mutex1, MutexType& mutex2) { LOCK2(mutex1, mutex2); } + BOOST_CHECK(LockStackEmpty()); bool error_thrown = false; try { LOCK2(mutex2, mutex1); @@ -21,6 +22,7 @@ void TestPotentialDeadLockDetected(MutexType& mutex1, MutexType& mutex2) BOOST_CHECK_EQUAL(e.what(), "potential deadlock detected: mutex1 -> mutex2 -> mutex1"); error_thrown = true; } + BOOST_CHECK(LockStackEmpty()); #ifdef DEBUG_LOCKORDER BOOST_CHECK(error_thrown); #else @@ -40,9 +42,13 @@ 
BOOST_AUTO_TEST_CASE(potential_deadlock_detected) RecursiveMutex rmutex1, rmutex2; TestPotentialDeadLockDetected(rmutex1, rmutex2); + // The second test ensures that lock tracking data have not been broken by exception. + TestPotentialDeadLockDetected(rmutex1, rmutex2); Mutex mutex1, mutex2; TestPotentialDeadLockDetected(mutex1, mutex2); + // The second test ensures that lock tracking data have not been broken by exception. + TestPotentialDeadLockDetected(mutex1, mutex2); #ifdef DEBUG_LOCKORDER g_debug_lockorder_abort = prev; diff --git a/src/test/system_tests.cpp b/src/test/system_tests.cpp new file mode 100644 index 0000000000..a55145c738 --- /dev/null +++ b/src/test/system_tests.cpp @@ -0,0 +1,95 @@ +// Copyright (c) 2019 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. +// +#include <test/util/setup_common.h> +#include <util/system.h> +#include <univalue.h> + +#ifdef HAVE_BOOST_PROCESS +#include <boost/process.hpp> +#endif // HAVE_BOOST_PROCESS + +#include <boost/test/unit_test.hpp> + +BOOST_FIXTURE_TEST_SUITE(system_tests, BasicTestingSetup) + +// At least one test is required (in case HAVE_BOOST_PROCESS is not defined). +// Workaround for https://github.com/bitcoin/bitcoin/issues/19128 +BOOST_AUTO_TEST_CASE(dummy) +{ + BOOST_CHECK(true); +} + +#ifdef HAVE_BOOST_PROCESS + +bool checkMessage(const std::runtime_error& ex) +{ + // On Linux & Mac: "No such file or directory" + // On Windows: "The system cannot find the file specified." + const std::string what(ex.what()); + BOOST_CHECK(what.find("file") != std::string::npos); + return true; +} + +bool checkMessageFalse(const std::runtime_error& ex) +{ + BOOST_CHECK_EQUAL(ex.what(), std::string("RunCommandParseJSON error: process(false) returned 1: \n")); + return true; +} + +bool checkMessageStdErr(const std::runtime_error& ex) +{ + const std::string what(ex.what()); + BOOST_CHECK(what.find("RunCommandParseJSON error:") != std::string::npos); + return checkMessage(ex); +} + +BOOST_AUTO_TEST_CASE(run_command) +{ + { + const UniValue result = RunCommandParseJSON(""); + BOOST_CHECK(result.isNull()); + } + { +#ifdef WIN32 + // Windows requires single quotes to prevent escaping double quotes from the JSON... + const UniValue result = RunCommandParseJSON("echo '{\"success\": true}'"); +#else + // ... 
but Linux and macOS echo a single quote if it's used + const UniValue result = RunCommandParseJSON("echo \"{\"success\": true}\""); +#endif + BOOST_CHECK(result.isObject()); + const UniValue& success = find_value(result, "success"); + BOOST_CHECK(!success.isNull()); + BOOST_CHECK_EQUAL(success.getBool(), true); + } + { + // An invalid command is handled by Boost + BOOST_CHECK_EXCEPTION(RunCommandParseJSON("invalid_command"), boost::process::process_error, checkMessage); // Command failed + } + { + // Return non-zero exit code, no output to stderr + BOOST_CHECK_EXCEPTION(RunCommandParseJSON("false"), std::runtime_error, checkMessageFalse); + } + { + // Return non-zero exit code, with error message for stderr + BOOST_CHECK_EXCEPTION(RunCommandParseJSON("ls nosuchfile"), std::runtime_error, checkMessageStdErr); + } + { + BOOST_REQUIRE_THROW(RunCommandParseJSON("echo \"{\""), std::runtime_error); // Unable to parse JSON + } + // Test std::in, except for Windows +#ifndef WIN32 + { + const UniValue result = RunCommandParseJSON("cat", "{\"success\": true}"); + BOOST_CHECK(result.isObject()); + const UniValue& success = find_value(result, "success"); + BOOST_CHECK(!success.isNull()); + BOOST_CHECK_EQUAL(success.getBool(), true); + } +#endif +} +#endif // HAVE_BOOST_PROCESS + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index 4bf6e734ce..c30f44292c 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -361,6 +361,8 @@ static CScript PushAll(const std::vector<valtype>& values) result << OP_0; } else if (v.size() == 1 && v[0] >= 1 && v[0] <= 16) { result << CScript::EncodeOP_N(v[0]); + } else if (v.size() == 1 && v[0] == 0x81) { + result << OP_1NEGATE; } else { result << v; } diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index 14f65dcb7c..b2ae1cb845 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -11,6 +11,7 @@ #include <consensus/validation.h> #include <crypto/sha256.h> #include <init.h> +#include <interfaces/chain.h> #include <miner.h> #include <net.h> #include <net_processing.h> @@ -32,6 +33,7 @@ #include <util/vector.h> #include <validation.h> #include <validationinterface.h> +#include <walletinitinterface.h> #include <functional> @@ -104,6 +106,8 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve SetupNetworking(); InitSignatureCache(); InitScriptExecutionCache(); + m_node.chain = interfaces::MakeChain(m_node); + g_wallet_init_interface.Construct(m_node); fCheckBlockIndex = true; static bool noui_connected = false; if (!noui_connected) { diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index a30e366028..bf7c6c3e3e 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -105,47 +105,24 @@ BOOST_AUTO_TEST_CASE(util_ParseHex) BOOST_AUTO_TEST_CASE(util_HexStr) { BOOST_CHECK_EQUAL( - HexStr(ParseHex_expected, ParseHex_expected + sizeof(ParseHex_expected)), + HexStr(ParseHex_expected), "04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f"); BOOST_CHECK_EQUAL( - HexStr(ParseHex_expected + sizeof(ParseHex_expected), - ParseHex_expected + sizeof(ParseHex_expected)), + HexStr(Span<const unsigned char>( + ParseHex_expected + sizeof(ParseHex_expected), + ParseHex_expected + sizeof(ParseHex_expected))), ""); BOOST_CHECK_EQUAL( - HexStr(ParseHex_expected, ParseHex_expected), + HexStr(Span<const unsigned 
char>(ParseHex_expected, ParseHex_expected)), ""); std::vector<unsigned char> ParseHex_vec(ParseHex_expected, ParseHex_expected + 5); BOOST_CHECK_EQUAL( - HexStr(ParseHex_vec.rbegin(), ParseHex_vec.rend()), - "b0fd8a6704" - ); - - BOOST_CHECK_EQUAL( - HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected), - std::reverse_iterator<const uint8_t *>(ParseHex_expected)), - "" - ); - - BOOST_CHECK_EQUAL( - HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected + 1), - std::reverse_iterator<const uint8_t *>(ParseHex_expected)), - "04" - ); - - BOOST_CHECK_EQUAL( - HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected + 5), - std::reverse_iterator<const uint8_t *>(ParseHex_expected)), - "b0fd8a6704" - ); - - BOOST_CHECK_EQUAL( - HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected + 65), - std::reverse_iterator<const uint8_t *>(ParseHex_expected)), - "5f1df16b2b704c8a578d0bbaf74d385cde12c11ee50455f3c438ef4c3fbcf649b6de611feae06279a60939e028a8d65c10b73071a6f16719274855feb0fd8a6704" + HexStr(ParseHex_vec), + "04678afdb0" ); } @@ -573,57 +550,52 @@ BOOST_AUTO_TEST_CASE(util_ReadConfigStream) BOOST_CHECK(test_args.m_settings.ro_config["sec1"].size() == 3); BOOST_CHECK(test_args.m_settings.ro_config["sec2"].size() == 2); - BOOST_CHECK(test_args.m_settings.ro_config[""].count("a") - && test_args.m_settings.ro_config[""].count("b") - && test_args.m_settings.ro_config[""].count("ccc") - && test_args.m_settings.ro_config[""].count("d") - && test_args.m_settings.ro_config[""].count("fff") - && test_args.m_settings.ro_config[""].count("ggg") - && test_args.m_settings.ro_config[""].count("h") - && test_args.m_settings.ro_config[""].count("i") - ); - BOOST_CHECK(test_args.m_settings.ro_config["sec1"].count("ccc") - && test_args.m_settings.ro_config["sec1"].count("h") - && test_args.m_settings.ro_config["sec2"].count("ccc") - && test_args.m_settings.ro_config["sec2"].count("iii") - ); - - BOOST_CHECK(test_args.IsArgSet("-a") - && test_args.IsArgSet("-b") - && test_args.IsArgSet("-ccc") - && test_args.IsArgSet("-d") - && test_args.IsArgSet("-fff") - && test_args.IsArgSet("-ggg") - && test_args.IsArgSet("-h") - && test_args.IsArgSet("-i") - && !test_args.IsArgSet("-zzz") - && !test_args.IsArgSet("-iii") - ); - - BOOST_CHECK(test_args.GetArg("-a", "xxx") == "" - && test_args.GetArg("-b", "xxx") == "1" - && test_args.GetArg("-ccc", "xxx") == "argument" - && test_args.GetArg("-d", "xxx") == "e" - && test_args.GetArg("-fff", "xxx") == "0" - && test_args.GetArg("-ggg", "xxx") == "1" - && test_args.GetArg("-h", "xxx") == "0" - && test_args.GetArg("-i", "xxx") == "1" - && test_args.GetArg("-zzz", "xxx") == "xxx" - && test_args.GetArg("-iii", "xxx") == "xxx" - ); + BOOST_CHECK(test_args.m_settings.ro_config[""].count("a")); + BOOST_CHECK(test_args.m_settings.ro_config[""].count("b")); + BOOST_CHECK(test_args.m_settings.ro_config[""].count("ccc")); + BOOST_CHECK(test_args.m_settings.ro_config[""].count("d")); + BOOST_CHECK(test_args.m_settings.ro_config[""].count("fff")); + BOOST_CHECK(test_args.m_settings.ro_config[""].count("ggg")); + BOOST_CHECK(test_args.m_settings.ro_config[""].count("h")); + BOOST_CHECK(test_args.m_settings.ro_config[""].count("i")); + BOOST_CHECK(test_args.m_settings.ro_config["sec1"].count("ccc")); + BOOST_CHECK(test_args.m_settings.ro_config["sec1"].count("h")); + BOOST_CHECK(test_args.m_settings.ro_config["sec2"].count("ccc")); + BOOST_CHECK(test_args.m_settings.ro_config["sec2"].count("iii")); + + BOOST_CHECK(test_args.IsArgSet("-a")); + 
BOOST_CHECK(test_args.IsArgSet("-b")); + BOOST_CHECK(test_args.IsArgSet("-ccc")); + BOOST_CHECK(test_args.IsArgSet("-d")); + BOOST_CHECK(test_args.IsArgSet("-fff")); + BOOST_CHECK(test_args.IsArgSet("-ggg")); + BOOST_CHECK(test_args.IsArgSet("-h")); + BOOST_CHECK(test_args.IsArgSet("-i")); + BOOST_CHECK(!test_args.IsArgSet("-zzz")); + BOOST_CHECK(!test_args.IsArgSet("-iii")); + + BOOST_CHECK_EQUAL(test_args.GetArg("-a", "xxx"), ""); + BOOST_CHECK_EQUAL(test_args.GetArg("-b", "xxx"), "1"); + BOOST_CHECK_EQUAL(test_args.GetArg("-ccc", "xxx"), "argument"); + BOOST_CHECK_EQUAL(test_args.GetArg("-d", "xxx"), "e"); + BOOST_CHECK_EQUAL(test_args.GetArg("-fff", "xxx"), "0"); + BOOST_CHECK_EQUAL(test_args.GetArg("-ggg", "xxx"), "1"); + BOOST_CHECK_EQUAL(test_args.GetArg("-h", "xxx"), "0"); + BOOST_CHECK_EQUAL(test_args.GetArg("-i", "xxx"), "1"); + BOOST_CHECK_EQUAL(test_args.GetArg("-zzz", "xxx"), "xxx"); + BOOST_CHECK_EQUAL(test_args.GetArg("-iii", "xxx"), "xxx"); for (const bool def : {false, true}) { - BOOST_CHECK(test_args.GetBoolArg("-a", def) - && test_args.GetBoolArg("-b", def) - && !test_args.GetBoolArg("-ccc", def) - && !test_args.GetBoolArg("-d", def) - && !test_args.GetBoolArg("-fff", def) - && test_args.GetBoolArg("-ggg", def) - && !test_args.GetBoolArg("-h", def) - && test_args.GetBoolArg("-i", def) - && test_args.GetBoolArg("-zzz", def) == def - && test_args.GetBoolArg("-iii", def) == def - ); + BOOST_CHECK(test_args.GetBoolArg("-a", def)); + BOOST_CHECK(test_args.GetBoolArg("-b", def)); + BOOST_CHECK(!test_args.GetBoolArg("-ccc", def)); + BOOST_CHECK(!test_args.GetBoolArg("-d", def)); + BOOST_CHECK(!test_args.GetBoolArg("-fff", def)); + BOOST_CHECK(test_args.GetBoolArg("-ggg", def)); + BOOST_CHECK(!test_args.GetBoolArg("-h", def)); + BOOST_CHECK(test_args.GetBoolArg("-i", def)); + BOOST_CHECK(test_args.GetBoolArg("-zzz", def) == def); + BOOST_CHECK(test_args.GetBoolArg("-iii", def) == def); } BOOST_CHECK(test_args.GetArgs("-a").size() == 1 @@ -659,13 +631,12 @@ BOOST_AUTO_TEST_CASE(util_ReadConfigStream) test_args.SelectConfigNetwork("sec1"); // same as original - BOOST_CHECK(test_args.GetArg("-a", "xxx") == "" - && test_args.GetArg("-b", "xxx") == "1" - && test_args.GetArg("-fff", "xxx") == "0" - && test_args.GetArg("-ggg", "xxx") == "1" - && test_args.GetArg("-zzz", "xxx") == "xxx" - && test_args.GetArg("-iii", "xxx") == "xxx" - ); + BOOST_CHECK_EQUAL(test_args.GetArg("-a", "xxx"), ""); + BOOST_CHECK_EQUAL(test_args.GetArg("-b", "xxx"), "1"); + BOOST_CHECK_EQUAL(test_args.GetArg("-fff", "xxx"), "0"); + BOOST_CHECK_EQUAL(test_args.GetArg("-ggg", "xxx"), "1"); + BOOST_CHECK_EQUAL(test_args.GetArg("-zzz", "xxx"), "xxx"); + BOOST_CHECK_EQUAL(test_args.GetArg("-iii", "xxx"), "xxx"); // d is overridden BOOST_CHECK(test_args.GetArg("-d", "xxx") == "eee"); // section-specific setting @@ -680,14 +651,13 @@ BOOST_AUTO_TEST_CASE(util_ReadConfigStream) test_args.SelectConfigNetwork("sec2"); // same as original - BOOST_CHECK(test_args.GetArg("-a", "xxx") == "" - && test_args.GetArg("-b", "xxx") == "1" - && test_args.GetArg("-d", "xxx") == "e" - && test_args.GetArg("-fff", "xxx") == "0" - && test_args.GetArg("-ggg", "xxx") == "1" - && test_args.GetArg("-zzz", "xxx") == "xxx" - && test_args.GetArg("-h", "xxx") == "0" - ); + BOOST_CHECK(test_args.GetArg("-a", "xxx") == ""); + BOOST_CHECK(test_args.GetArg("-b", "xxx") == "1"); + BOOST_CHECK(test_args.GetArg("-d", "xxx") == "e"); + BOOST_CHECK(test_args.GetArg("-fff", "xxx") == "0"); + BOOST_CHECK(test_args.GetArg("-ggg", "xxx") == "1"); + 
BOOST_CHECK(test_args.GetArg("-zzz", "xxx") == "xxx"); + BOOST_CHECK(test_args.GetArg("-h", "xxx") == "0"); // section-specific setting BOOST_CHECK(test_args.GetArg("-iii", "xxx") == "2"); // section takes priority for multiple values @@ -1009,7 +979,7 @@ BOOST_FIXTURE_TEST_CASE(util_ArgsMerge, ArgsMergeTestingSetup) desc += "\n"; - out_sha.Write((const unsigned char*)desc.data(), desc.size()); + out_sha.Write(MakeUCharSpan(desc)); if (out_file) { BOOST_REQUIRE(fwrite(desc.data(), 1, desc.size(), out_file) == desc.size()); } @@ -1022,7 +992,7 @@ BOOST_FIXTURE_TEST_CASE(util_ArgsMerge, ArgsMergeTestingSetup) unsigned char out_sha_bytes[CSHA256::OUTPUT_SIZE]; out_sha.Finalize(out_sha_bytes); - std::string out_sha_hex = HexStr(std::begin(out_sha_bytes), std::end(out_sha_bytes)); + std::string out_sha_hex = HexStr(out_sha_bytes); // If check below fails, should manually dump the results with: // @@ -1112,7 +1082,7 @@ BOOST_FIXTURE_TEST_CASE(util_ChainMerge, ChainMergeTestingSetup) } desc += "\n"; - out_sha.Write((const unsigned char*)desc.data(), desc.size()); + out_sha.Write(MakeUCharSpan(desc)); if (out_file) { BOOST_REQUIRE(fwrite(desc.data(), 1, desc.size(), out_file) == desc.size()); } @@ -1125,7 +1095,7 @@ BOOST_FIXTURE_TEST_CASE(util_ChainMerge, ChainMergeTestingSetup) unsigned char out_sha_bytes[CSHA256::OUTPUT_SIZE]; out_sha.Finalize(out_sha_bytes); - std::string out_sha_hex = HexStr(std::begin(out_sha_bytes), std::end(out_sha_bytes)); + std::string out_sha_hex = HexStr(out_sha_bytes); // If check below fails, should manually dump the results with: // @@ -2186,8 +2156,8 @@ BOOST_AUTO_TEST_CASE(message_hash) std::string(1, (char)unsigned_tx.length()) + unsigned_tx; - const uint256 signature_hash = Hash(unsigned_tx.begin(), unsigned_tx.end()); - const uint256 message_hash1 = Hash(prefixed_message.begin(), prefixed_message.end()); + const uint256 signature_hash = Hash(unsigned_tx); + const uint256 message_hash1 = Hash(prefixed_message); const uint256 message_hash2 = MessageHash(unsigned_tx); BOOST_CHECK_EQUAL(message_hash1, message_hash2); diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp index 84118b36ef..5d56d1ff89 100644 --- a/src/torcontrol.cpp +++ b/src/torcontrol.cpp @@ -405,7 +405,7 @@ static bool WriteBinaryFile(const fs::path &filename, const std::string &data) /****** Bitcoin specific TorController implementation ********/ /** Controller that connects to Tor control socket, authenticate, then create - * and maintain an ephemeral hidden service. + * and maintain an ephemeral onion service. */ class TorController { @@ -534,7 +534,7 @@ void TorController::auth_cb(TorControlConnection& _conn, const TorControlReply& // Finally - now create the service if (private_key.empty()) // No private key, generate one private_key = "NEW:RSA1024"; // Explicitly request RSA1024 - see issue #9214 - // Request hidden service, redirect port. + // Request onion service, redirect port. // Note that the 'virtual' port is always the default port to avoid decloaking nodes using other ports. 
_conn.Command(strprintf("ADD_ONION %s Port=%i,127.0.0.1:%i", private_key, Params().GetDefaultPort(), GetListenPort()), std::bind(&TorController::add_onion_cb, this, std::placeholders::_1, std::placeholders::_2)); diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 1d9f6a4a46..de1a3ec68f 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -811,15 +811,17 @@ CTransactionRef CTxMemPool::get(const uint256& hash) const return i->GetSharedTx(); } -TxMempoolInfo CTxMemPool::info(const uint256& hash, bool wtxid) const +TxMempoolInfo CTxMemPool::info(const GenTxid& gtxid) const { LOCK(cs); - indexed_transaction_set::const_iterator i = (wtxid ? get_iter_from_wtxid(hash) : mapTx.find(hash)); + indexed_transaction_set::const_iterator i = (gtxid.IsWtxid() ? get_iter_from_wtxid(gtxid.GetHash()) : mapTx.find(gtxid.GetHash())); if (i == mapTx.end()) return TxMempoolInfo(); return GetInfo(i); } +TxMempoolInfo CTxMemPool::info(const uint256& txid) const { return info(GenTxid{false, txid}); } + void CTxMemPool::PrioritiseTransaction(const uint256& hash, const CAmount& nFeeDelta) { { diff --git a/src/txmempool.h b/src/txmempool.h index d4e9845942..4743e1b63a 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -716,14 +716,15 @@ public: return totalTxSize; } - bool exists(const uint256& hash, bool wtxid=false) const + bool exists(const GenTxid& gtxid) const { LOCK(cs); - if (wtxid) { - return (mapTx.get<index_by_wtxid>().count(hash) != 0); + if (gtxid.IsWtxid()) { + return (mapTx.get<index_by_wtxid>().count(gtxid.GetHash()) != 0); } - return (mapTx.count(hash) != 0); + return (mapTx.count(gtxid.GetHash()) != 0); } + bool exists(const uint256& txid) const { return exists(GenTxid{false, txid}); } CTransactionRef get(const uint256& hash) const; txiter get_iter_from_wtxid(const uint256& wtxid) const EXCLUSIVE_LOCKS_REQUIRED(cs) @@ -731,7 +732,8 @@ public: AssertLockHeld(cs); return mapTx.project<0>(mapTx.get<index_by_wtxid>().find(wtxid)); } - TxMempoolInfo info(const uint256& hash, bool wtxid=false) const; + TxMempoolInfo info(const uint256& hash) const; + TxMempoolInfo info(const GenTxid& gtxid) const; std::vector<TxMempoolInfo> infoAll() const; size_t DynamicMemoryUsage() const; diff --git a/src/uint256.cpp b/src/uint256.cpp index a943e71062..ee1b34eadd 100644 --- a/src/uint256.cpp +++ b/src/uint256.cpp @@ -12,20 +12,24 @@ template <unsigned int BITS> base_blob<BITS>::base_blob(const std::vector<unsigned char>& vch) { - assert(vch.size() == sizeof(data)); - memcpy(data, vch.data(), sizeof(data)); + assert(vch.size() == sizeof(m_data)); + memcpy(m_data, vch.data(), sizeof(m_data)); } template <unsigned int BITS> std::string base_blob<BITS>::GetHex() const { - return HexStr(std::reverse_iterator<const uint8_t*>(data + sizeof(data)), std::reverse_iterator<const uint8_t*>(data)); + uint8_t m_data_rev[WIDTH]; + for (int i = 0; i < WIDTH; ++i) { + m_data_rev[i] = m_data[WIDTH - 1 - i]; + } + return HexStr(m_data_rev); } template <unsigned int BITS> void base_blob<BITS>::SetHex(const char* psz) { - memset(data, 0, sizeof(data)); + memset(m_data, 0, sizeof(m_data)); // skip leading spaces while (IsSpace(*psz)) @@ -39,7 +43,7 @@ void base_blob<BITS>::SetHex(const char* psz) size_t digits = 0; while (::HexDigit(psz[digits]) != -1) digits++; - unsigned char* p1 = (unsigned char*)data; + unsigned char* p1 = (unsigned char*)m_data; unsigned char* pend = p1 + WIDTH; while (digits > 0 && p1 < pend) { *p1 = ::HexDigit(psz[--digits]); diff --git a/src/uint256.h b/src/uint256.h index b36598f572..8ab747ef49 100644 
--- a/src/uint256.h +++ b/src/uint256.h @@ -18,11 +18,11 @@ class base_blob { protected: static constexpr int WIDTH = BITS / 8; - uint8_t data[WIDTH]; + uint8_t m_data[WIDTH]; public: base_blob() { - memset(data, 0, sizeof(data)); + memset(m_data, 0, sizeof(m_data)); } explicit base_blob(const std::vector<unsigned char>& vch); @@ -30,17 +30,17 @@ public: bool IsNull() const { for (int i = 0; i < WIDTH; i++) - if (data[i] != 0) + if (m_data[i] != 0) return false; return true; } void SetNull() { - memset(data, 0, sizeof(data)); + memset(m_data, 0, sizeof(m_data)); } - inline int Compare(const base_blob& other) const { return memcmp(data, other.data, sizeof(data)); } + inline int Compare(const base_blob& other) const { return memcmp(m_data, other.m_data, sizeof(m_data)); } friend inline bool operator==(const base_blob& a, const base_blob& b) { return a.Compare(b) == 0; } friend inline bool operator!=(const base_blob& a, const base_blob& b) { return a.Compare(b) != 0; } @@ -51,34 +51,37 @@ public: void SetHex(const std::string& str); std::string ToString() const; + const unsigned char* data() const { return m_data; } + unsigned char* data() { return m_data; } + unsigned char* begin() { - return &data[0]; + return &m_data[0]; } unsigned char* end() { - return &data[WIDTH]; + return &m_data[WIDTH]; } const unsigned char* begin() const { - return &data[0]; + return &m_data[0]; } const unsigned char* end() const { - return &data[WIDTH]; + return &m_data[WIDTH]; } unsigned int size() const { - return sizeof(data); + return sizeof(m_data); } uint64_t GetUint64(int pos) const { - const uint8_t* ptr = data + pos * 8; + const uint8_t* ptr = m_data + pos * 8; return ((uint64_t)ptr[0]) | \ ((uint64_t)ptr[1]) << 8 | \ ((uint64_t)ptr[2]) << 16 | \ @@ -92,13 +95,13 @@ public: template<typename Stream> void Serialize(Stream& s) const { - s.write((char*)data, sizeof(data)); + s.write((char*)m_data, sizeof(m_data)); } template<typename Stream> void Unserialize(Stream& s) { - s.read((char*)data, sizeof(data)); + s.read((char*)m_data, sizeof(m_data)); } }; diff --git a/src/util/strencodings.cpp b/src/util/strencodings.cpp index 3a903b6897..d10f92ffe6 100644 --- a/src/util/strencodings.cpp +++ b/src/util/strencodings.cpp @@ -569,3 +569,16 @@ std::string Capitalize(std::string str) str[0] = ToUpper(str.front()); return str; } + +std::string HexStr(const Span<const uint8_t> s) +{ + std::string rv; + static constexpr char hexmap[16] = { '0', '1', '2', '3', '4', '5', '6', '7', + '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; + rv.reserve(s.size() * 2); + for (uint8_t v: s) { + rv.push_back(hexmap[v >> 4]); + rv.push_back(hexmap[v & 15]); + } + return rv; +} diff --git a/src/util/strencodings.h b/src/util/strencodings.h index bd988f1410..eaa0fa9992 100644 --- a/src/util/strencodings.h +++ b/src/util/strencodings.h @@ -10,6 +10,7 @@ #define BITCOIN_UTIL_STRENCODINGS_H #include <attributes.h> +#include <span.h> #include <cstdint> #include <iterator> @@ -119,27 +120,11 @@ NODISCARD bool ParseUInt64(const std::string& str, uint64_t *out); */ NODISCARD bool ParseDouble(const std::string& str, double *out); -template<typename T> -std::string HexStr(const T itbegin, const T itend) -{ - std::string rv; - static const char hexmap[16] = { '0', '1', '2', '3', '4', '5', '6', '7', - '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' }; - rv.reserve(std::distance(itbegin, itend) * 2); - for(T it = itbegin; it < itend; ++it) - { - unsigned char val = (unsigned char)(*it); - rv.push_back(hexmap[val>>4]); - rv.push_back(hexmap[val&15]); - } - return 
rv; -} - -template<typename T> -inline std::string HexStr(const T& vch) -{ - return HexStr(vch.begin(), vch.end()); -} +/** + * Convert a span of bytes to a lower-case hexadecimal string. + */ +std::string HexStr(const Span<const uint8_t> s); +inline std::string HexStr(const Span<const char> s) { return HexStr(MakeUCharSpan(s)); } /** * Format a paragraph of text to a fixed width, adding spaces for diff --git a/src/util/system.cpp b/src/util/system.cpp index 8164e884b1..7b74789b32 100644 --- a/src/util/system.cpp +++ b/src/util/system.cpp @@ -6,6 +6,10 @@ #include <sync.h> #include <util/system.h> +#ifdef HAVE_BOOST_PROCESS +#include <boost/process.hpp> +#endif // HAVE_BOOST_PROCESS + #include <chainparamsbase.h> #include <util/strencodings.h> #include <util/string.h> @@ -1021,7 +1025,7 @@ bool FileCommit(FILE *file) return false; } #else - #if defined(__linux__) || defined(__NetBSD__) + #if defined(HAVE_FDATASYNC) if (fdatasync(fileno(file)) != 0 && errno != EINVAL) { // Ignore EINVAL for filesystems that don't support sync LogPrintf("%s: fdatasync failed: %d\n", __func__, errno); return false; @@ -1161,6 +1165,43 @@ void runCommand(const std::string& strCommand) } #endif +#ifdef HAVE_BOOST_PROCESS +UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in) +{ + namespace bp = boost::process; + + UniValue result_json; + bp::opstream stdin_stream; + bp::ipstream stdout_stream; + bp::ipstream stderr_stream; + + if (str_command.empty()) return UniValue::VNULL; + + bp::child c( + str_command, + bp::std_out > stdout_stream, + bp::std_err > stderr_stream, + bp::std_in < stdin_stream + ); + if (!str_std_in.empty()) { + stdin_stream << str_std_in << std::endl; + } + stdin_stream.pipe().close(); + + std::string result; + std::string error; + std::getline(stdout_stream, result); + std::getline(stderr_stream, error); + + c.wait(); + const int n_error = c.exit_code(); + if (n_error) throw std::runtime_error(strprintf("RunCommandParseJSON error: process(%s) returned %d: %s\n", str_command, n_error, error)); + if (!result_json.read(result)) throw std::runtime_error("Unable to parse JSON: " + result); + + return result_json; +} +#endif // HAVE_BOOST_PROCESS + void SetupEnvironment() { #ifdef HAVE_MALLOPT_ARENA_MAX diff --git a/src/util/system.h b/src/util/system.h index 0bd14cc9ea..1df194ca84 100644 --- a/src/util/system.h +++ b/src/util/system.h @@ -37,6 +37,8 @@ #include <boost/thread/condition_variable.hpp> // for boost::thread_interrupted +class UniValue; + // Application startup time (used for uptime calculation) int64_t GetStartupTime(); @@ -96,6 +98,16 @@ std::string ShellEscape(const std::string& arg); #if HAVE_SYSTEM void runCommand(const std::string& strCommand); #endif +#ifdef HAVE_BOOST_PROCESS +/** + * Execute a command which returns JSON, and parse the result. 
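A minimal usage sketch for the RunCommandParseJSON() helper added in the util/system.cpp hunk above and declared just below (illustrative only; ExternalSignerReady, the command path and the "ready" key are placeholders, and the code only builds when HAVE_BOOST_PROCESS is defined by the configure changes elsewhere in this patch):

#ifdef HAVE_BOOST_PROCESS
#include <univalue.h>
#include <util/system.h>

bool ExternalSignerReady()
{
    // Launches the command via boost::process, optionally writes the second
    // argument to its stdin, reads one line of stdout, and throws
    // std::runtime_error on a non-zero exit status or unparseable JSON.
    const UniValue result = RunCommandParseJSON("/usr/local/bin/my-signer enumerate");
    return result.isObject() && result["ready"].isTrue();
}
#endif // HAVE_BOOST_PROCESS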
+ * + * @param str_command The command to execute, including any arguments + * @param str_std_in string to pass to stdin + * @return parsed JSON + */ +UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in=""); +#endif // HAVE_BOOST_PROCESS /** * Most paths passed as configuration arguments are treated as relative to diff --git a/src/validation.cpp b/src/validation.cpp index 3cebf1090e..cf2f9dde62 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -688,8 +688,9 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) } // Check for non-standard pay-to-script-hash in inputs - if (fRequireStandard && !AreInputsStandard(tx, m_view)) - return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-nonstandard-inputs"); + if (fRequireStandard && !AreInputsStandard(tx, m_view)) { + return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs"); + } // Check for non-standard witness in P2WSH if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view)) @@ -1198,8 +1199,8 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c if (memcmp(blk_start, message_start, CMessageHeader::MESSAGE_START_SIZE)) { return error("%s: Block magic mismatch for %s: %s versus expected %s", __func__, pos.ToString(), - HexStr(blk_start, blk_start + CMessageHeader::MESSAGE_START_SIZE), - HexStr(message_start, message_start + CMessageHeader::MESSAGE_START_SIZE)); + HexStr(blk_start), + HexStr(message_start)); } if (blk_size > MAX_SIZE) { @@ -3434,7 +3435,7 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc if (consensusParams.SegwitHeight != std::numeric_limits<int>::max()) { if (commitpos == -1) { uint256 witnessroot = BlockWitnessMerkleRoot(block, nullptr); - CHash256().Write(witnessroot.begin(), 32).Write(ret.data(), 32).Finalize(witnessroot.begin()); + CHash256().Write(witnessroot).Write(ret).Finalize(witnessroot); CTxOut out; out.nValue = 0; out.scriptPubKey.resize(MINIMUM_WITNESS_COMMITMENT); @@ -3579,7 +3580,7 @@ static bool ContextualCheckBlock(const CBlock& block, BlockValidationState& stat if (block.vtx[0]->vin[0].scriptWitness.stack.size() != 1 || block.vtx[0]->vin[0].scriptWitness.stack[0].size() != 32) { return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-nonce-size", strprintf("%s : invalid witness reserved value size", __func__)); } - CHash256().Write(hashWitness.begin(), 32).Write(&block.vtx[0]->vin[0].scriptWitness.stack[0][0], 32).Finalize(hashWitness.begin()); + CHash256().Write(hashWitness).Write(block.vtx[0]->vin[0].scriptWitness.stack[0]).Finalize(hashWitness); if (memcmp(hashWitness.begin(), &block.vtx[0]->vout[commitpos].scriptPubKey[6], 32)) { return state.Invalid(BlockValidationResult::BLOCK_MUTATED, "bad-witness-merkle-match", strprintf("%s : witness merkle commitment mismatch", __func__)); } diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index a04311fdf5..24eb2ee34c 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -38,7 +38,7 @@ void CheckUniqueFileid(const BerkeleyEnvironment& env, const std::string& filena for (const auto& item : env.m_fileids) { if (fileid == item.second && &fileid != &item.second) { throw std::runtime_error(strprintf("BerkeleyDatabase: Can't open database %s (duplicates fileid %s from %s)", filename, - HexStr(std::begin(item.second.value), std::end(item.second.value)), item.first)); + HexStr(item.second.value), item.first)); } } } diff --git a/src/wallet/rpcdump.cpp 
b/src/wallet/rpcdump.cpp index 3b752ca936..e0c3a1287a 100644 --- a/src/wallet/rpcdump.cpp +++ b/src/wallet/rpcdump.cpp @@ -34,7 +34,7 @@ std::string static EncodeDumpString(const std::string &str) { std::stringstream ret; for (const unsigned char c : str) { if (c <= 32 || c >= 128 || c == '%') { - ret << '%' << HexStr(&c, &c + 1); + ret << '%' << HexStr(Span<const unsigned char>(&c, 1)); } else { ret << c; } diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index 9d334063c4..58f3777da5 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -1484,7 +1484,7 @@ static UniValue listsinceblock(const JSONRPCRequest& request) {RPCResult::Type::ARR, "removed", "<structure is the same as \"transactions\" above, only present if include_removed=true>\n" "Note: transactions that were re-added in the active chain will appear as-is in this array, and may thus have a positive confirmation count." , {{RPCResult::Type::ELISION, "", ""},}}, - {RPCResult::Type::STR_HEX, "lastblock", "The hash of the block (target_confirmations-1) from the best block on the main chain. This is typically used to feed back into listsinceblock the next time you call it. So you would generally use a target_confirmations of say 6, so you will be continually re-notified of transactions until they've reached 6 confirmations plus any new ones"}, + {RPCResult::Type::STR_HEX, "lastblock", "The hash of the block (target_confirmations-1) from the best block on the main chain, or the genesis hash if the referenced block does not exist yet. This is typically used to feed back into listsinceblock the next time you call it. So you would generally use a target_confirmations of say 6, so you will be continually re-notified of transactions until they've reached 6 confirmations plus any new ones"}, } }, RPCExamples{ @@ -1567,6 +1567,7 @@ static UniValue listsinceblock(const JSONRPCRequest& request) } uint256 lastblock; + target_confirms = std::min(target_confirms, wallet.GetLastBlockHeight() + 1); CHECK_NONFATAL(wallet.chain().findAncestorByHeight(wallet.GetLastBlockHash(), wallet.GetLastBlockHeight() + 1 - target_confirms, FoundBlock().hash(lastblock))); UniValue ret(UniValue::VOBJ); @@ -2338,7 +2339,7 @@ static UniValue getwalletinfo(const JSONRPCRequest& request) {RPCResult::Type::NUM_TIME, "keypoololdest", "the " + UNIX_EPOCH_TIME + " of the oldest pre-generated key in the key pool. 
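The listsinceblock hunk above clamps target_confirms so the ancestor-height lookup stays in range; a small worked sketch of that arithmetic (illustrative only; AncestorHeight is a made-up helper):

#include <algorithm>

// listsinceblock reports the hash of the block at height H + 1 - target_confirms,
// where H is the wallet's last block height. Clamping target_confirms to H + 1
// makes an oversized target resolve to height 0 (the genesis block) instead of
// a negative, non-existent height.
// e.g. H = 200: target_confirms = 6    -> height 195
//               target_confirms = 1000 -> height 0 (genesis)
int AncestorHeight(int last_block_height, int target_confirms)
{
    target_confirms = std::min(target_confirms, last_block_height + 1);
    return last_block_height + 1 - target_confirms;
}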
Legacy wallets only."}, {RPCResult::Type::NUM, "keypoolsize", "how many new keys are pre-generated (only counts external keys)"}, {RPCResult::Type::NUM, "keypoolsize_hd_internal", "how many new keys are pre-generated for internal use (used for change outputs, only appears if the wallet is using this feature, otherwise external keys are used)"}, - {RPCResult::Type::NUM_TIME, "unlocked_until", "the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked"}, + {RPCResult::Type::NUM_TIME, "unlocked_until", /* optional */ true, "the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked (only present for passphrase-encrypted wallets)"}, {RPCResult::Type::STR_AMOUNT, "paytxfee", "the transaction fee configuration, set in " + CURRENCY_UNIT + "/kB"}, {RPCResult::Type::STR_HEX, "hdseedid", /* optional */ true, "the Hash160 of the HD seed (only present when HD is enabled)"}, {RPCResult::Type::BOOL, "private_keys_enabled", "false if privatekeys are disabled for this wallet (enforced watch-only wallet)"}, @@ -3165,7 +3166,7 @@ UniValue signrawtransactionwithwallet(const JSONRPCRequest& request) { {RPCResult::Type::STR_HEX, "hex", "The hex-encoded raw transaction with signature(s)"}, {RPCResult::Type::BOOL, "complete", "If the transaction has a complete set of signatures"}, - {RPCResult::Type::ARR, "errors", "Script verification errors (if there are any)", + {RPCResult::Type::ARR, "errors", /* optional */ true, "Script verification errors (if there are any)", { {RPCResult::Type::OBJ, "", "", { @@ -3222,59 +3223,73 @@ UniValue signrawtransactionwithwallet(const JSONRPCRequest& request) static UniValue bumpfee(const JSONRPCRequest& request) { - RPCHelpMan{"bumpfee", - "\nBumps the fee of an opt-in-RBF transaction T, replacing it with a new transaction B.\n" - "An opt-in RBF transaction with the given txid must be in the wallet.\n" - "The command will pay the additional fee by reducing change outputs or adding inputs when necessary. It may add a new change output if one does not already exist.\n" - "All inputs in the original transaction will be included in the replacement transaction.\n" - "The command will fail if the wallet or mempool contains a transaction that spends one of T's outputs.\n" - "By default, the new fee will be calculated automatically using estimatesmartfee.\n" - "The user can specify a confirmation target for estimatesmartfee.\n" - "Alternatively, the user can specify a fee_rate (" + CURRENCY_UNIT + " per kB) for the new transaction.\n" - "At a minimum, the new fee rate must be high enough to pay an additional new relay fee (incrementalfee\n" - "returned by getnetworkinfo) to enter the node's mempool.\n", + bool want_psbt = request.strMethod == "psbtbumpfee"; + + RPCHelpMan{request.strMethod, + "\nBumps the fee of an opt-in-RBF transaction T, replacing it with a new transaction B.\n" + + std::string(want_psbt ? "Returns a PSBT instead of creating and signing a new transaction.\n" : "") + + "An opt-in RBF transaction with the given txid must be in the wallet.\n" + "The command will pay the additional fee by reducing change outputs or adding inputs when necessary. 
It may add a new change output if one does not already exist.\n" + "All inputs in the original transaction will be included in the replacement transaction.\n" + "The command will fail if the wallet or mempool contains a transaction that spends one of T's outputs.\n" + "By default, the new fee will be calculated automatically using estimatesmartfee.\n" + "The user can specify a confirmation target for estimatesmartfee.\n" + "Alternatively, the user can specify a fee_rate (" + CURRENCY_UNIT + " per kB) for the new transaction.\n" + "At a minimum, the new fee rate must be high enough to pay an additional new relay fee (incrementalfee\n" + "returned by getnetworkinfo) to enter the node's mempool.\n", + { + {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The txid to be bumped"}, + {"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "", { - {"txid", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The txid to be bumped"}, - {"options", RPCArg::Type::OBJ, RPCArg::Optional::OMITTED_NAMED_ARG, "", - { - {"conf_target", RPCArg::Type::NUM, /* default */ "wallet default", "Confirmation target (in blocks)"}, - {"fee_rate", RPCArg::Type::NUM, /* default */ "fall back to 'conf_target'", "fee rate (NOT total fee) to pay, in " + CURRENCY_UNIT + " per kB\n" - " Specify a fee rate instead of relying on the built-in fee estimator.\n" - "Must be at least 0.0001 " + CURRENCY_UNIT + " per kB higher than the current transaction fee rate.\n"}, - {"replaceable", RPCArg::Type::BOOL, /* default */ "true", "Whether the new transaction should still be\n" - " marked bip-125 replaceable. If true, the sequence numbers in the transaction will\n" - " be left unchanged from the original. If false, any input sequence numbers in the\n" - " original transaction that were less than 0xfffffffe will be increased to 0xfffffffe\n" - " so the new transaction will not be explicitly bip-125 replaceable (though it may\n" - " still be replaceable in practice, for example if it has unconfirmed ancestors which\n" - " are replaceable)."}, - {"estimate_mode", RPCArg::Type::STR, /* default */ "unset", std::string() + "The fee estimate mode, must be one of (case insensitive):\n" - " \"" + FeeModes("\"\n\"") + "\""}, - }, - "options"}, - }, - RPCResult{ - RPCResult::Type::OBJ, "", "", { - {RPCResult::Type::STR, "psbt", "The base64-encoded unsigned PSBT of the new transaction. Only returned when wallet private keys are disabled."}, - {RPCResult::Type::STR_HEX, "txid", "The id of the new transaction. Only returned when wallet private keys are enabled."}, - {RPCResult::Type::STR_AMOUNT, "origfee", "The fee of the replaced transaction."}, - {RPCResult::Type::STR_AMOUNT, "fee", "The fee of the new transaction."}, - {RPCResult::Type::ARR, "errors", "Errors encountered during processing (may be empty).", - { - {RPCResult::Type::STR, "", ""}, - }}, - } - }, - RPCExamples{ - "\nBump the fee, get the new transaction\'s txid\n" + - HelpExampleCli("bumpfee", "<txid>") + {"conf_target", RPCArg::Type::NUM, /* default */ "wallet default", "Confirmation target (in blocks)"}, + {"fee_rate", RPCArg::Type::NUM, /* default */ "fall back to 'conf_target'", "fee rate (NOT total fee) to pay, in " + CURRENCY_UNIT + " per kB\n" + " Specify a fee rate instead of relying on the built-in fee estimator.\n" + "Must be at least 0.0001 " + CURRENCY_UNIT + " per kB higher than the current transaction fee rate.\n"}, + {"replaceable", RPCArg::Type::BOOL, /* default */ "true", "Whether the new transaction should still be\n" + " marked bip-125 replaceable. 
If true, the sequence numbers in the transaction will\n" + " be left unchanged from the original. If false, any input sequence numbers in the\n" + " original transaction that were less than 0xfffffffe will be increased to 0xfffffffe\n" + " so the new transaction will not be explicitly bip-125 replaceable (though it may\n" + " still be replaceable in practice, for example if it has unconfirmed ancestors which\n" + " are replaceable)."}, + {"estimate_mode", RPCArg::Type::STR, /* default */ "unset", std::string() + "The fee estimate mode, must be one of (case insensitive):\n" + " \"" + FeeModes("\"\n\"") + "\""}, }, - }.Check(request); + "options"}, + }, + RPCResult{ + RPCResult::Type::OBJ, "", "", Cat(Cat<std::vector<RPCResult>>( + { + {RPCResult::Type::STR, "psbt", "The base64-encoded unsigned PSBT of the new transaction." + std::string(want_psbt ? "" : " Only returned when wallet private keys are disabled. (DEPRECATED)")}, + }, + want_psbt ? std::vector<RPCResult>{} : std::vector<RPCResult>{{RPCResult::Type::STR_HEX, "txid", "The id of the new transaction. Only returned when wallet private keys are enabled."}} + ), + { + {RPCResult::Type::STR_AMOUNT, "origfee", "The fee of the replaced transaction."}, + {RPCResult::Type::STR_AMOUNT, "fee", "The fee of the new transaction."}, + {RPCResult::Type::ARR, "errors", "Errors encountered during processing (may be empty).", + { + {RPCResult::Type::STR, "", ""}, + }}, + }) + }, + RPCExamples{ + "\nBump the fee, get the new transaction\'s" + std::string(want_psbt ? "psbt" : "txid") + "\n" + + HelpExampleCli(request.strMethod, "<txid>") + }, + }.Check(request); std::shared_ptr<CWallet> const wallet = GetWalletForJSONRPCRequest(request); if (!wallet) return NullUniValue; CWallet* const pwallet = wallet.get(); + if (pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS) && !want_psbt) { + if (!pwallet->chain().rpcEnableDeprecated("bumpfee")) { + throw JSONRPCError(RPC_METHOD_DEPRECATED, "Using bumpfee with wallets that have private keys disabled is deprecated. Use psbtbumpfee instead or restart bitcoind with -deprecatedrpc=bumpfee. This functionality will be removed in 0.22"); + } + want_psbt = true; + } + RPCTypeCheck(request.params, {UniValue::VSTR, UniValue::VOBJ}); uint256 hash(ParseHashV(request.params[0], "txid")); @@ -3359,7 +3374,7 @@ static UniValue bumpfee(const JSONRPCRequest& request) // If wallet private keys are enabled, return the new transaction id, // otherwise return the base64-encoded unsigned PSBT of the new transaction. 
- if (!pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) { + if (!want_psbt) { if (!feebumper::SignTransaction(*pwallet, mtx)) { throw JSONRPCError(RPC_WALLET_ERROR, "Can't sign transaction."); } @@ -3392,6 +3407,11 @@ static UniValue bumpfee(const JSONRPCRequest& request) return result; } +static UniValue psbtbumpfee(const JSONRPCRequest& request) +{ + return bumpfee(request); +} + UniValue rescanblockchain(const JSONRPCRequest& request) { RPCHelpMan{"rescanblockchain", @@ -3688,7 +3708,7 @@ UniValue getaddressinfo(const JSONRPCRequest& request) if (meta->has_key_origin) { ret.pushKV("hdkeypath", WriteHDKeypath(meta->key_origin.path)); ret.pushKV("hdseedid", meta->hd_seed_id.GetHex()); - ret.pushKV("hdmasterfingerprint", HexStr(meta->key_origin.fingerprint, meta->key_origin.fingerprint + 4)); + ret.pushKV("hdmasterfingerprint", HexStr(meta->key_origin.fingerprint)); } } } @@ -4137,6 +4157,7 @@ static const CRPCCommand commands[] = { "wallet", "addmultisigaddress", &addmultisigaddress, {"nrequired","keys","label","address_type"} }, { "wallet", "backupwallet", &backupwallet, {"destination"} }, { "wallet", "bumpfee", &bumpfee, {"txid", "options"} }, + { "wallet", "psbtbumpfee", &psbtbumpfee, {"txid", "options"} }, { "wallet", "createwallet", &createwallet, {"wallet_name", "disable_private_keys", "blank", "passphrase", "avoid_reuse", "descriptors"} }, { "wallet", "dumpprivkey", &dumpprivkey, {"address"} }, { "wallet", "dumpwallet", &dumpwallet, {"filename"} }, diff --git a/src/wallet/salvage.cpp b/src/wallet/salvage.cpp index af57210f01..c0755db751 100644 --- a/src/wallet/salvage.cpp +++ b/src/wallet/salvage.cpp @@ -16,14 +16,12 @@ static const char *HEADER_END = "HEADER=END"; static const char *DATA_END = "DATA=END"; typedef std::pair<std::vector<unsigned char>, std::vector<unsigned char> > KeyValPair; -bool RecoverDatabaseFile(const fs::path& file_path) +bool RecoverDatabaseFile(const fs::path& file_path, bilingual_str& error, std::vector<bilingual_str>& warnings) { std::string filename; std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(file_path, filename); - bilingual_str open_err; - if (!env->Open(open_err)) { - tfm::format(std::cerr, "%s\n", open_err.original); + if (!env->Open(error)) { return false; } @@ -39,11 +37,9 @@ bool RecoverDatabaseFile(const fs::path& file_path) int result = env->dbenv->dbrename(nullptr, filename.c_str(), nullptr, newFilename.c_str(), DB_AUTO_COMMIT); - if (result == 0) - LogPrintf("Renamed %s to %s\n", filename, newFilename); - else + if (result != 0) { - LogPrintf("Failed to rename %s to %s\n", filename, newFilename); + error = strprintf(Untranslated("Failed to rename %s to %s"), filename, newFilename); return false; } @@ -60,10 +56,10 @@ bool RecoverDatabaseFile(const fs::path& file_path) Db db(env->dbenv.get(), 0); result = db.verify(newFilename.c_str(), nullptr, &strDump, DB_SALVAGE | DB_AGGRESSIVE); if (result == DB_VERIFY_BAD) { - LogPrintf("Salvage: Database salvage found errors, all data may not be recoverable.\n"); + warnings.push_back(Untranslated("Salvage: Database salvage found errors, all data may not be recoverable.")); } if (result != 0 && result != DB_VERIFY_BAD) { - LogPrintf("Salvage: Database salvage failed with result %d.\n", result); + error = strprintf(Untranslated("Salvage: Database salvage failed with result %d."), result); return false; } @@ -87,7 +83,7 @@ bool RecoverDatabaseFile(const fs::path& file_path) break; getline(strDump, valueHex); if (valueHex == DATA_END) { - LogPrintf("Salvage: WARNING: Number of keys in 
data does not match number of values.\n"); + warnings.push_back(Untranslated("Salvage: WARNING: Number of keys in data does not match number of values.")); break; } salvagedData.push_back(make_pair(ParseHex(keyHex), ParseHex(valueHex))); @@ -96,7 +92,7 @@ bool RecoverDatabaseFile(const fs::path& file_path) bool fSuccess; if (keyHex != DATA_END) { - LogPrintf("Salvage: WARNING: Unexpected end of file while reading salvage output.\n"); + warnings.push_back(Untranslated("Salvage: WARNING: Unexpected end of file while reading salvage output.")); fSuccess = false; } else { fSuccess = (result == 0); @@ -104,10 +100,9 @@ bool RecoverDatabaseFile(const fs::path& file_path) if (salvagedData.empty()) { - LogPrintf("Salvage(aggressive) found no records in %s.\n", newFilename); + error = strprintf(Untranslated("Salvage(aggressive) found no records in %s."), newFilename); return false; } - LogPrintf("Salvage(aggressive) found %u records\n", salvagedData.size()); std::unique_ptr<Db> pdbCopy = MakeUnique<Db>(env->dbenv.get(), 0); int ret = pdbCopy->open(nullptr, // Txn pointer @@ -117,7 +112,7 @@ bool RecoverDatabaseFile(const fs::path& file_path) DB_CREATE, // Flags 0); if (ret > 0) { - LogPrintf("Cannot create database file %s\n", filename); + error = strprintf(Untranslated("Cannot create database file %s"), filename); pdbCopy->close(0); return false; } @@ -141,7 +136,7 @@ bool RecoverDatabaseFile(const fs::path& file_path) } if (!fReadOK) { - LogPrintf("WARNING: WalletBatch::Recover skipping %s: %s\n", strType, strErr); + warnings.push_back(strprintf(Untranslated("WARNING: WalletBatch::Recover skipping %s: %s"), strType, strErr)); continue; } Dbt datKey(&row.first[0], row.first.size()); diff --git a/src/wallet/salvage.h b/src/wallet/salvage.h index e361930f5e..5a8538f942 100644 --- a/src/wallet/salvage.h +++ b/src/wallet/salvage.h @@ -9,6 +9,8 @@ #include <fs.h> #include <streams.h> -bool RecoverDatabaseFile(const fs::path& file_path); +struct bilingual_str; + +bool RecoverDatabaseFile(const fs::path& file_path, bilingual_str& error, std::vector<bilingual_str>& warnings); #endif // BITCOIN_WALLET_SALVAGE_H diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp index d2770a46f7..7ef06663b5 100644 --- a/src/wallet/test/wallet_tests.cpp +++ b/src/wallet/test/wallet_tests.cpp @@ -630,13 +630,13 @@ static size_t CalculateNestedKeyhashInputSize(bool use_max_sig) CPubKey pubkey = key.GetPubKey(); // Generate pubkey hash - uint160 key_hash(Hash160(pubkey.begin(), pubkey.end())); + uint160 key_hash(Hash160(pubkey)); // Create inner-script to enter into keystore. Key hash can't be 0... CScript inner_script = CScript() << OP_0 << std::vector<unsigned char>(key_hash.begin(), key_hash.end()); // Create outer P2SH script for the output - uint160 script_id(Hash160(inner_script.begin(), inner_script.end())); + uint160 script_id(Hash160(inner_script)); CScript script_pubkey = CScript() << OP_HASH160 << std::vector<unsigned char>(script_id.begin(), script_id.end()) << OP_EQUAL; // Add inner-script to key store and key to watchonly diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 6d5282a8c5..9c7b446c23 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -2474,23 +2474,6 @@ bool CWallet::SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, } // At this point, one input was not fully signed otherwise we would have exited already - // Find that input and figure out what went wrong. 
- for (unsigned int i = 0; i < tx.vin.size(); i++) { - // Get the prevout - CTxIn& txin = tx.vin[i]; - auto coin = coins.find(txin.prevout); - if (coin == coins.end() || coin->second.IsSpent()) { - input_errors[i] = "Input not found or already spent"; - continue; - } - - // Check if this input is complete - SignatureData sigdata = DataFromTransaction(tx, i, coin->second.out); - if (!sigdata.complete) { - input_errors[i] = "Unable to sign input, missing keys"; - continue; - } - } return false; } diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index a6d327994b..fa6814d0d3 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -103,7 +103,7 @@ bool WalletBatch::WriteKey(const CPubKey& vchPubKey, const CPrivKey& vchPrivKey, vchKey.insert(vchKey.end(), vchPubKey.begin(), vchPubKey.end()); vchKey.insert(vchKey.end(), vchPrivKey.begin(), vchPrivKey.end()); - return WriteIC(std::make_pair(DBKeys::KEY, vchPubKey), std::make_pair(vchPrivKey, Hash(vchKey.begin(), vchKey.end())), false); + return WriteIC(std::make_pair(DBKeys::KEY, vchPubKey), std::make_pair(vchPrivKey, Hash(vchKey)), false); } bool WalletBatch::WriteCryptedKey(const CPubKey& vchPubKey, @@ -115,7 +115,7 @@ bool WalletBatch::WriteCryptedKey(const CPubKey& vchPubKey, } // Compute a checksum of the encrypted key - uint256 checksum = Hash(vchCryptedSecret.begin(), vchCryptedSecret.end()); + uint256 checksum = Hash(vchCryptedSecret); const auto key = std::make_pair(DBKeys::CRYPTED_KEY, vchPubKey); if (!WriteIC(key, std::make_pair(vchCryptedSecret, checksum), false)) { @@ -209,7 +209,7 @@ bool WalletBatch::WriteDescriptorKey(const uint256& desc_id, const CPubKey& pubk key.insert(key.end(), pubkey.begin(), pubkey.end()); key.insert(key.end(), privkey.begin(), privkey.end()); - return WriteIC(std::make_pair(DBKeys::WALLETDESCRIPTORKEY, std::make_pair(desc_id, pubkey)), std::make_pair(privkey, Hash(key.begin(), key.end())), false); + return WriteIC(std::make_pair(DBKeys::WALLETDESCRIPTORKEY, std::make_pair(desc_id, pubkey)), std::make_pair(privkey, Hash(key)), false); } bool WalletBatch::WriteCryptedDescriptorKey(const uint256& desc_id, const CPubKey& pubkey, const std::vector<unsigned char>& secret) @@ -365,7 +365,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, vchKey.insert(vchKey.end(), vchPubKey.begin(), vchPubKey.end()); vchKey.insert(vchKey.end(), pkey.begin(), pkey.end()); - if (Hash(vchKey.begin(), vchKey.end()) != hash) + if (Hash(vchKey) != hash) { strErr = "Error reading wallet database: CPubKey/CPrivKey corrupt"; return false; @@ -414,7 +414,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, if (!ssValue.eof()) { uint256 checksum; ssValue >> checksum; - if ((checksum_valid = Hash(vchPrivKey.begin(), vchPrivKey.end()) != checksum)) { + if ((checksum_valid = Hash(vchPrivKey) != checksum)) { strErr = "Error reading wallet database: Crypted key corrupt"; return false; } @@ -621,7 +621,7 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, to_hash.insert(to_hash.end(), pubkey.begin(), pubkey.end()); to_hash.insert(to_hash.end(), pkey.begin(), pkey.end()); - if (Hash(to_hash.begin(), to_hash.end()) != hash) + if (Hash(to_hash) != hash) { strErr = "Error reading wallet database: CPubKey/CPrivKey corrupt"; return false; diff --git a/src/wallet/walletdb.h b/src/wallet/walletdb.h index 7c5bf7652b..64d60b1f44 100644 --- a/src/wallet/walletdb.h +++ b/src/wallet/walletdb.h @@ -261,10 +261,6 @@ public: DBErrors 
ZapSelectTx(std::vector<uint256>& vHashIn, std::vector<uint256>& vHashOut); /* Function to determine if a certain KV/key-type is a key (cryptographical key) type */ static bool IsKeyType(const std::string& strType); - /* verifies the database environment */ - static bool VerifyEnvironment(const fs::path& wallet_path, bilingual_str& errorStr); - /* verifies the database file */ - static bool VerifyDatabaseFile(const fs::path& wallet_path, bilingual_str& errorStr); //! write the hdchain model (external chain child index counter) bool WriteHDChain(const CHDChain& chain); diff --git a/src/wallet/wallettool.cpp b/src/wallet/wallettool.cpp index 9f25b1ae7d..c1cba0fd13 100644 --- a/src/wallet/wallettool.cpp +++ b/src/wallet/wallettool.cpp @@ -104,27 +104,6 @@ static void WalletShowInfo(CWallet* wallet_instance) tfm::format(std::cout, "Address Book: %zu\n", wallet_instance->m_address_book.size()); } -static bool SalvageWallet(const fs::path& path) -{ - // Create a Database handle to allow for the db to be initialized before recovery - std::unique_ptr<WalletDatabase> database = CreateWalletDatabase(path); - - // Initialize the environment before recovery - bilingual_str error_string; - try { - database->Verify(error_string); - } catch (const fs::filesystem_error& e) { - error_string = Untranslated(strprintf("Error loading wallet. %s", fsbridge::get_filesystem_error_message(e))); - } - if (!error_string.original.empty()) { - tfm::format(std::cerr, "Failed to open wallet for salvage :%s\n", error_string.original); - return false; - } - - // Perform the recovery - return RecoverDatabaseFile(path); -} - bool ExecuteWalletToolFunc(const std::string& command, const std::string& name) { fs::path path = fs::absolute(name, GetWalletDir()); @@ -147,7 +126,18 @@ bool ExecuteWalletToolFunc(const std::string& command, const std::string& name) WalletShowInfo(wallet_instance.get()); wallet_instance->Close(); } else if (command == "salvage") { - return SalvageWallet(path); + bilingual_str error; + std::vector<bilingual_str> warnings; + bool ret = RecoverDatabaseFile(path, error, warnings); + if (!ret) { + for (const auto warning : warnings) { + tfm::format(std::cerr, "%s\n", warning.original); + } + if (!error.empty()) { + tfm::format(std::cerr, "%s\n", error.original); + } + } + return ret; } } else { tfm::format(std::cerr, "Invalid command: %s\n", command); diff --git a/test/functional/example_test.py b/test/functional/example_test.py index 5d782026dc..34e4999329 100755 --- a/test/functional/example_test.py +++ b/test/functional/example_test.py @@ -166,7 +166,7 @@ class ExampleTest(BitcoinTestFramework): height = self.nodes[0].getblockcount() - for i in range(10): + for _ in range(10): # Use the mininode and blocktools functionality to manually build a block # Calling the generate() rpc is easier, but this allows us to exactly # control the blocks and transactions. 
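For reference, a minimal caller of the reworked RecoverDatabaseFile() from the wallet/salvage.h and wallettool.cpp hunks above (illustrative only; TrySalvage and out_message are placeholders, and the pattern simply mirrors what the salvage command now does):

#include <fs.h>
#include <util/translation.h>   // bilingual_str
#include <wallet/salvage.h>

#include <string>
#include <vector>

bool TrySalvage(const fs::path& wallet_path, std::string& out_message)
{
    // RecoverDatabaseFile() now reports through out-parameters instead of
    // logging: `error` is set on failure, `warnings` may be filled either way.
    bilingual_str error;
    std::vector<bilingual_str> warnings;
    const bool ok = RecoverDatabaseFile(wallet_path, error, warnings);
    for (const bilingual_str& warning : warnings) {
        out_message += warning.original + "\n";
    }
    if (!ok && !error.original.empty()) out_message += error.original + "\n";
    return ok;
}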
diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py index 79777f5582..f19ee12f95 100755 --- a/test/functional/feature_assumevalid.py +++ b/test/functional/feature_assumevalid.py @@ -123,7 +123,7 @@ class AssumeValidTest(BitcoinTestFramework): height += 1 # Bury the block 100 deep so the coinbase output is spendable - for i in range(100): + for _ in range(100): block = create_block(self.tip, create_coinbase(height), self.block_time) block.solve() self.blocks.append(block) @@ -149,7 +149,7 @@ class AssumeValidTest(BitcoinTestFramework): height += 1 # Bury the assumed valid block 2100 deep - for i in range(2100): + for _ in range(2100): block = create_block(self.tip, create_coinbase(height), self.block_time) block.nVersion = 4 block.solve() diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py index 19cdc10935..1253c45418 100755 --- a/test/functional/feature_bip68_sequence.py +++ b/test/functional/feature_bip68_sequence.py @@ -141,7 +141,7 @@ class BIP68Test(BitcoinTestFramework): # some of those inputs to be sequence locked (and randomly choose # between height/time locking). Small random chance of making the locks # all pass. - for i in range(400): + for _ in range(400): # Randomly choose up to 10 inputs num_inputs = random.randint(1, 10) random.shuffle(utxos) @@ -260,7 +260,7 @@ class BIP68Test(BitcoinTestFramework): # Use prioritisetransaction to lower the effective feerate to 0 self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(-self.relayfee*COIN)) cur_time = int(time.time()) - for i in range(10): + for _ in range(10): self.nodes[0].setmocktime(cur_time + 600) self.nodes[0].generate(1) cur_time += 600 diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index 6619d83dc4..c74761869b 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -125,7 +125,7 @@ class FullBlockTest(BitcoinTestFramework): # collect spendable outputs now to avoid cluttering the code later on out = [] - for i in range(NUM_OUTPUTS_TO_COLLECT): + for _ in range(NUM_OUTPUTS_TO_COLLECT): out.append(self.get_spendable_output()) # Start by building a couple of blocks on top (which output is spent is diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py index c6852ef017..dfb3683143 100755 --- a/test/functional/feature_csv_activation.py +++ b/test/functional/feature_csv_activation.py @@ -161,7 +161,7 @@ class BIP68_112_113Test(BitcoinTestFramework): def generate_blocks(self, number): test_blocks = [] - for i in range(number): + for _ in range(number): block = self.create_test_block([]) test_blocks.append(block) self.last_block_time += 600 @@ -209,22 +209,22 @@ class BIP68_112_113Test(BitcoinTestFramework): # Note we reuse inputs for v1 and v2 txs so must test these separately # 16 normal inputs bip68inputs = [] - for i in range(16): + for _ in range(16): bip68inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)) # 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig) bip112basicinputs = [] - for j in range(2): + for _ in range(2): inputs = [] - for i in range(16): + for _ in range(16): inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)) bip112basicinputs.append(inputs) # 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig) 
bip112diverseinputs = [] - for j in range(2): + for _ in range(2): inputs = [] - for i in range(16): + for _ in range(16): inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress)) bip112diverseinputs.append(inputs) diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py index 7b38e09bf9..7a2e35c095 100755 --- a/test/functional/feature_dbcrash.py +++ b/test/functional/feature_dbcrash.py @@ -195,7 +195,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework): while len(utxo_list) >= 2 and num_transactions < count: tx = CTransaction() input_amount = 0 - for i in range(2): + for _ in range(2): utxo = utxo_list.pop() tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout']))) input_amount += int(utxo['amount'] * COIN) @@ -205,7 +205,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework): # Sanity check -- if we chose inputs that are too small, skip continue - for i in range(3): + for _ in range(3): tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey']))) # Sign and send the transaction to get into the mempool diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py index 3cf0fb8f7b..702a1d9995 100755 --- a/test/functional/feature_fee_estimation.py +++ b/test/functional/feature_fee_estimation.py @@ -176,9 +176,9 @@ class EstimateFeeTest(BitcoinTestFramework): # We shuffle our confirmed txout set before each set of transactions # small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible # resorting to tx's that depend on the mempool when those run out - for i in range(numblocks): + for _ in range(numblocks): random.shuffle(self.confutxo) - for j in range(random.randrange(100 - 50, 100 + 50)): + for _ in range(random.randrange(100 - 50, 100 + 50)): from_index = random.randint(1, 2) (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo, self.memutxo, Decimal("0.005"), min_fee, min_fee) @@ -243,7 +243,7 @@ class EstimateFeeTest(BitcoinTestFramework): self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting self.log.info("Will output estimates for 1/2/3/6/15/25 blocks") - for i in range(2): + for _ in range(2): self.log.info("Creating transactions and mining them with a block size that can't keep up") # Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine self.transact_and_mine(10, self.nodes[2]) diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py index 5538d6d3b4..0dc2839191 100755 --- a/test/functional/feature_maxuploadtarget.py +++ b/test/functional/feature_maxuploadtarget.py @@ -104,7 +104,7 @@ class MaxUploadTest(BitcoinTestFramework): assert_equal(len(self.nodes[0].getpeerinfo()), 3) # At most a couple more tries should succeed (depending on how long # the test has been running so far). 
- for i in range(3): + for _ in range(3): p2p_conns[0].send_message(getdata_request) p2p_conns[0].wait_for_disconnect() assert_equal(len(self.nodes[0].getpeerinfo()), 2) diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index e46e5aacc8..02fa88f7c8 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -147,7 +147,7 @@ class PruneTest(BitcoinTestFramework): # Create stale blocks in manageable sized chunks self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds") - for j in range(12): + for _ in range(12): # Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain # Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects disconnect_nodes(self.nodes[0], 1) diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py index acf551ef69..1b531ad51d 100755 --- a/test/functional/feature_rbf.py +++ b/test/functional/feature_rbf.py @@ -376,7 +376,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1)) outputs = [] - for i in range(MAX_REPLACEMENT_LIMIT+1): + for _ in range(MAX_REPLACEMENT_LIMIT+1): outputs.append(CTxOut(split_value, CScript([1]))) splitting_tx = CTransaction() diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py index 5195d20dcb..0842972779 100755 --- a/test/functional/feature_segwit.py +++ b/test/functional/feature_segwit.py @@ -126,11 +126,11 @@ class SegWitTest(BitcoinTestFramework): assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript)) p2sh_ids.append([]) wit_ids.append([]) - for v in range(2): + for _ in range(2): p2sh_ids[i].append([]) wit_ids[i].append([]) - for i in range(5): + for _ in range(5): for n in range(3): for v in range(2): wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999"))) diff --git a/test/functional/mempool_package_onemore.py b/test/functional/mempool_package_onemore.py index 0739d7e29b..e956fe07d2 100755 --- a/test/functional/mempool_package_onemore.py +++ b/test/functional/mempool_package_onemore.py @@ -31,7 +31,7 @@ class MempoolPackagesTest(BitcoinTestFramework): for (txid, vout) in zip(parent_txids, vouts): inputs.append({'txid' : txid, 'vout' : vout}) outputs = {} - for i in range(num_outputs): + for _ in range(num_outputs): outputs[node.getnewaddress()] = send_value rawtx = node.createrawtransaction(inputs, outputs, 0, True) signedtx = node.signrawtransactionwithwallet(rawtx) diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py index 542d24f4be..98dac30ace 100755 --- a/test/functional/mempool_packages.py +++ b/test/functional/mempool_packages.py @@ -48,7 +48,7 @@ class MempoolPackagesTest(BitcoinTestFramework): send_value = satoshi_round((value - fee)/num_outputs) inputs = [ {'txid' : parent_txid, 'vout' : vout} ] outputs = {} - for i in range(num_outputs): + for _ in range(num_outputs): outputs[node.getnewaddress()] = send_value rawtx = node.createrawtransaction(inputs, outputs) signedtx = node.signrawtransactionwithwallet(rawtx) @@ -70,7 +70,7 @@ class MempoolPackagesTest(BitcoinTestFramework): # MAX_ANCESTORS transactions off a confirmed tx should be fine chain = [] witness_chain = [] - for i in range(MAX_ANCESTORS): + for _ in range(MAX_ANCESTORS): (txid, sent_value) = 
self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1) value = sent_value chain.append(txid) @@ -245,7 +245,7 @@ class MempoolPackagesTest(BitcoinTestFramework): # Sign and send up to MAX_DESCENDANT transactions chained off the parent tx chain = [] # save sent txs for the purpose of checking node1's mempool later (see below) - for i in range(MAX_DESCENDANTS - 1): + for _ in range(MAX_DESCENDANTS - 1): utxo = transaction_package.pop(0) (txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10) chain.append(txid) @@ -312,7 +312,7 @@ class MempoolPackagesTest(BitcoinTestFramework): send_value = satoshi_round((value - fee)/2) inputs = [ {'txid' : txid, 'vout' : vout} ] outputs = {} - for i in range(2): + for _ in range(2): outputs[self.nodes[0].getnewaddress()] = send_value rawtx = self.nodes[0].createrawtransaction(inputs, outputs) signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx) @@ -326,7 +326,7 @@ class MempoolPackagesTest(BitcoinTestFramework): # Create tx2-7 vout = 1 txid = tx0_id - for i in range(6): + for _ in range(6): (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1) vout = 0 value = sent_value diff --git a/test/functional/mempool_persist.py b/test/functional/mempool_persist.py index 5d00648aed..85c4d6d570 100755 --- a/test/functional/mempool_persist.py +++ b/test/functional/mempool_persist.py @@ -62,7 +62,7 @@ class MempoolPersistTest(BitcoinTestFramework): def run_test(self): self.log.debug("Send 5 transactions from node2 (to its own address)") tx_creation_time_lower = int(time.time()) - for i in range(5): + for _ in range(5): last_txid = self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10")) node2_balance = self.nodes[2].getbalance() self.sync_all() diff --git a/test/functional/mempool_updatefromblock.py b/test/functional/mempool_updatefromblock.py index 8a703ef009..8baf974a0a 100755 --- a/test/functional/mempool_updatefromblock.py +++ b/test/functional/mempool_updatefromblock.py @@ -73,7 +73,7 @@ class MempoolUpdateFromBlockTest(BitcoinTestFramework): n_outputs = size - tx_count output_value = ((inputs_value - fee) / Decimal(n_outputs)).quantize(Decimal('0.00000001')) outputs = {} - for n in range(0, n_outputs): + for _ in range(n_outputs): outputs[self.nodes[0].getnewaddress()] = output_value else: output_value = (inputs_value - fee).quantize(Decimal('0.00000001')) diff --git a/test/functional/p2p_blockfilters.py b/test/functional/p2p_blockfilters.py index 6d947ac660..a9e86bd2fc 100755 --- a/test/functional/p2p_blockfilters.py +++ b/test/functional/p2p_blockfilters.py @@ -4,12 +4,13 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Tests NODE_COMPACT_FILTERS (BIP 157/158). -Tests that a node configured with -blockfilterindex and -peerblockfilters can serve -cfheaders and cfcheckpts. +Tests that a node configured with -blockfilterindex and -peerblockfilters signals +NODE_COMPACT_FILTERS and can serve cfilters, cfheaders and cfcheckpts. """ from test_framework.messages import ( FILTER_TYPE_BASIC, + NODE_COMPACT_FILTERS, hash256, msg_getcfcheckpt, msg_getcfheaders, @@ -70,6 +71,14 @@ class CompactFiltersTest(BitcoinTestFramework): self.nodes[1].generate(1001) wait_until(lambda: self.nodes[1].getblockcount() == 2000) + # Check that nodes have signalled NODE_COMPACT_FILTERS correctly. 
+ assert node0.nServices & NODE_COMPACT_FILTERS != 0 + assert node1.nServices & NODE_COMPACT_FILTERS == 0 + + # Check that the localservices is as expected. + assert int(self.nodes[0].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS != 0 + assert int(self.nodes[1].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS == 0 + self.log.info("get cfcheckpt on chain to be re-orged out.") request = msg_getcfcheckpt( filter_type=FILTER_TYPE_BASIC, diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py index 0b3738b572..225d393e1b 100755 --- a/test/functional/p2p_compactblocks.py +++ b/test/functional/p2p_compactblocks.py @@ -125,7 +125,7 @@ class CompactBlocksTest(BitcoinTestFramework): out_value = total_value // 10 tx = CTransaction() tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b'')) - for i in range(10): + for _ in range(10): tx.vout.append(CTxOut(out_value, CScript([OP_TRUE]))) tx.rehash() @@ -266,7 +266,7 @@ class CompactBlocksTest(BitcoinTestFramework): address = node.getnewaddress() segwit_tx_generated = False - for i in range(num_transactions): + for _ in range(num_transactions): txid = node.sendtoaddress(address, 0.1) hex_tx = node.gettransaction(txid)["hex"] tx = FromHex(CTransaction(), hex_tx) @@ -294,12 +294,11 @@ class CompactBlocksTest(BitcoinTestFramework): block.rehash() # Wait until the block was announced (via compact blocks) - wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) + wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: - assert "cmpctblock" in test_node.last_message # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block) @@ -309,12 +308,11 @@ class CompactBlocksTest(BitcoinTestFramework): inv = CInv(MSG_CMPCT_BLOCK, block_hash) test_node.send_message(msg_getdata([inv])) - wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) + wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock) # Now fetch and check the compact block header_and_shortids = None with mininode_lock: - assert "cmpctblock" in test_node.last_message # Convert the on-the-wire representation to absolute indexes header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids) self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block) @@ -418,7 +416,7 @@ class CompactBlocksTest(BitcoinTestFramework): def build_block_with_transactions(self, node, utxo, num_transactions): block = self.build_block_on_tip(node) - for i in range(num_transactions): + for _ in range(num_transactions): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b'')) tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) @@ -627,7 +625,7 @@ class CompactBlocksTest(BitcoinTestFramework): # Test that requesting old compactblocks doesn't work. 
MAX_CMPCTBLOCK_DEPTH = 5 new_blocks = [] - for i in range(MAX_CMPCTBLOCK_DEPTH + 1): + for _ in range(MAX_CMPCTBLOCK_DEPTH + 1): test_node.clear_block_announcement() new_blocks.append(node.generate(1)[0]) wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock) @@ -691,10 +689,9 @@ class CompactBlocksTest(BitcoinTestFramework): node.submitblock(ToHex(block)) for l in listeners: - wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock) + wait_until(lambda: "cmpctblock" in l.last_message, timeout=30, lock=mininode_lock) with mininode_lock: for l in listeners: - assert "cmpctblock" in l.last_message l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256() assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256) diff --git a/test/functional/p2p_feefilter.py b/test/functional/p2p_feefilter.py index 73afe9adc4..0b51d8f4bb 100755 --- a/test/functional/p2p_feefilter.py +++ b/test/functional/p2p_feefilter.py @@ -19,7 +19,7 @@ def hashToHex(hash): # Wait up to 60 secs to see if the testnode has received all the expected invs def allInvsMatch(invsExpected, testnode): - for x in range(60): + for _ in range(60): with mininode_lock: if (sorted(invsExpected) == sorted(testnode.txinvs)): return True @@ -91,7 +91,7 @@ class FeeFilterTest(BitcoinTestFramework): # Test that invs are received by test connection for all txs at # feerate of .2 sat/byte node1.settxfee(Decimal("0.00000200")) - txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] + txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)] assert allInvsMatch(txids, conn) conn.clear_invs() @@ -100,14 +100,14 @@ class FeeFilterTest(BitcoinTestFramework): # Test that txs are still being received by test connection (paying .15 sat/byte) node1.settxfee(Decimal("0.00000150")) - txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] + txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)] assert allInvsMatch(txids, conn) conn.clear_invs() # Change tx fee rate to .1 sat/byte and test they are no longer received # by the test connection node1.settxfee(Decimal("0.00000100")) - [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] + [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)] self.sync_mempools() # must be sure node 0 has received all txs # Send one transaction from node0 that should be received, so that we @@ -124,7 +124,7 @@ class FeeFilterTest(BitcoinTestFramework): # Remove fee filter and check that txs are received again conn.send_and_ping(msg_feefilter(0)) - txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] + txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)] assert allInvsMatch(txids, conn) conn.clear_invs() diff --git a/test/functional/p2p_filter.py b/test/functional/p2p_filter.py index 741da3be31..ce3856fc95 100755 --- a/test/functional/p2p_filter.py +++ b/test/functional/p2p_filter.py @@ -218,7 +218,6 @@ class FilterTest(BitcoinTestFramework): # Add peer but do not send version yet filter_peer_without_nrelay = self.nodes[0].add_p2p_connection(P2PBloomFilter(), send_version=False, wait_for_verack=False) # Send version with fRelay=False - filter_peer_without_nrelay.wait_until(lambda: filter_peer_without_nrelay.is_connected, timeout=10) version_without_fRelay = msg_version() version_without_fRelay.nRelay = 0 filter_peer_without_nrelay.send_message(version_without_fRelay) diff --git 
a/test/functional/p2p_getaddr_caching.py b/test/functional/p2p_getaddr_caching.py new file mode 100755 index 0000000000..c9278eab92 --- /dev/null +++ b/test/functional/p2p_getaddr_caching.py @@ -0,0 +1,109 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test addr response caching""" + +import time +from test_framework.messages import ( + CAddress, + NODE_NETWORK, + NODE_WITNESS, + msg_addr, + msg_getaddr, +) +from test_framework.mininode import ( + P2PInterface, + mininode_lock +) +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, +) + +MAX_ADDR_TO_SEND = 1000 + +def gen_addrs(n): + addrs = [] + for i in range(n): + addr = CAddress() + addr.time = int(time.time()) + addr.nServices = NODE_NETWORK | NODE_WITNESS + # Use first octets to occupy different AddrMan buckets + first_octet = i >> 8 + second_octet = i % 256 + addr.ip = "{}.{}.1.1".format(first_octet, second_octet) + addr.port = 8333 + addrs.append(addr) + return addrs + +class AddrReceiver(P2PInterface): + + def __init__(self): + super().__init__() + self.received_addrs = None + + def get_received_addrs(self): + with mininode_lock: + return self.received_addrs + + def on_addr(self, message): + self.received_addrs = [] + for addr in message.addrs: + self.received_addrs.append(addr.ip) + + def addr_received(self): + return self.received_addrs is not None + + +class AddrTest(BitcoinTestFramework): + def set_test_params(self): + self.setup_clean_chain = False + self.num_nodes = 1 + + def run_test(self): + self.log.info('Create connection that sends and requests addr messages') + addr_source = self.nodes[0].add_p2p_connection(P2PInterface()) + + msg_send_addrs = msg_addr() + self.log.info('Fill peer AddrMan with a lot of records') + # Since these addrs are sent from the same source, not all of them will be stored, + # because we allocate a limited number of AddrMan buckets per addr source. 
+ total_addrs = 10000 + addrs = gen_addrs(total_addrs) + for i in range(int(total_addrs/MAX_ADDR_TO_SEND)): + msg_send_addrs.addrs = addrs[i * MAX_ADDR_TO_SEND:(i + 1) * MAX_ADDR_TO_SEND] + addr_source.send_and_ping(msg_send_addrs) + + responses = [] + self.log.info('Send many addr requests within short time to receive same response') + N = 5 + cur_mock_time = int(time.time()) + for i in range(N): + addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver()) + addr_receiver.send_and_ping(msg_getaddr()) + # Trigger response + cur_mock_time += 5 * 60 + self.nodes[0].setmocktime(cur_mock_time) + addr_receiver.wait_until(addr_receiver.addr_received) + responses.append(addr_receiver.get_received_addrs()) + for response in responses[1:]: + assert_equal(response, responses[0]) + assert(len(response) < MAX_ADDR_TO_SEND) + + cur_mock_time += 3 * 24 * 60 * 60 + self.nodes[0].setmocktime(cur_mock_time) + + self.log.info('After time passed, see a new response to addr request') + last_addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver()) + last_addr_receiver.send_and_ping(msg_getaddr()) + # Trigger response + cur_mock_time += 5 * 60 + self.nodes[0].setmocktime(cur_mock_time) + last_addr_receiver.wait_until(last_addr_receiver.addr_received) + # new response is different + assert(set(responses[0]) != set(last_addr_receiver.get_received_addrs())) + + +if __name__ == '__main__': + AddrTest().main() diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py index fe6e236fc4..2fc5245241 100755 --- a/test/functional/p2p_leak.py +++ b/test/functional/p2p_leak.py @@ -63,16 +63,12 @@ class CLazyNode(P2PInterface): def on_getblocktxn(self, message): self.bad_message(message) def on_blocktxn(self, message): self.bad_message(message) + # Node that never sends a version. We'll use this to send a bunch of messages # anyway, and eventually get disconnected. class CNodeNoVersionMisbehavior(CLazyNode): - # Send enough veracks without a message to reach the peer discouragement - # threshold. This should get us disconnected. NOTE: implementation-specific - # test; update if our discouragement policy for peer misbehavior changes. - def on_open(self): - super().on_open() - for _ in range(DISCOURAGEMENT_THRESHOLD): - self.send_message(msg_verack()) + pass + # Node that never sends a version. This one just sits idle and hopes to receive # any message (it shouldn't!) @@ -80,6 +76,7 @@ class CNodeNoVersionIdle(CLazyNode): def __init__(self): super().__init__() + # Node that sends a version but not a verack. class CNodeNoVerackIdle(CLazyNode): def __init__(self): @@ -114,6 +111,11 @@ class P2PLeakTest(BitcoinTestFramework): no_version_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVersionIdle(), send_version=False, wait_for_verack=False) no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle(), wait_for_verack=False) + # Send enough veracks without a message to reach the peer discouragement + # threshold. This should get us disconnected. + for _ in range(DISCOURAGEMENT_THRESHOLD): + no_version_disconnect_node.send_message(msg_verack()) + # Wait until we got the verack in response to the version. 
Though, don't wait for the other node to receive the # verack, since we never sent one no_verack_idlenode.wait_for_verack() @@ -153,7 +155,6 @@ class P2PLeakTest(BitcoinTestFramework): p2p_old_node = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False) old_version_msg = msg_version() old_version_msg.nVersion = 31799 - wait_until(lambda: p2p_old_node.is_connected) with self.nodes[0].assert_debug_log(['peer=4 using obsolete version 31799; disconnecting']): p2p_old_node.send_message(old_version_msg) p2p_old_node.wait_for_disconnect() diff --git a/test/functional/p2p_permissions.py b/test/functional/p2p_permissions.py index 32a795e345..254352c816 100755 --- a/test/functional/p2p_permissions.py +++ b/test/functional/p2p_permissions.py @@ -96,7 +96,7 @@ class P2PPermissionsTests(BitcoinTestFramework): self.checkpermission( # all permission added ["-whitelist=all@127.0.0.1"], - ["forcerelay", "noban", "mempool", "bloomfilter", "relay", "download"], + ["forcerelay", "noban", "mempool", "bloomfilter", "relay", "download", "addr"], False) self.stop_node(1) diff --git a/test/functional/p2p_ping.py b/test/functional/p2p_ping.py index e00af88cc4..5f5fd3e104 100755 --- a/test/functional/p2p_ping.py +++ b/test/functional/p2p_ping.py @@ -7,13 +7,8 @@ import time -from test_framework.messages import ( - msg_pong, -) -from test_framework.mininode import ( - P2PInterface, - wait_until, -) +from test_framework.messages import msg_pong +from test_framework.mininode import P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal @@ -78,7 +73,7 @@ class PingPongTest(BitcoinTestFramework): with self.nodes[0].assert_debug_log(['pong peer=0: Nonce mismatch']): # mock time PING_INTERVAL ahead to trigger node into sending a ping self.mock_forward(PING_INTERVAL + 1) - wait_until(lambda: 'ping' in no_pong_node.last_message) + no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) self.mock_forward(9) # Send the wrong pong no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce - 1)) @@ -93,27 +88,27 @@ class PingPongTest(BitcoinTestFramework): assert 'ping' not in no_pong_node.last_message # mock time PING_INTERVAL ahead to trigger node into sending a ping self.mock_forward(PING_INTERVAL + 1) - wait_until(lambda: 'ping' in no_pong_node.last_message) + no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) ping_delay = 29 self.mock_forward(ping_delay) - wait_until(lambda: 'ping' in no_pong_node.last_message) + no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce)) self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None) self.log.info('Check that minping is decreased after a fast roundtrip') # mock time PING_INTERVAL ahead to trigger node into sending a ping self.mock_forward(PING_INTERVAL + 1) - wait_until(lambda: 'ping' in no_pong_node.last_message) + no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) ping_delay = 9 self.mock_forward(ping_delay) - wait_until(lambda: 'ping' in no_pong_node.last_message) + no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce)) self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None) self.log.info('Check that peer is disconnected after ping timeout') assert 'ping' not in 
no_pong_node.last_message self.nodes[0].ping() - wait_until(lambda: 'ping' in no_pong_node.last_message) + no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message) with self.nodes[0].assert_debug_log(['ping timeout: 1201.000000s']): self.mock_forward(20 * 60 + 1) time.sleep(4) # peertimeout + 1 diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index 9915b844d1..564e49f3d8 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -174,6 +174,9 @@ class TestP2PConn(P2PInterface): self.last_wtxidrelay.append(message) def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True, use_wtxid=False): + if success: + # sanity check + assert (self.wtxidrelay and use_wtxid) or (not self.wtxidrelay and not use_wtxid) with mininode_lock: self.last_message.pop("getdata", None) if use_wtxid: @@ -259,6 +262,8 @@ class SegWitTest(BitcoinTestFramework): self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK) # self.std_node is for testing node1 (fRequireStandard=true) self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS) + # self.std_wtx_node is for testing node1 with wtxid relay + self.std_wtx_node = self.nodes[1].add_p2p_connection(TestP2PConn(wtxidrelay=True), services=NODE_NETWORK | NODE_WITNESS) assert self.test_node.nServices & NODE_WITNESS != 0 @@ -942,7 +947,7 @@ class SegWitTest(BitcoinTestFramework): parent_tx = CTransaction() parent_tx.vin.append(CTxIn(prevout, b"")) child_value = int(value / NUM_OUTPUTS) - for i in range(NUM_OUTPUTS): + for _ in range(NUM_OUTPUTS): parent_tx.vout.append(CTxOut(child_value, script_pubkey)) parent_tx.vout[0].nValue -= 50000 assert parent_tx.vout[0].nValue > 0 @@ -952,7 +957,7 @@ class SegWitTest(BitcoinTestFramework): for i in range(NUM_OUTPUTS): child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b"")) child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))] - for i in range(NUM_OUTPUTS): + for _ in range(NUM_OUTPUTS): child_tx.wit.vtxinwit.append(CTxInWitness()) child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program] child_tx.rehash() @@ -1199,7 +1204,7 @@ class SegWitTest(BitcoinTestFramework): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) value = self.utxo[0].nValue - for i in range(10): + for _ in range(10): tx.vout.append(CTxOut(int(value / 10), script_pubkey)) tx.vout[0].nValue -= 1000 assert tx.vout[0].nValue >= 0 @@ -1319,9 +1324,14 @@ class SegWitTest(BitcoinTestFramework): tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2] tx3.rehash() - # Node will not be blinded to the transaction + # Node will not be blinded to the transaction, requesting it any number of times + # if it is being announced via txid relay. + # Node will be blinded to the transaction via wtxid, however. 
self.std_node.announce_tx_and_wait_for_getdata(tx3) + self.std_wtx_node.announce_tx_and_wait_for_getdata(tx3, use_wtxid=True) test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size') + self.std_node.announce_tx_and_wait_for_getdata(tx3) + self.std_wtx_node.announce_tx_and_wait_for_getdata(tx3, use_wtxid=True, success=False) # Remove witness stuffing, instead add extra witness push on stack tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])) @@ -1372,7 +1382,7 @@ class SegWitTest(BitcoinTestFramework): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS - for i in range(NUM_SEGWIT_VERSIONS): + for _ in range(NUM_SEGWIT_VERSIONS): tx.vout.append(CTxOut(split_value, CScript([OP_TRUE]))) tx.rehash() block = self.build_next_block() @@ -1418,7 +1428,7 @@ class SegWitTest(BitcoinTestFramework): temp_utxo.pop() # last entry in temp_utxo was the output we just spent temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue)) - # Spend everything in temp_utxo back to an OP_TRUE output. + # Spend everything in temp_utxo into an segwit v1 output. tx3 = CTransaction() total_value = 0 for i in temp_utxo: @@ -1426,8 +1436,16 @@ class SegWitTest(BitcoinTestFramework): tx3.wit.vtxinwit.append(CTxInWitness()) total_value += i.nValue tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program] - tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE]))) + tx3.vout.append(CTxOut(total_value - 1000, script_pubkey)) tx3.rehash() + + # First we test this transaction against fRequireStandard=true node + # making sure the txid is added to the reject filter + self.std_node.announce_tx_and_wait_for_getdata(tx3) + test_transaction_acceptance(self.nodes[1], self.std_node, tx3, with_witness=True, accepted=False, reason="bad-txns-nonstandard-inputs") + # Now the node will no longer ask for getdata of this transaction when advertised by same txid + self.std_node.announce_tx_and_wait_for_getdata(tx3, timeout=5, success=False) + # Spending a higher version witness output is not allowed by policy, # even with fRequireStandard=false. test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades") @@ -1646,7 +1664,7 @@ class SegWitTest(BitcoinTestFramework): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b"")) split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS - for i in range(NUM_SIGHASH_TESTS): + for _ in range(NUM_SIGHASH_TESTS): tx.vout.append(CTxOut(split_value, script_pubkey)) tx.wit.vtxinwit.append(CTxInWitness()) sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key) @@ -1676,7 +1694,7 @@ class SegWitTest(BitcoinTestFramework): tx.wit.vtxinwit.append(CTxInWitness()) total_value += temp_utxos[i].nValue split_value = total_value // num_outputs - for i in range(num_outputs): + for _ in range(num_outputs): tx.vout.append(CTxOut(split_value, script_pubkey)) for i in range(num_inputs): # Now try to sign each input, using a random hashtype. 
@@ -1974,7 +1992,7 @@ class SegWitTest(BitcoinTestFramework): split_value = self.utxo[0].nValue // outputs tx = CTransaction() tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")) - for i in range(outputs): + for _ in range(outputs): tx.vout.append(CTxOut(split_value, script_pubkey)) tx.vout[-2].scriptPubKey = script_pubkey_toomany tx.vout[-1].scriptPubKey = script_pubkey_justright @@ -2060,7 +2078,7 @@ class SegWitTest(BitcoinTestFramework): if (len(tx.wit.vtxinwit) != len(tx.vin)): # vtxinwit must have the same length as vin tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)] - for i in range(len(tx.wit.vtxinwit), len(tx.vin)): + for _ in range(len(tx.wit.vtxinwit), len(tx.vin)): tx.wit.vtxinwit.append(CTxInWitness()) r += tx.wit.serialize() r += struct.pack("<I", tx.nLockTime) @@ -2133,17 +2151,17 @@ class SegWitTest(BitcoinTestFramework): # Send tx2 through; it's an orphan so won't be accepted with mininode_lock: - self.tx_node.last_message.pop("getdata", None) - test_transaction_acceptance(self.nodes[0], self.tx_node, tx2, with_witness=True, accepted=False) + self.wtx_node.last_message.pop("getdata", None) + test_transaction_acceptance(self.nodes[0], self.wtx_node, tx2, with_witness=True, accepted=False) - # Expect a request for parent (tx) due to use of non-WTX peer - self.tx_node.wait_for_getdata([tx.sha256], 60) + # Expect a request for parent (tx) by txid despite use of WTX peer + self.wtx_node.wait_for_getdata([tx.sha256], 60) with mininode_lock: - lgd = self.tx_node.lastgetdata[:] + lgd = self.wtx_node.lastgetdata[:] assert_equal(lgd, [CInv(MSG_TX|MSG_WITNESS_FLAG, tx.sha256)]) # Send tx through - test_transaction_acceptance(self.nodes[0], self.tx_node, tx, with_witness=False, accepted=True) + test_transaction_acceptance(self.nodes[0], self.wtx_node, tx, with_witness=False, accepted=True) # Check tx2 is there now assert_equal(tx2.hash in self.nodes[0].getrawmempool(), True) diff --git a/test/functional/p2p_sendheaders.py b/test/functional/p2p_sendheaders.py index 481b1c1841..126a46bd53 100755 --- a/test/functional/p2p_sendheaders.py +++ b/test/functional/p2p_sendheaders.py @@ -328,7 +328,7 @@ class SendHeadersTest(BitcoinTestFramework): for j in range(2): self.log.debug("Part 2.{}.{}: starting...".format(i, j)) blocks = [] - for b in range(i + 1): + for _ in range(i + 1): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 @@ -443,7 +443,7 @@ class SendHeadersTest(BitcoinTestFramework): # Create 2 blocks. Send the blocks, then send the headers. blocks = [] - for b in range(2): + for _ in range(2): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 @@ -461,7 +461,7 @@ class SendHeadersTest(BitcoinTestFramework): # This time, direct fetch should work blocks = [] - for b in range(3): + for _ in range(3): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 @@ -482,7 +482,7 @@ class SendHeadersTest(BitcoinTestFramework): blocks = [] # Create extra blocks for later - for b in range(20): + for _ in range(20): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 @@ -529,7 +529,7 @@ class SendHeadersTest(BitcoinTestFramework): test_node.last_message.pop("getdata", None) blocks = [] # Create two more blocks. 
- for j in range(2): + for _ in range(2): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 @@ -550,7 +550,7 @@ class SendHeadersTest(BitcoinTestFramework): # Now we test that if we repeatedly don't send connecting headers, we # don't go into an infinite loop trying to get them to connect. MAX_UNCONNECTING_HEADERS = 10 - for j in range(MAX_UNCONNECTING_HEADERS + 1): + for _ in range(MAX_UNCONNECTING_HEADERS + 1): blocks.append(create_block(tip, create_coinbase(height), block_time)) blocks[-1].solve() tip = blocks[-1].sha256 diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py index 2527edc135..3ea1c6e5e7 100755 --- a/test/functional/p2p_tx_download.py +++ b/test/functional/p2p_tx_download.py @@ -162,7 +162,7 @@ class TxDownloadTest(BitcoinTestFramework): # Setup the p2p connections self.peers = [] for node in self.nodes: - for i in range(NUM_INBOUND): + for _ in range(NUM_INBOUND): self.peers.append(node.add_p2p_connection(TestP2PConn())) self.log.info("Nodes are setup with {} incoming connections each".format(NUM_INBOUND)) diff --git a/test/functional/rpc_deprecated.py b/test/functional/rpc_deprecated.py index 9a21998d11..b71854d234 100755 --- a/test/functional/rpc_deprecated.py +++ b/test/functional/rpc_deprecated.py @@ -4,13 +4,13 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test deprecation of RPC calls.""" from test_framework.test_framework import BitcoinTestFramework -# from test_framework.util import assert_raises_rpc_error +from test_framework.util import assert_raises_rpc_error, find_vout_for_address class DeprecatedRpcTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 self.setup_clean_chain = True - self.extra_args = [[], []] + self.extra_args = [[], ['-deprecatedrpc=bumpfee']] def run_test(self): # This test should be used to verify correct behaviour of deprecated @@ -23,7 +23,38 @@ class DeprecatedRpcTest(BitcoinTestFramework): # self.log.info("Test generate RPC") # assert_raises_rpc_error(-32, 'The wallet generate rpc method is deprecated', self.nodes[0].rpc.generate, 1) # self.nodes[1].generate(1) - self.log.info("No tested deprecated RPC methods") + + if self.is_wallet_compiled(): + self.log.info("Test bumpfee RPC") + self.nodes[0].generate(101) + self.nodes[0].createwallet(wallet_name='nopriv', disable_private_keys=True) + noprivs0 = self.nodes[0].get_wallet_rpc('nopriv') + w0 = self.nodes[0].get_wallet_rpc('') + self.nodes[1].createwallet(wallet_name='nopriv', disable_private_keys=True) + noprivs1 = self.nodes[1].get_wallet_rpc('nopriv') + + address = w0.getnewaddress() + desc = w0.getaddressinfo(address)['desc'] + change_addr = w0.getrawchangeaddress() + change_desc = w0.getaddressinfo(change_addr)['desc'] + txid = w0.sendtoaddress(address=address, amount=10) + vout = find_vout_for_address(w0, txid, address) + self.nodes[0].generate(1) + rawtx = w0.createrawtransaction([{'txid': txid, 'vout': vout}], {w0.getnewaddress(): 5}, 0, True) + rawtx = w0.fundrawtransaction(rawtx, {'changeAddress': change_addr}) + signed_tx = w0.signrawtransactionwithwallet(rawtx['hex'])['hex'] + + noprivs0.importmulti([{'desc': desc, 'timestamp': 0}, {'desc': change_desc, 'timestamp': 0, 'internal': True}]) + noprivs1.importmulti([{'desc': desc, 'timestamp': 0}, {'desc': change_desc, 'timestamp': 0, 'internal': True}]) + + txid = w0.sendrawtransaction(signed_tx) + self.sync_all() + + assert_raises_rpc_error(-32, 'Using bumpfee with wallets that have 
private keys disabled is deprecated. Use psbtbumpfee instead or restart bitcoind with -deprecatedrpc=bumpfee. This functionality will be removed in 0.22', noprivs0.bumpfee, txid) + bumped_psbt = noprivs1.bumpfee(txid) + assert 'psbt' in bumped_psbt + else: + self.log.info("No tested deprecated RPC methods") if __name__ == '__main__': DeprecatedRpcTest().main() diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py index 57c8f511ac..2a0971b808 100755 --- a/test/functional/rpc_fundrawtransaction.py +++ b/test/functional/rpc_fundrawtransaction.py @@ -554,7 +554,7 @@ class RawTransactionsTest(BitcoinTestFramework): self.nodes[1].generate(1) self.sync_all() - for i in range(0,20): + for _ in range(20): self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) self.nodes[0].generate(1) self.sync_all() @@ -582,7 +582,7 @@ class RawTransactionsTest(BitcoinTestFramework): self.nodes[1].generate(1) self.sync_all() - for i in range(0,20): + for _ in range(20): self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01) self.nodes[0].generate(1) self.sync_all() diff --git a/test/functional/rpc_generate.py b/test/functional/rpc_generate.py new file mode 100755 index 0000000000..9404f1e25e --- /dev/null +++ b/test/functional/rpc_generate.py @@ -0,0 +1,35 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test generate RPC.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_equal, + assert_raises_rpc_error, +) + + +class RPCGenerateTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + + def run_test(self): + message = ( + "generate ( nblocks maxtries ) has been replaced by the -generate " + "cli option. Refer to -help for more information." 
+ ) + + self.log.info("Test rpc generate raises with message to use cli option") + assert_raises_rpc_error(-32601, message, self.nodes[0].rpc.generate) + + self.log.info("Test rpc generate help prints message to use cli option") + assert_equal(message, self.nodes[0].help("generate")) + + self.log.info("Test rpc generate is a hidden command not discoverable in general help") + assert message not in self.nodes[0].help() + + +if __name__ == "__main__": + RPCGenerateTest().main() diff --git a/test/functional/rpc_generateblock.py b/test/functional/rpc_generateblock.py index aa58c0af9d..08ff0fba50 100755 --- a/test/functional/rpc_generateblock.py +++ b/test/functional/rpc_generateblock.py @@ -55,7 +55,7 @@ class GenerateBlockTest(BitcoinTestFramework): node.generatetoaddress(110, address) # Generate some extra mempool transactions to verify they don't get mined - for i in range(10): + for _ in range(10): node.sendtoaddress(address, 0.001) self.log.info('Generate block with txid') diff --git a/test/functional/rpc_misc.py b/test/functional/rpc_misc.py index c8517d719e..cc5a264adb 100755 --- a/test/functional/rpc_misc.py +++ b/test/functional/rpc_misc.py @@ -27,8 +27,8 @@ class RpcMiscTest(BitcoinTestFramework): self.log.info("test CHECK_NONFATAL") assert_raises_rpc_error( -1, - "Internal bug detected: 'request.params.size() != 100'", - lambda: node.echo(*[0] * 100), + 'Internal bug detected: \'request.params[9].get_str() != "trigger_internal_bug"\'', + lambda: node.echo(arg9='trigger_internal_bug'), ) self.log.info("test getmemoryinfo") diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index 3336246c8b..192b60e5d2 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -22,8 +22,6 @@ from test_framework.util import ( from test_framework.mininode import P2PInterface import test_framework.messages from test_framework.messages import ( - CAddress, - msg_addr, NODE_NETWORK, NODE_WITNESS, ) @@ -131,6 +129,13 @@ class NetTest(BitcoinTestFramework): added_nodes = self.nodes[0].getaddednodeinfo(ip_port) assert_equal(len(added_nodes), 1) assert_equal(added_nodes[0]['addednode'], ip_port) + # check that node cannot be added again + assert_raises_rpc_error(-23, "Node already added", self.nodes[0].addnode, node=ip_port, command='add') + # check that node can be removed + self.nodes[0].addnode(node=ip_port, command='remove') + assert_equal(self.nodes[0].getaddednodeinfo(), []) + # check that trying to remove the node again returns an error + assert_raises_rpc_error(-24, "Node could not be removed", self.nodes[0].addnode, node=ip_port, command='remove') # check that a non-existent node returns an error assert_raises_rpc_error(-24, "Node has not been added", self.nodes[0].getaddednodeinfo, '1.1.1.1') @@ -154,30 +159,34 @@ class NetTest(BitcoinTestFramework): def _test_getnodeaddresses(self): self.nodes[0].add_p2p_connection(P2PInterface()) - # send some addresses to the node via the p2p message addr - msg = msg_addr() + # Add some addresses to the Address Manager over RPC. Due to the way + # bucket and bucket position are calculated, some of these addresses + # will collide. 
imported_addrs = [] - for i in range(256): - a = "123.123.123.{}".format(i) + for i in range(10000): + first_octet = i >> 8 + second_octet = i % 256 + a = "{}.{}.1.1".format(first_octet, second_octet) imported_addrs.append(a) - addr = CAddress() - addr.time = 100000000 - addr.nServices = NODE_NETWORK | NODE_WITNESS - addr.ip = a - addr.port = 8333 - msg.addrs.append(addr) - self.nodes[0].p2p.send_and_ping(msg) - - # obtain addresses via rpc call and check they were ones sent in before - REQUEST_COUNT = 10 - node_addresses = self.nodes[0].getnodeaddresses(REQUEST_COUNT) - assert_equal(len(node_addresses), REQUEST_COUNT) + self.nodes[0].addpeeraddress(a, 8333) + + # Obtain addresses via rpc call and check they were ones sent in before. + # + # Maximum possible addresses in addrman is 10000, although actual + # number will usually be less due to bucket and bucket position + # collisions. + node_addresses = self.nodes[0].getnodeaddresses(0) + assert_greater_than(len(node_addresses), 5000) + assert_greater_than(10000, len(node_addresses)) for a in node_addresses: - assert_greater_than(a["time"], 1527811200) # 1st June 2018 + assert_greater_than(a["time"], 1527811200) # 1st June 2018 assert_equal(a["services"], NODE_NETWORK | NODE_WITNESS) assert a["address"] in imported_addrs assert_equal(a["port"], 8333) + node_addresses = self.nodes[0].getnodeaddresses(1) + assert_equal(len(node_addresses), 1) + assert_raises_rpc_error(-8, "Address count out of range", self.nodes[0].getnodeaddresses, -1) # addrman's size cannot be known reliably after insertion, as hash collisions may occur diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py index 4d985dd1b1..f7f23bc8f4 100755 --- a/test/functional/rpc_psbt.py +++ b/test/functional/rpc_psbt.py @@ -430,7 +430,7 @@ class PSBTTest(BitcoinTestFramework): # Check that joining shuffles the inputs and outputs # 10 attempts should be enough to get a shuffled join shuffled = False - for i in range(0, 10): + for _ in range(10): shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2]) shuffled |= joined != shuffled_joined if shuffled: diff --git a/test/functional/rpc_signrawtransaction.py b/test/functional/rpc_signrawtransaction.py index 3d08202724..704b65c060 100755 --- a/test/functional/rpc_signrawtransaction.py +++ b/test/functional/rpc_signrawtransaction.py @@ -198,10 +198,30 @@ class SignRawTransactionsTest(BitcoinTestFramework): assert_equal(spending_tx_signed['complete'], True) self.nodes[0].sendrawtransaction(spending_tx_signed['hex']) + def OP_1NEGATE_test(self): + self.log.info("Test OP_1NEGATE (0x4f) satisfies BIP62 minimal push standardness rule") + hex_str = ( + "0200000001FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF" + "FFFFFFFF00000000044F024F9CFDFFFFFF01F0B9F5050000000023210277777777" + "77777777777777777777777777777777777777777777777777777777AC66030000" + ) + prev_txs = [ + { + "txid": "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", + "vout": 0, + "scriptPubKey": "A914AE44AB6E9AA0B71F1CD2B453B69340E9BFBAEF6087", + "redeemScript": "4F9C", + "amount": 1, + } + ] + txn = self.nodes[0].signrawtransactionwithwallet(hex_str, prev_txs) + assert txn["complete"] + def run_test(self): self.successful_signing_test() self.script_verification_error_test() self.witness_script_test() + self.OP_1NEGATE_test() self.test_with_lock_outputs() diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 2462a9a6db..5207b563a1 100755 --- 
a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -53,6 +53,7 @@ NODE_NETWORK = (1 << 0) NODE_GETUTXO = (1 << 1) NODE_BLOOM = (1 << 2) NODE_WITNESS = (1 << 3) +NODE_COMPACT_FILTERS = (1 << 6) NODE_NETWORK_LIMITED = (1 << 10) MSG_TX = 1 @@ -111,7 +112,7 @@ def deser_uint256(f): def ser_uint256(u): rs = b"" - for i in range(8): + for _ in range(8): rs += struct.pack("<I", u & 0xFFFFFFFF) u >>= 32 return rs @@ -134,7 +135,7 @@ def uint256_from_compact(c): def deser_vector(f, c): nit = deser_compact_size(f) r = [] - for i in range(nit): + for _ in range(nit): t = c() t.deserialize(f) r.append(t) @@ -157,7 +158,7 @@ def ser_vector(l, ser_function_name=None): def deser_uint256_vector(f): nit = deser_compact_size(f) r = [] - for i in range(nit): + for _ in range(nit): t = deser_uint256(f) r.append(t) return r @@ -173,7 +174,7 @@ def ser_uint256_vector(l): def deser_string_vector(f): nit = deser_compact_size(f) r = [] - for i in range(nit): + for _ in range(nit): t = deser_string(f) r.append(t) return r @@ -467,7 +468,7 @@ class CTransaction: else: self.vout = deser_vector(f, CTxOut) if flags != 0: - self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))] + self.wit.vtxinwit = [CTxInWitness() for _ in range(len(self.vin))] self.wit.deserialize(f) else: self.wit = CTxWitness() @@ -500,7 +501,7 @@ class CTransaction: if (len(self.wit.vtxinwit) != len(self.vin)): # vtxinwit must have the same length as vin self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)] - for i in range(len(self.wit.vtxinwit), len(self.vin)): + for _ in range(len(self.wit.vtxinwit), len(self.vin)): self.wit.vtxinwit.append(CTxInWitness()) r += self.wit.serialize() r += struct.pack("<I", self.nLockTime) @@ -735,7 +736,7 @@ class P2PHeaderAndShortIDs: self.header.deserialize(f) self.nonce = struct.unpack("<Q", f.read(8))[0] self.shortids_length = deser_compact_size(f) - for i in range(self.shortids_length): + for _ in range(self.shortids_length): # shortids are defined to be 6 bytes in the spec, so append # two zero bytes and read it in as an 8-byte number self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0]) @@ -852,7 +853,7 @@ class BlockTransactionsRequest: def deserialize(self, f): self.blockhash = deser_uint256(f) indexes_length = deser_compact_size(f) - for i in range(indexes_length): + for _ in range(indexes_length): self.indexes.append(deser_compact_size(f)) def serialize(self): diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py index 07811667a8..eaf637fbb8 100755 --- a/test/functional/test_framework/mininode.py +++ b/test/functional/test_framework/mininode.py @@ -388,18 +388,22 @@ class P2PInterface(P2PConnection): # Connection helper methods - def wait_until(self, test_function, timeout=60): + def wait_until(self, test_function_in, *, timeout=60, check_connected=True): + def test_function(): + if check_connected: + assert self.is_connected + return test_function_in() + wait_until(test_function, timeout=timeout, lock=mininode_lock, timeout_factor=self.timeout_factor) def wait_for_disconnect(self, timeout=60): test_function = lambda: not self.is_connected - self.wait_until(test_function, timeout=timeout) + self.wait_until(test_function, timeout=timeout, check_connected=False) # Message receiving helper methods def wait_for_tx(self, txid, timeout=60): def test_function(): - assert self.is_connected if not self.last_message.get('tx'): return False return self.last_message['tx'].tx.rehash() == txid @@ 
-408,14 +412,12 @@ class P2PInterface(P2PConnection): def wait_for_block(self, blockhash, timeout=60): def test_function(): - assert self.is_connected return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash self.wait_until(test_function, timeout=timeout) def wait_for_header(self, blockhash, timeout=60): def test_function(): - assert self.is_connected last_headers = self.last_message.get('headers') if not last_headers: return False @@ -425,7 +427,6 @@ class P2PInterface(P2PConnection): def wait_for_merkleblock(self, blockhash, timeout=60): def test_function(): - assert self.is_connected last_filtered_block = self.last_message.get('merkleblock') if not last_filtered_block: return False @@ -437,9 +438,7 @@ class P2PInterface(P2PConnection): """Waits for a getdata message. The object hashes in the inventory vector must match the provided hash_list.""" - def test_function(): - assert self.is_connected last_data = self.last_message.get("getdata") if not last_data: return False @@ -454,9 +453,7 @@ class P2PInterface(P2PConnection): value must be explicitly cleared before calling this method, or this will return immediately with success. TODO: change this method to take a hash value and only return true if the correct block header has been requested.""" - def test_function(): - assert self.is_connected return self.last_message.get("getheaders") self.wait_until(test_function, timeout=timeout) @@ -467,7 +464,6 @@ class P2PInterface(P2PConnection): raise NotImplementedError("wait_for_inv() will only verify the first inv object") def test_function(): - assert self.is_connected return self.last_message.get("inv") and \ self.last_message["inv"].inv[0].type == expected_inv[0].type and \ self.last_message["inv"].inv[0].hash == expected_inv[0].hash @@ -491,7 +487,6 @@ class P2PInterface(P2PConnection): self.send_message(msg_ping(nonce=self.ping_counter)) def test_function(): - assert self.is_connected return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter self.wait_until(test_function, timeout=timeout) @@ -609,7 +604,11 @@ class P2PDataStore(P2PInterface): self.send_message(msg_block(block=b)) else: self.send_message(msg_headers([CBlockHeader(block) for block in blocks])) - self.wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout) + self.wait_until( + lambda: blocks[-1].sha256 in self.getdata_requests, + timeout=timeout, + check_connected=success, + ) if expect_disconnect: self.wait_for_disconnect(timeout=timeout) @@ -677,6 +676,6 @@ class P2PTxInvStore(P2PInterface): The mempool should mark unbroadcast=False for these transactions. """ # Wait until invs have been received (and getdatas sent) for each txid. 
- self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout) + self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout=timeout) # Flush messages and wait for the getdatas to be processed self.sync_with_ping() diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index cc5f8307d3..5e35ba0fce 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -646,7 +646,7 @@ def LegacySignatureHash(script, txTo, inIdx, hashtype): tmp = txtmp.vout[outIdx] txtmp.vout = [] - for i in range(outIdx): + for _ in range(outIdx): txtmp.vout.append(CTxOut(-1)) txtmp.vout.append(tmp) diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index 66bb2c89b5..8f0d45c7f9 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -524,6 +524,7 @@ class TestNode(): p2p_conn.peer_connect(**kwargs, net=self.chain, timeout_factor=self.timeout_factor)() self.p2ps.append(p2p_conn) + p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False) if wait_for_verack: # Wait for the node to send us the version and verack p2p_conn.wait_for_verack() @@ -637,7 +638,7 @@ class TestNodeCLI(): raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr) try: return json.loads(cli_stdout, parse_float=decimal.Decimal) - except json.JSONDecodeError: + except (json.JSONDecodeError, decimal.InvalidOperation): return cli_stdout.rstrip("\n") class RPCOverloadWrapper(): diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 506057f1fa..3362b41209 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -529,7 +529,7 @@ def create_confirmed_utxos(fee, node, count): addr2 = node.getnewaddress() if iterations <= 0: return utxos - for i in range(iterations): + for _ in range(iterations): t = utxos.pop() inputs = [] inputs.append({"txid": t["txid"], "vout": t["vout"]}) @@ -556,7 +556,7 @@ def gen_return_txouts(): # So we have big transactions (and therefore can't fit very many into each block) # create one script_pubkey script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes - for i in range(512): + for _ in range(512): script_pubkey = script_pubkey + "01" # concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change txouts = [] @@ -564,7 +564,7 @@ def gen_return_txouts(): txout = CTxOut() txout.nValue = 0 txout.scriptPubKey = hex_str_to_bytes(script_pubkey) - for k in range(128): + for _ in range(128): txouts.append(txout) return txouts diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index 95c2b7c5ec..67b9050123 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -159,6 +159,7 @@ BASE_SCRIPTS = [ 'rpc_deprecated.py', 'wallet_disable.py', 'p2p_addr_relay.py', + 'p2p_getaddr_caching.py', 'p2p_getdata.py', 'rpc_net.py', 'wallet_keypool.py', @@ -193,6 +194,7 @@ BASE_SCRIPTS = [ 'p2p_eviction.py', 'rpc_signmessage.py', 'rpc_generateblock.py', + 'rpc_generate.py', 'wallet_balance.py', 'feature_nulldummy.py', 'mempool_accept.py', @@ -712,14 +714,16 @@ class RPCCoverage(): Return a set of currently untested RPC commands. 
""" - # This is shared from `test/functional/test-framework/coverage.py` + # This is shared from `test/functional/test_framework/coverage.py` reference_filename = 'rpc_interface.txt' coverage_file_prefix = 'coverage.' coverage_ref_filename = os.path.join(self.dir, reference_filename) coverage_filenames = set() all_cmds = set() - covered_cmds = set() + # Consider RPC generate covered, because it is overloaded in + # test_framework/test_node.py and not seen by the coverage check. + covered_cmds = set({'generate'}) if not os.path.isfile(coverage_ref_filename): raise RuntimeError("No coverage reference found") diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py index 9dd91b2495..4766355335 100755 --- a/test/functional/wallet_backup.py +++ b/test/functional/wallet_backup.py @@ -129,7 +129,7 @@ class WalletBackupTest(BitcoinTestFramework): self.log.info("Creating transactions") # Five rounds of sending each other transactions. - for i in range(5): + for _ in range(5): self.do_one_round() self.log.info("Backing up") @@ -142,7 +142,7 @@ class WalletBackupTest(BitcoinTestFramework): self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump')) self.log.info("More transactions") - for i in range(5): + for _ in range(5): self.do_one_round() # Generate 101 more blocks, so any fees paid mature diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py index 81382d94ad..d9a8b58a84 100755 --- a/test/functional/wallet_basic.py +++ b/test/functional/wallet_basic.py @@ -569,7 +569,7 @@ class WalletTest(BitcoinTestFramework): # So we should be able to generate exactly chainlimit txs for each original output sending_addr = self.nodes[1].getnewaddress() txid_list = [] - for i in range(chainlimit * 2): + for _ in range(chainlimit * 2): txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001'))) assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit * 2) assert_equal(len(txid_list), chainlimit * 2) diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py index 72c85b8832..53496084ef 100755 --- a/test/functional/wallet_bumpfee.py +++ b/test/functional/wallet_bumpfee.py @@ -62,7 +62,7 @@ class BumpFeeTest(BitcoinTestFramework): self.log.info("Mining blocks...") peer_node.generate(110) self.sync_all() - for i in range(25): + for _ in range(25): peer_node.sendtoaddress(rbf_node_address, 0.001) self.sync_all() peer_node.generate(1) @@ -123,13 +123,19 @@ def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address): self.sync_mempools((rbf_node, peer_node)) assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool() if mode == "fee_rate": + bumped_psbt = rbf_node.psbtbumpfee(rbfid, {"fee_rate": NORMAL}) bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate": NORMAL}) else: + bumped_psbt = rbf_node.psbtbumpfee(rbfid) bumped_tx = rbf_node.bumpfee(rbfid) assert_equal(bumped_tx["errors"], []) assert bumped_tx["fee"] > -rbftx["fee"] assert_equal(bumped_tx["origfee"], -rbftx["fee"]) assert "psbt" not in bumped_tx + assert_equal(bumped_psbt["errors"], []) + assert bumped_psbt["fee"] > -rbftx["fee"] + assert_equal(bumped_psbt["origfee"], -rbftx["fee"]) + assert "psbt" in bumped_psbt # check that bumped_tx propagates, original tx was evicted and has a wallet conflict self.sync_mempools((rbf_node, peer_node)) assert bumped_tx["txid"] in rbf_node.getrawmempool() @@ -391,7 +397,7 @@ def test_watchonly_psbt(self, peer_node, rbf_node, dest_address): 
assert_equal(len(watcher.decodepsbt(psbt)["tx"]["vin"]), 1) # Bump fee, obnoxiously high to add additional watchonly input - bumped_psbt = watcher.bumpfee(original_txid, {"fee_rate": HIGH}) + bumped_psbt = watcher.psbtbumpfee(original_txid, {"fee_rate": HIGH}) assert_greater_than(len(watcher.decodepsbt(bumped_psbt['psbt'])["tx"]["vin"]), 1) assert "txid" not in bumped_psbt assert_equal(bumped_psbt["origfee"], -watcher.gettransaction(original_txid)["fee"]) diff --git a/test/functional/wallet_create_tx.py b/test/functional/wallet_create_tx.py index 330de8b0fc..ed9159726a 100755 --- a/test/functional/wallet_create_tx.py +++ b/test/functional/wallet_create_tx.py @@ -45,7 +45,7 @@ class CreateTxWalletTest(BitcoinTestFramework): def test_tx_size_too_large(self): # More than 10kB of outputs, so that we hit -maxtxfee with a high feerate - outputs = {self.nodes[0].getnewaddress(address_type='bech32'): 0.000025 for i in range(400)} + outputs = {self.nodes[0].getnewaddress(address_type='bech32'): 0.000025 for _ in range(400)} raw_tx = self.nodes[0].createrawtransaction(inputs=[], outputs=outputs) for fee_setting in ['-minrelaytxfee=0.01', '-mintxfee=0.01', '-paytxfee=0.01']: diff --git a/test/functional/wallet_descriptor.py b/test/functional/wallet_descriptor.py index 289ccf43ec..9c63e8f7d3 100755 --- a/test/functional/wallet_descriptor.py +++ b/test/functional/wallet_descriptor.py @@ -107,7 +107,7 @@ class WalletDescriptorTest(BitcoinTestFramework): assert_equal(info2['desc'], info3['desc']) self.log.info("Test that getnewaddress still works after keypool is exhausted in an encrypted wallet") - for i in range(0, 500): + for _ in range(500): send_wrpc.getnewaddress() self.log.info("Test that unlock is needed when deriving only hardened keys in an encrypted wallet") @@ -120,7 +120,7 @@ class WalletDescriptorTest(BitcoinTestFramework): }]) send_wrpc.walletlock() # Exhaust keypool of 100 - for i in range(0, 100): + for _ in range(100): send_wrpc.getnewaddress(address_type='bech32') # This should now error assert_raises_rpc_error(-12, "Keypool ran out, please call keypoolrefill first", send_wrpc.getnewaddress, '', 'bech32') diff --git a/test/functional/wallet_dump.py b/test/functional/wallet_dump.py index 6bfb468823..06f01ef191 100755 --- a/test/functional/wallet_dump.py +++ b/test/functional/wallet_dump.py @@ -116,7 +116,7 @@ class WalletDumpTest(BitcoinTestFramework): test_addr_count = 10 addrs = [] for address_type in ['legacy', 'p2sh-segwit', 'bech32']: - for i in range(0, test_addr_count): + for _ in range(test_addr_count): addr = self.nodes[0].getnewaddress(address_type=address_type) vaddr = self.nodes[0].getaddressinfo(addr) # required to get hd keypath addrs.append(vaddr) diff --git a/test/functional/wallet_groups.py b/test/functional/wallet_groups.py index 9dd55b4ab1..b6fe295127 100755 --- a/test/functional/wallet_groups.py +++ b/test/functional/wallet_groups.py @@ -27,8 +27,8 @@ class WalletGroupTest(BitcoinTestFramework): self.nodes[0].generate(110) # Get some addresses from the two nodes - addr1 = [self.nodes[1].getnewaddress() for i in range(3)] - addr2 = [self.nodes[2].getnewaddress() for i in range(3)] + addr1 = [self.nodes[1].getnewaddress() for _ in range(3)] + addr2 = [self.nodes[2].getnewaddress() for _ in range(3)] addrs = addr1 + addr2 # Send 1 + 0.5 coin to each address @@ -71,7 +71,7 @@ class WalletGroupTest(BitcoinTestFramework): # Fill node2's wallet with 10000 outputs corresponding to the same # scriptPubKey - for i in range(5): + for _ in range(5): raw_tx = 
self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}]) tx = FromHex(CTransaction(), raw_tx) tx.vin = [] diff --git a/test/functional/wallet_labels.py b/test/functional/wallet_labels.py index fb4a1f9792..cff59bd1c1 100755 --- a/test/functional/wallet_labels.py +++ b/test/functional/wallet_labels.py @@ -118,7 +118,7 @@ class WalletLabelsTest(BitcoinTestFramework): if not self.options.descriptors: for label in labels: addresses = [] - for x in range(10): + for _ in range(10): addresses.append(node.getnewaddress()) multisig_address = node.addmultisigaddress(5, addresses, label.name)['address'] label.add_address(multisig_address) diff --git a/test/functional/wallet_listsinceblock.py b/test/functional/wallet_listsinceblock.py index 6d51ca6c93..d4131deabf 100755 --- a/test/functional/wallet_listsinceblock.py +++ b/test/functional/wallet_listsinceblock.py @@ -36,6 +36,7 @@ class ListSinceBlockTest(BitcoinTestFramework): self.test_double_spend() self.test_double_send() self.double_spends_filtered() + self.test_targetconfirmations() def test_no_blockhash(self): self.log.info("Test no blockhash") @@ -74,6 +75,27 @@ class ListSinceBlockTest(BitcoinTestFramework): assert_raises_rpc_error(-8, "blockhash must be hexadecimal string (not 'Z000000000000000000000000000000000000000000000000000000000000000')", self.nodes[0].listsinceblock, "Z000000000000000000000000000000000000000000000000000000000000000") + def test_targetconfirmations(self): + ''' + This tests when the value of target_confirmations exceeds the number of + blocks in the main chain. In this case, the genesis block hash should be + given for the `lastblock` property. If target_confirmations is < 1, then + a -8 invalid parameter error is thrown. + ''' + self.log.info("Test target_confirmations") + blockhash, = self.nodes[2].generate(1) + blockheight = self.nodes[2].getblockheader(blockhash)['height'] + self.sync_all() + + assert_equal( + self.nodes[0].getblockhash(0), + self.nodes[0].listsinceblock(blockhash, blockheight + 1)['lastblock']) + assert_equal( + self.nodes[0].getblockhash(0), + self.nodes[0].listsinceblock(blockhash, blockheight + 1000)['lastblock']) + assert_raises_rpc_error(-8, "Invalid parameter", + self.nodes[0].listsinceblock, blockhash, 0) + def test_reorg(self): ''' `listsinceblock` did not behave correctly when handed a block that was diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py index a54396cad3..1872545cdb 100755 --- a/test/functional/wallet_multiwallet.py +++ b/test/functional/wallet_multiwallet.py @@ -26,7 +26,7 @@ FEATURE_LATEST = 169900 got_loading_error = False def test_load_unload(node, name): global got_loading_error - for i in range(10): + for _ in range(10): if got_loading_error: return try: diff --git a/test/lint/lint-git-commit-check.sh b/test/lint/lint-git-commit-check.sh index 7cffd267dd..8947f67bf6 100755 --- a/test/lint/lint-git-commit-check.sh +++ b/test/lint/lint-git-commit-check.sh @@ -14,21 +14,22 @@ while getopts "?" opt; do case $opt in ?) echo "Usage: $0 [N]" - echo " TRAVIS_COMMIT_RANGE='<commit range>' $0" + echo " COMMIT_RANGE='<commit range>' $0" echo " $0 -?" echo "Checks unmerged commits, the previous N commits, or a commit range." 
- echo "TRAVIS_COMMIT_RANGE='47ba2c3...ee50c9e' $0" + echo "COMMIT_RANGE='47ba2c3...ee50c9e' $0" exit ${EXIT_CODE} ;; esac done -if [ -z "${TRAVIS_COMMIT_RANGE}" ]; then - if [ -n "$1" ]; then - TRAVIS_COMMIT_RANGE="HEAD~$1...HEAD" - else - TRAVIS_COMMIT_RANGE="origin/master..HEAD" - fi +if [ -z "${COMMIT_RANGE}" ]; then + if [ -n "$1" ]; then + COMMIT_RANGE="HEAD~$1...HEAD" + else + MERGE_BASE=$(git merge-base HEAD master) + COMMIT_RANGE="$MERGE_BASE..HEAD" + fi fi while IFS= read -r commit_hash || [[ -n "$commit_hash" ]]; do @@ -41,6 +42,6 @@ while IFS= read -r commit_hash || [[ -n "$commit_hash" ]]; do EXIT_CODE=1 fi done < <(git log --format=%B -n 1 "$commit_hash") -done < <(git log "${TRAVIS_COMMIT_RANGE}" --format=%H) +done < <(git log "${COMMIT_RANGE}" --format=%H) exit ${EXIT_CODE} diff --git a/test/lint/lint-includes.sh b/test/lint/lint-includes.sh index 611bd4a8c4..fde77aea2d 100755 --- a/test/lint/lint-includes.sh +++ b/test/lint/lint-includes.sh @@ -63,6 +63,7 @@ EXPECTED_BOOST_INCLUDES=( boost/optional.hpp boost/preprocessor/cat.hpp boost/preprocessor/stringize.hpp + boost/process.hpp boost/signals2/connection.hpp boost/signals2/optional_last_value.hpp boost/signals2/signal.hpp |