-rw-r--r--  .travis.yml | 2
-rw-r--r--  CONTRIBUTING.md | 2
-rw-r--r--  build-aux/m4/ax_boost_process.m4 | 121
-rw-r--r--  build_msvc/bitcoin_config.h | 3
-rw-r--r--  build_msvc/vcpkg-packages.txt | 2
-rw-r--r--  ci/test/00_setup_env_arm.sh | 2
-rw-r--r--  ci/test/00_setup_env_i686_centos.sh | 2
-rw-r--r--  ci/test/00_setup_env_mac.sh | 2
-rw-r--r--  ci/test/00_setup_env_mac_host.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_asan.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_fuzz.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_multiprocess.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_nowallet.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_qt5.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_tsan.sh | 2
-rw-r--r--  ci/test/00_setup_env_s390x.sh | 2
-rw-r--r--  ci/test/00_setup_env_win64.sh | 2
-rw-r--r--  configure.ac | 71
-rw-r--r--  contrib/gitian-descriptors/gitian-linux.yml | 72
-rw-r--r--  contrib/valgrind.supp | 1
-rw-r--r--  depends/packages/boost.mk | 2
-rw-r--r--  doc/Doxyfile.in | 2
-rw-r--r--  doc/JSON-RPC-interface.md | 2
-rw-r--r--  doc/build-openbsd.md | 14
-rw-r--r--  doc/dependencies.md | 2
-rw-r--r--  doc/files.md | 2
-rw-r--r--  doc/tor.md | 18
-rw-r--r--  src/Makefile.test.include | 3
-rw-r--r--  src/chainparams.cpp | 2
-rw-r--r--  src/consensus/validation.h | 3
-rw-r--r--  src/core_write.cpp | 7
-rw-r--r--  src/init.cpp | 4
-rw-r--r--  src/interfaces/node.cpp | 74
-rw-r--r--  src/interfaces/node.h | 6
-rw-r--r--  src/net.cpp | 160
-rw-r--r--  src/net.h | 86
-rw-r--r--  src/net_processing.cpp | 127
-rw-r--r--  src/policy/policy.cpp | 8
-rw-r--r--  src/qt/bitcoin.cpp | 4
-rw-r--r--  src/qt/forms/optionsdialog.ui | 4
-rw-r--r--  src/qt/test/addressbooktests.cpp | 2
-rw-r--r--  src/qt/test/test_main.cpp | 3
-rw-r--r--  src/qt/test/wallettests.cpp | 3
-rw-r--r--  src/random.cpp | 6
-rw-r--r--  src/rpc/net.cpp | 2
-rw-r--r--  src/rpc/rawtransaction.cpp | 2
-rw-r--r--  src/rpc/request.cpp | 2
-rw-r--r--  src/rpc/util.cpp | 2
-rw-r--r--  src/script/descriptor.cpp | 2
-rw-r--r--  src/sync.cpp | 22
-rw-r--r--  src/sync.h | 14
-rw-r--r--  src/test/crypto_tests.cpp | 2
-rw-r--r--  src/test/denialofservice_tests.cpp | 10
-rw-r--r--  src/test/fuzz/process_message.cpp | 2
-rw-r--r--  src/test/fuzz/process_messages.cpp | 5
-rw-r--r--  src/test/net_tests.cpp | 15
-rw-r--r--  src/test/settings_tests.cpp | 2
-rw-r--r--  src/test/sync_tests.cpp | 6
-rw-r--r--  src/test/system_tests.cpp | 95
-rw-r--r--  src/test/util/setup_common.cpp | 4
-rw-r--r--  src/test/util_tests.cpp | 41
-rw-r--r--  src/torcontrol.cpp | 4
-rw-r--r--  src/uint256.cpp | 6
-rw-r--r--  src/util/strencodings.cpp | 13
-rw-r--r--  src/util/strencodings.h | 27
-rw-r--r--  src/util/system.cpp | 43
-rw-r--r--  src/util/system.h | 12
-rw-r--r--  src/validation.cpp | 9
-rw-r--r--  src/wallet/bdb.cpp | 2
-rw-r--r--  src/wallet/rpcdump.cpp | 2
-rw-r--r--  src/wallet/rpcwallet.cpp | 2
-rwxr-xr-x  test/functional/example_test.py | 2
-rwxr-xr-x  test/functional/feature_assumevalid.py | 4
-rwxr-xr-x  test/functional/feature_bip68_sequence.py | 4
-rwxr-xr-x  test/functional/feature_block.py | 2
-rwxr-xr-x  test/functional/feature_csv_activation.py | 12
-rwxr-xr-x  test/functional/feature_dbcrash.py | 4
-rwxr-xr-x  test/functional/feature_fee_estimation.py | 6
-rwxr-xr-x  test/functional/feature_maxuploadtarget.py | 2
-rwxr-xr-x  test/functional/feature_pruning.py | 2
-rwxr-xr-x  test/functional/feature_rbf.py | 2
-rwxr-xr-x  test/functional/feature_segwit.py | 4
-rwxr-xr-x  test/functional/mempool_package_onemore.py | 2
-rwxr-xr-x  test/functional/mempool_packages.py | 10
-rwxr-xr-x  test/functional/mempool_persist.py | 2
-rwxr-xr-x  test/functional/mempool_updatefromblock.py | 2
-rwxr-xr-x  test/functional/p2p_compactblocks.py | 17
-rwxr-xr-x  test/functional/p2p_feefilter.py | 10
-rwxr-xr-x  test/functional/p2p_filter.py | 5
-rwxr-xr-x  test/functional/p2p_leak.py | 17
-rwxr-xr-x  test/functional/p2p_segwit.py | 40
-rwxr-xr-x  test/functional/p2p_sendheaders.py | 12
-rwxr-xr-x  test/functional/p2p_tx_download.py | 2
-rwxr-xr-x  test/functional/rpc_fundrawtransaction.py | 4
-rwxr-xr-x  test/functional/rpc_generateblock.py | 2
-rwxr-xr-x  test/functional/rpc_psbt.py | 2
-rwxr-xr-x  test/functional/test_framework/messages.py | 16
-rwxr-xr-x  test/functional/test_framework/mininode.py | 2
-rw-r--r--  test/functional/test_framework/script.py | 2
-rwxr-xr-x  test/functional/test_framework/test_node.py | 3
-rw-r--r--  test/functional/test_framework/util.py | 6
-rwxr-xr-x  test/functional/wallet_backup.py | 4
-rwxr-xr-x  test/functional/wallet_basic.py | 2
-rwxr-xr-x  test/functional/wallet_bumpfee.py | 2
-rwxr-xr-x  test/functional/wallet_create_tx.py | 2
-rwxr-xr-x  test/functional/wallet_descriptor.py | 4
-rwxr-xr-x  test/functional/wallet_dump.py | 2
-rwxr-xr-x  test/functional/wallet_groups.py | 6
-rwxr-xr-x  test/functional/wallet_labels.py | 2
-rwxr-xr-x  test/functional/wallet_multiwallet.py | 2
-rwxr-xr-x  test/lint/lint-git-commit-check.sh | 19
-rwxr-xr-x  test/lint/lint-includes.sh | 1
112 files changed, 884 insertions(+), 552 deletions(-)
diff --git a/.travis.yml b/.travis.yml
index f9932cfaca..f1cee7133f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -80,7 +80,7 @@ jobs:
QEMU_USER_CMD=""
- stage: test
- name: 'Win64 [GOAL: deploy] [unit tests, no gui, no functional tests]'
+ name: 'Win64 [GOAL: deploy] [unit tests, no gui, no boost::process, no functional tests]'
env: >-
FILE_ENV="./ci/test/00_setup_env_win64.sh"
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 11a0f4bac7..2e11474382 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -158,7 +158,7 @@ the pull request affects. Valid areas as:
Examples:
consensus: Add new opcode for BIP-XXXX OP_CHECKAWESOMESIG
- net: Automatically create hidden service, listen on Tor
+ net: Automatically create onion service, listen on Tor
qt: Add feed bump button
log: Fix typo in log message
diff --git a/build-aux/m4/ax_boost_process.m4 b/build-aux/m4/ax_boost_process.m4
new file mode 100644
index 0000000000..5d20e67464
--- /dev/null
+++ b/build-aux/m4/ax_boost_process.m4
@@ -0,0 +1,121 @@
+# ===========================================================================
+# https://www.gnu.org/software/autoconf-archive/ax_boost_process.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_BOOST_PROCESS
+#
+# DESCRIPTION
+#
+# Test for Process library from the Boost C++ libraries. The macro
+# requires a preceding call to AX_BOOST_BASE. Further documentation is
+# available at <http://randspringer.de/boost/index.html>.
+#
+# This macro calls:
+#
+# AC_SUBST(BOOST_PROCESS_LIB)
+#
+# And sets:
+#
+# HAVE_BOOST_PROCESS
+#
+# LICENSE
+#
+# Copyright (c) 2008 Thomas Porschberg <thomas@randspringer.de>
+# Copyright (c) 2008 Michael Tindal
+# Copyright (c) 2008 Daniel Casimiro <dan.casimiro@gmail.com>
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 2
+
+AC_DEFUN([AX_BOOST_PROCESS],
+[
+ AC_ARG_WITH([boost-process],
+ AS_HELP_STRING([--with-boost-process@<:@=special-lib@:>@],
+ [use the Process library from boost - it is possible to specify a certain library for the linker
+ e.g. --with-boost-process=boost_process-gcc-mt ]),
+ [
+ if test "$withval" = "no"; then
+ want_boost_process="no"
+ elif test "$withval" = "yes"; then
+ want_boost_process="yes"
+ ax_boost_user_process_lib=""
+ else
+ want_boost_process="yes"
+ ax_boost_user_process_lib="$withval"
+ fi
+ ],
+ [want_boost_process="yes"]
+ )
+
+ if test "x$want_boost_process" = "xyes"; then
+ AC_REQUIRE([AC_PROG_CC])
+ AC_REQUIRE([AC_CANONICAL_BUILD])
+ CPPFLAGS_SAVED="$CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
+ export CPPFLAGS
+
+ LDFLAGS_SAVED="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
+ export LDFLAGS
+
+ AC_CACHE_CHECK(whether the Boost::Process library is available,
+ ax_cv_boost_process,
+ [AC_LANG_PUSH([C++])
+ CXXFLAGS_SAVE=$CXXFLAGS
+ CXXFLAGS=
+
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include <boost/process.hpp>]],
+ [[boost::process::child* child = new boost::process::child; delete child;]])],
+ ax_cv_boost_process=yes, ax_cv_boost_process=no)
+ CXXFLAGS=$CXXFLAGS_SAVE
+ AC_LANG_POP([C++])
+ ])
+ if test "x$ax_cv_boost_process" = "xyes"; then
+ AC_SUBST(BOOST_CPPFLAGS)
+
+ AC_DEFINE(HAVE_BOOST_PROCESS,,[define if the Boost::Process library is available])
+ BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'`
+
+ LDFLAGS_SAVE=$LDFLAGS
+ if test "x$ax_boost_user_process_lib" = "x"; then
+ for libextension in `ls -r $BOOSTLIBDIR/libboost_process* 2>/dev/null | sed 's,.*/lib,,' | sed 's,\..*,,'` ; do
+ ax_lib=${libextension}
+ AC_CHECK_LIB($ax_lib, exit,
+ [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break],
+ [link_process="no"])
+ done
+ if test "x$link_process" != "xyes"; then
+ for libextension in `ls -r $BOOSTLIBDIR/boost_process* 2>/dev/null | sed 's,.*/,,' | sed -e 's,\..*,,'` ; do
+ ax_lib=${libextension}
+ AC_CHECK_LIB($ax_lib, exit,
+ [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break],
+ [link_process="no"])
+ done
+ fi
+
+ else
+ for ax_lib in $ax_boost_user_process_lib boost_process-$ax_boost_user_process_lib; do
+ AC_CHECK_LIB($ax_lib, exit,
+ [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break],
+ [link_process="no"])
+ done
+
+ fi
+ if test "x$ax_lib" = "x"; then
+ AC_MSG_ERROR(Could not find a version of the Boost::Process library!)
+ fi
+ if test "x$link_process" = "xno"; then
+ AC_MSG_ERROR(Could not link against $ax_lib !)
+ fi
+ fi
+
+ CPPFLAGS="$CPPFLAGS_SAVED"
+ LDFLAGS="$LDFLAGS_SAVED"
+ fi
+])
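The link test in this macro only constructs and deletes a default boost::process::child. For readers unfamiliar with the library this macro makes detectable, the following is a minimal, hypothetical sketch of typical Boost.Process usage, guarded by the same HAVE_BOOST_PROCESS macro the build defines; it is not code from this diff.

```cpp
// Hypothetical sketch of Boost.Process usage (not part of this diff),
// guarded the same way the build guards its optional dependency.
#ifdef HAVE_BOOST_PROCESS
#include <boost/process.hpp>

#include <iostream>
#include <string>

namespace bp = boost::process;

int main()
{
    bp::ipstream out; // stream that captures the child's stdout
    bp::child c(bp::search_path("echo"), "hello", bp::std_out > out);

    std::string line;
    while (std::getline(out, line)) { // read until the pipe closes
        std::cout << line << '\n';
    }
    c.wait(); // reap the child and populate exit_code()
    return c.exit_code();
}
#else
int main() { return 0; }
#endif
```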
diff --git a/build_msvc/bitcoin_config.h b/build_msvc/bitcoin_config.h
index fbbe1a2156..9d0b50a0b4 100644
--- a/build_msvc/bitcoin_config.h
+++ b/build_msvc/bitcoin_config.h
@@ -47,6 +47,9 @@
/* define if the Boost::Filesystem library is available */
#define HAVE_BOOST_FILESYSTEM /**/
+/* define if the Boost::Process library is available */
+#define HAVE_BOOST_PROCESS /**/
+
/* define if the Boost::System library is available */
#define HAVE_BOOST_SYSTEM /**/
diff --git a/build_msvc/vcpkg-packages.txt b/build_msvc/vcpkg-packages.txt
index 307f295f08..edce8576c3 100644
--- a/build_msvc/vcpkg-packages.txt
+++ b/build_msvc/vcpkg-packages.txt
@@ -1 +1 @@
-berkeleydb boost-filesystem boost-multi-index boost-signals2 boost-test boost-thread libevent[thread] zeromq double-conversion
\ No newline at end of file
+berkeleydb boost-filesystem boost-multi-index boost-process boost-signals2 boost-test boost-thread libevent[thread] zeromq double-conversion
diff --git a/ci/test/00_setup_env_arm.sh b/ci/test/00_setup_env_arm.sh
index b70a581532..2e445c126d 100644
--- a/ci/test/00_setup_env_arm.sh
+++ b/ci/test/00_setup_env_arm.sh
@@ -25,4 +25,4 @@ export RUN_FUNCTIONAL_TESTS=true
export GOAL="install"
# -Wno-psabi is to disable ABI warnings: "note: parameter passing for argument of type ... changed in GCC 7.1"
# This could be removed once the ABI change warning does not show up by default
-export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi --enable-werror"
+export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi --enable-werror --with-boost-process"
diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh
index 5688799f9e..e58003ab19 100644
--- a/ci/test/00_setup_env_i686_centos.sh
+++ b/ci/test/00_setup_env_i686_centos.sh
@@ -11,5 +11,5 @@ export CONTAINER_NAME=ci_i686_centos_7
export DOCKER_NAME_TAG=centos:7
export DOCKER_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python3 python36-zmq which patch lbzip2 dash"
export GOAL="install"
-export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports"
+export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports --with-boost-process"
export CONFIG_SHELL="/bin/dash"
diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh
index 7ec66eeb4f..b62f1603f4 100644
--- a/ci/test/00_setup_env_mac.sh
+++ b/ci/test/00_setup_env_mac.sh
@@ -14,4 +14,4 @@ export XCODE_BUILD_ID=11C505
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
export GOAL="deploy"
-export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror"
+export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror --with-boost-process"
diff --git a/ci/test/00_setup_env_mac_host.sh b/ci/test/00_setup_env_mac_host.sh
index b8a9ccaae5..5fb127b762 100644
--- a/ci/test/00_setup_env_mac_host.sh
+++ b/ci/test/00_setup_env_mac_host.sh
@@ -10,7 +10,7 @@ export HOST=x86_64-apple-darwin16
export DOCKER_NAME_TAG=ubuntu:18.04 # Check that bionic can cross-compile to macos (bionic is used in the gitian build as well)
export PIP_PACKAGES="zmq"
export GOAL="install"
-export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror"
+export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror --with-boost-process"
export NO_DEPENDS=1
export OSX_SDK=""
export CCACHE_SIZE=300M
diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh
index d57c673069..5995964f17 100644
--- a/ci/test/00_setup_env_native_asan.sh
+++ b/ci/test/00_setup_env_native_asan.sh
@@ -12,4 +12,4 @@ export DOCKER_NAME_TAG=ubuntu:20.04
export NO_DEPENDS=1
export TEST_RUNNER_EXTRA="--timeout-factor=4" # Increase timeout because sanitizers slow down
export GOAL="install"
-export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=address,integer,undefined CC=clang CXX=clang++"
+export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=address,integer,undefined CC=clang CXX=clang++ --with-boost-process"
diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh
index 31f437f0e8..a32de4a6b5 100644
--- a/ci/test/00_setup_env_native_fuzz.sh
+++ b/ci/test/00_setup_env_native_fuzz.sh
@@ -14,5 +14,5 @@ export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
export RUN_FUZZ_TESTS=true
export GOAL="install"
-export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=clang CXX=clang++"
+export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=clang CXX=clang++ --with-boost-process"
export CCACHE_SIZE=200M
diff --git a/ci/test/00_setup_env_native_multiprocess.sh b/ci/test/00_setup_env_native_multiprocess.sh
index 786f0f927f..522a5d9fc2 100644
--- a/ci/test/00_setup_env_native_multiprocess.sh
+++ b/ci/test/00_setup_env_native_multiprocess.sh
@@ -11,5 +11,5 @@ export DOCKER_NAME_TAG=ubuntu:20.04
export PACKAGES="cmake python3"
export DEP_OPTS="MULTIPROCESS=1"
export GOAL="install"
-export BITCOIN_CONFIG=""
+export BITCOIN_CONFIG="--with-boost-process"
export TEST_RUNNER_ENV="BITCOIND=bitcoin-node"
diff --git a/ci/test/00_setup_env_native_nowallet.sh b/ci/test/00_setup_env_native_nowallet.sh
index 1a0b14b62b..0a09bfe230 100644
--- a/ci/test/00_setup_env_native_nowallet.sh
+++ b/ci/test/00_setup_env_native_nowallet.sh
@@ -11,4 +11,4 @@ export DOCKER_NAME_TAG=ubuntu:16.04 # Use xenial to have one config run the tes
export PACKAGES="python3-zmq clang-3.8 llvm-3.8" # Use clang-3.8 to test C++11 compatibility, see doc/dependencies.md
export DEP_OPTS="NO_WALLET=1"
export GOAL="install"
-export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CC=clang-3.8 CXX=clang++-3.8"
+export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CC=clang-3.8 CXX=clang++-3.8 --with-boost-process"
diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh
index 6e2ff729a2..f9d869b4fd 100644
--- a/ci/test/00_setup_env_native_qt5.sh
+++ b/ci/test/00_setup_env_native_qt5.sh
@@ -16,4 +16,4 @@ export RUN_UNIT_TESTS_SEQUENTIAL="true"
export RUN_UNIT_TESTS="false"
export GOAL="install"
export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.2 v0.16.3 v0.17.1 v0.18.1 v0.19.1"
-export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-c++17 --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\""
+export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-c++17 --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\" --with-boost-process"
diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh
index 5695c43ec3..fc18483425 100644
--- a/ci/test/00_setup_env_native_tsan.sh
+++ b/ci/test/00_setup_env_native_tsan.sh
@@ -12,4 +12,4 @@ export PACKAGES="clang llvm libc++abi-dev libc++-dev python3-zmq"
export DEP_OPTS="CC=clang CXX='clang++ -stdlib=libc++'"
export TEST_RUNNER_EXTRA="--exclude feature_block --timeout-factor=4" # Increase timeout because sanitizers slow down. Low memory on Travis machines, exclude feature_block.
export GOAL="install"
-export BITCOIN_CONFIG="--enable-zmq --with-gui=no CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' CXXFLAGS='-g' --with-sanitizers=thread CC=clang CXX='clang++ -stdlib=libc++'"
+export BITCOIN_CONFIG="--enable-zmq --with-gui=no CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' CXXFLAGS='-g' --with-sanitizers=thread CC=clang CXX='clang++ -stdlib=libc++' --with-boost-process"
diff --git a/ci/test/00_setup_env_s390x.sh b/ci/test/00_setup_env_s390x.sh
index c180d023de..fe330920d0 100644
--- a/ci/test/00_setup_env_s390x.sh
+++ b/ci/test/00_setup_env_s390x.sh
@@ -22,4 +22,4 @@ export DOCKER_NAME_TAG="debian:buster"
export RUN_UNIT_TESTS=true
export RUN_FUNCTIONAL_TESTS=true
export GOAL="install"
-export BITCOIN_CONFIG="--enable-reduce-exports --with-incompatible-bdb"
+export BITCOIN_CONFIG="--enable-reduce-exports --with-incompatible-bdb --with-boost-process"
diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh
index eb8b870dd6..2b351dff6d 100644
--- a/ci/test/00_setup_env_win64.sh
+++ b/ci/test/00_setup_env_win64.sh
@@ -13,4 +13,4 @@ export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64"
export RUN_FUNCTIONAL_TESTS=false
export RUN_SECURITY_TESTS="true"
export GOAL="deploy"
-export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests"
+export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests --without-boost-process"
diff --git a/configure.ac b/configure.ac
index f11f2b2059..2381c5dd08 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1180,9 +1180,9 @@ fi
if test x$use_boost = xyes; then
dnl Minimum required Boost version
-define(MINIMUM_REQUIRED_BOOST, 1.47.0)
+define(MINIMUM_REQUIRED_BOOST, 1.58.0)
-dnl Check for boost libs
+dnl Check for Boost libs
AX_BOOST_BASE([MINIMUM_REQUIRED_BOOST])
if test x$want_boost = xno; then
AC_MSG_ERROR([[only libbitcoinconsensus can be built without boost]])
@@ -1191,30 +1191,15 @@ AX_BOOST_SYSTEM
AX_BOOST_FILESYSTEM
AX_BOOST_THREAD
+dnl Opt-in to boost-process
+AS_IF([ test x$with_boost_process != x ], [ AX_BOOST_PROCESS ], [ ax_cv_boost_process=no ] )
+
dnl Boost 1.56 through 1.62 allow using std::atomic instead of its own atomic
dnl counter implementations. In 1.63 and later the std::atomic approach is default.
m4_pattern_allow(DBOOST_AC_USE_STD_ATOMIC) dnl otherwise it's treated like a macro
BOOST_CPPFLAGS="-DBOOST_SP_USE_STD_ATOMIC -DBOOST_AC_USE_STD_ATOMIC $BOOST_CPPFLAGS"
-if test x$use_reduce_exports = xyes; then
- AC_MSG_CHECKING([for working boost reduced exports])
- TEMP_CPPFLAGS="$CPPFLAGS"
- CPPFLAGS="$BOOST_CPPFLAGS $CPPFLAGS"
- AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[
- @%:@include <boost/version.hpp>
- ]], [[
- #if BOOST_VERSION >= 104900
- // Everything is okay
- #else
- # error Boost version is too old
- #endif
- ]])],[
- AC_MSG_RESULT(yes)
- ],[
- AC_MSG_ERROR([boost versions < 1.49 are known to be broken with reduced exports. Use --disable-reduce-exports.])
- ])
- CPPFLAGS="$TEMP_CPPFLAGS"
-fi
+BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB"
fi
if test x$use_reduce_exports = xyes; then
@@ -1228,7 +1213,6 @@ if test x$use_tests = xyes; then
AC_MSG_ERROR(hexdump is required for tests)
fi
-
if test x$use_boost = xyes; then
AX_BOOST_UNIT_TEST_FRAMEWORK
@@ -1254,48 +1238,6 @@ if test x$use_tests = xyes; then
fi
fi
-if test x$use_boost = xyes; then
-
-BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB"
-
-
-dnl If boost (prior to 1.57) was built without c++11, it emulated scoped enums
-dnl using c++98 constructs. Unfortunately, this implementation detail leaked into
-dnl the abi. This was fixed in 1.57.
-
-dnl When building against that installed version using c++11, the headers pick up
-dnl on the native c++11 scoped enum support and enable it, however it will fail to
-dnl link. This can be worked around by disabling c++11 scoped enums if linking will
-dnl fail.
-dnl BOOST_NO_SCOPED_ENUMS was changed to BOOST_NO_CXX11_SCOPED_ENUMS in 1.51.
-
-TEMP_LIBS="$LIBS"
-LIBS="$BOOST_LIBS $LIBS"
-TEMP_CPPFLAGS="$CPPFLAGS"
-CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
-AC_MSG_CHECKING([for mismatched boost c++11 scoped enums])
-AC_LINK_IFELSE([AC_LANG_PROGRAM([[
- #include <boost/config.hpp>
- #include <boost/version.hpp>
- #if !defined(BOOST_NO_SCOPED_ENUMS) && !defined(BOOST_NO_CXX11_SCOPED_ENUMS) && BOOST_VERSION < 105700
- #define BOOST_NO_SCOPED_ENUMS
- #define BOOST_NO_CXX11_SCOPED_ENUMS
- #define CHECK
- #endif
- #include <boost/filesystem.hpp>
- ]],[[
- #if defined(CHECK)
- boost::filesystem::copy_file("foo", "bar");
- #else
- choke;
- #endif
- ]])],
- [AC_MSG_RESULT(mismatched); BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_NO_SCOPED_ENUMS -DBOOST_NO_CXX11_SCOPED_ENUMS"], [AC_MSG_RESULT(ok)])
-LIBS="$TEMP_LIBS"
-CPPFLAGS="$TEMP_CPPFLAGS"
-
-fi
-
dnl libevent check
if test x$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench != xnonononono; then
@@ -1683,6 +1625,7 @@ esac
echo
echo "Options used to compile and link:"
+echo " boost process = $ax_cv_boost_process"
echo " multiprocess = $build_multiprocess"
echo " with wallet = $enable_wallet"
echo " with gui / qt = $bitcoin_enable_qt"
diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml
index 0ed1e16f7e..e86ff83798 100644
--- a/contrib/gitian-descriptors/gitian-linux.yml
+++ b/contrib/gitian-descriptors/gitian-linux.yml
@@ -7,31 +7,29 @@ suites:
architectures:
- "amd64"
packages:
-- "curl"
-- "g++-aarch64-linux-gnu"
-- "g++-8-aarch64-linux-gnu"
-- "gcc-8-aarch64-linux-gnu"
-- "binutils-aarch64-linux-gnu"
-- "g++-arm-linux-gnueabihf"
-- "g++-8-arm-linux-gnueabihf"
-- "gcc-8-arm-linux-gnueabihf"
-- "binutils-arm-linux-gnueabihf"
-- "g++-riscv64-linux-gnu"
-- "g++-8-riscv64-linux-gnu"
-- "gcc-8-riscv64-linux-gnu"
-- "binutils-riscv64-linux-gnu"
-- "g++-8-multilib"
-- "gcc-8-multilib"
-- "binutils-gold"
-- "git"
-- "pkg-config"
+# Common dependencies.
- "autoconf"
-- "libtool"
- "automake"
-- "faketime"
+- "binutils"
- "bsdmainutils"
- "ca-certificates"
+- "curl"
+- "faketime"
+- "git"
+- "libtool"
+- "patch"
+- "pkg-config"
- "python3"
+# Cross compilation HOSTS:
+# - arm-linux-gnueabihf
+- "binutils-arm-linux-gnueabihf"
+- "g++-8-arm-linux-gnueabihf"
+# - aarch64-linux-gnu
+- "binutils-aarch64-linux-gnu"
+- "g++-8-aarch64-linux-gnu"
+# - riscv64-linux-gnu
+- "binutils-riscv64-linux-gnu"
+- "g++-8-riscv64-linux-gnu"
remotes:
- "url": "https://github.com/bitcoin/bitcoin.git"
"dir": "bitcoin"
@@ -93,45 +91,11 @@ script: |
create_per-host_faketime_wrappers "2000-01-01 12:00:00"
export PATH=${WRAP_DIR}:${PATH}
- EXTRA_INCLUDES_BASE=$WRAP_DIR/extra_includes
- mkdir -p $EXTRA_INCLUDES_BASE
-
- # x86 needs /usr/include/i386-linux-gnu/asm pointed to /usr/include/x86_64-linux-gnu/asm,
- # but we can't write there. Instead, create a link here and force it to be included in the
- # search paths by wrapping gcc/g++.
-
- mkdir -p $EXTRA_INCLUDES_BASE/i686-pc-linux-gnu
- rm -f $WRAP_DIR/extra_includes/i686-pc-linux-gnu/asm
- ln -s /usr/include/x86_64-linux-gnu/asm $EXTRA_INCLUDES_BASE/i686-pc-linux-gnu/asm
-
- for prog in gcc g++; do
- rm -f ${WRAP_DIR}/${prog}
- cat << EOF > ${WRAP_DIR}/${prog}
- #!/usr/bin/env bash
- REAL="$(which -a ${prog}-8 | grep -v ${WRAP_DIR}/${prog} | head -1)"
- for var in "\$@"
- do
- if [ "\$var" = "-m32" ]; then
- export C_INCLUDE_PATH="$EXTRA_INCLUDES_BASE/i686-pc-linux-gnu"
- export CPLUS_INCLUDE_PATH="$EXTRA_INCLUDES_BASE/i686-pc-linux-gnu"
- break
- fi
- done
- \$REAL \$@
- EOF
- chmod +x ${WRAP_DIR}/${prog}
- done
-
cd bitcoin
BASEPREFIX="${PWD}/depends"
# Build dependencies for each host
for i in $HOSTS; do
- EXTRA_INCLUDES="$EXTRA_INCLUDES_BASE/$i"
- if [ -d "$EXTRA_INCLUDES" ]; then
- export HOST_ID_SALT="$EXTRA_INCLUDES"
- fi
make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}"
- unset HOST_ID_SALT
done
# Faketime for binaries
diff --git a/contrib/valgrind.supp b/contrib/valgrind.supp
index d2652119b4..ece02dc24e 100644
--- a/contrib/valgrind.supp
+++ b/contrib/valgrind.supp
@@ -123,7 +123,6 @@
Memcheck:Cond
...
fun:_ZN5boost10filesystem6detail11unique_pathERKNS0_4pathEPNS_6system10error_codeE
- fun:unique_path
}
{
Suppress boost warning
diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk
index 3a7e605b4f..4f6b543aff 100644
--- a/depends/packages/boost.mk
+++ b/depends/packages/boost.mk
@@ -31,7 +31,9 @@ $(package)_cxxflags_linux=-fPIC
$(package)_cxxflags_android=-fPIC
endef
+# Fix unused variable in boost_process, can be removed after upgrading to 1.72
define $(package)_preprocess_cmds
+ sed -i.old "s/int ret_sig = 0;//" boost/process/detail/posix/wait_group.hpp && \
echo "using $($(package)_toolset_$(host_os)) : : $($(package)_cxx) : <cxxflags>\"$($(package)_cxxflags) $($(package)_cppflags)\" <linkflags>\"$($(package)_ldflags)\" <archiver>\"$($(package)_archiver_$(host_os))\" <striper>\"$(host_STRIP)\" <ranlib>\"$(host_RANLIB)\" <rc>\"$(host_WINDRES)\" : ;" > user-config.jam
endef
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index 7e307ab7c8..2f79168212 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -2073,7 +2073,7 @@ INCLUDE_FILE_PATTERNS =
# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-PREDEFINED =
+PREDEFINED = HAVE_BOOST_PROCESS
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The
diff --git a/doc/JSON-RPC-interface.md b/doc/JSON-RPC-interface.md
index a0cfe84a3e..40d8e330e2 100644
--- a/doc/JSON-RPC-interface.md
+++ b/doc/JSON-RPC-interface.md
@@ -60,7 +60,7 @@ RPC interface will be abused.
are sent as clear text that can be read by anyone on your network
path. Additionally, the RPC interface has not been hardened to
withstand arbitrary Internet traffic, so changing the above settings
- to expose it to the Internet (even using something like a Tor hidden
+ to expose it to the Internet (even using something like a Tor onion
service) could expose you to unconsidered vulnerabilities. See
`bitcoind -help` for more information about these settings and other
settings described in this document.
diff --git a/doc/build-openbsd.md b/doc/build-openbsd.md
index 53c647ae34..584ee43d48 100644
--- a/doc/build-openbsd.md
+++ b/doc/build-openbsd.md
@@ -1,6 +1,6 @@
OpenBSD build guide
======================
-(updated for OpenBSD 6.4)
+(updated for OpenBSD 6.7)
This guide describes how to build bitcoind and command-line utilities on OpenBSD.
@@ -15,7 +15,7 @@ Run the following as root to install the base dependencies for building:
pkg_add git gmake libevent libtool boost
pkg_add autoconf # (select highest version, e.g. 2.69)
pkg_add automake # (select highest version, e.g. 1.16)
-pkg_add python # (select highest version, e.g. 3.6)
+pkg_add python # (select highest version, e.g. 3.8)
git clone https://github.com/bitcoin/bitcoin.git
```
@@ -23,10 +23,10 @@ git clone https://github.com/bitcoin/bitcoin.git
See [dependencies.md](dependencies.md) for a complete overview.
**Important**: From OpenBSD 6.2 onwards a C++11-supporting clang compiler is
-part of the base image, and while building it is necessary to make sure that this
-compiler is used and not ancient g++ 4.2.1. This is done by appending
-`CC=cc CXX=c++` to configuration commands. Mixing different compilers
-within the same executable will result in linker errors.
+part of the base image, and while building it is necessary to make sure that
+this compiler is used and not ancient g++ 4.2.1. This is done by appending
+`CC=cc CC_FOR_BUILD=cc CXX=c++` to configuration commands. Mixing different
+compilers within the same executable will result in errors.
### Building BerkeleyDB
@@ -77,7 +77,7 @@ To configure with wallet:
To configure without wallet:
```bash
-./configure --disable-wallet --with-gui=no CC=cc CXX=c++ MAKE=gmake
+./configure --disable-wallet --with-gui=no CC=cc CC_FOR_BUILD=cc CXX=c++ MAKE=gmake
```
Build and run the tests:
diff --git a/doc/dependencies.md b/doc/dependencies.md
index 0cb5311e8b..92dea65309 100644
--- a/doc/dependencies.md
+++ b/doc/dependencies.md
@@ -6,7 +6,7 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct
| Dependency | Version used | Minimum required | CVEs | Shared | [Bundled Qt library](https://doc.qt.io/qt-5/configure-options.html#third-party-libraries) |
| --- | --- | --- | --- | --- | --- |
| Berkeley DB | [4.8.30](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) | 4.8.x | No | | |
-| Boost | [1.70.0](https://www.boost.org/users/download/) | [1.47.0](https://github.com/bitcoin/bitcoin/pull/8920) | No | | |
+| Boost | [1.70.0](https://www.boost.org/users/download/) | [1.58.0](https://github.com/bitcoin/bitcoin/pull/19667) | No | | |
| Clang | | [3.3+](https://releases.llvm.org/download.html) (C++11 support) | | | |
| Expat | [2.2.7](https://libexpat.github.io/) | | No | Yes | |
| fontconfig | [2.12.1](https://www.freedesktop.org/software/fontconfig/release/) | | No | Yes | |
diff --git a/doc/files.md b/doc/files.md
index 5475826329..52e094a60b 100644
--- a/doc/files.md
+++ b/doc/files.md
@@ -56,7 +56,7 @@ Subdirectory | File(s) | Description
`./` | `fee_estimates.dat` | Stores statistics used to estimate minimum transaction fees and priorities required for confirmation
`./` | `guisettings.ini.bak` | Backup of former [GUI settings](#gui-settings) after `-resetguisettings` option is used
`./` | `mempool.dat` | Dump of the mempool's transactions
-`./` | `onion_private_key` | Cached Tor hidden service private key for `-listenonion` option
+`./` | `onion_private_key` | Cached Tor onion service private key for `-listenonion` option
`./` | `peers.dat` | Peer IP address database (custom format)
`./` | `settings.json` | Read-write settings set through GUI or RPC interfaces, augmenting manual settings from [bitcoin.conf](bitcoin-conf.md). File is created automatically if read-write settings storage is not disabled with `-nosettings` option. Path can be specified with `-settings` option
`./` | `.cookie` | Session RPC authentication cookie; if used, created at start and deleted on shutdown; can be specified by `-rpccookiefile` option
diff --git a/doc/tor.md b/doc/tor.md
index 2c54e32f84..17807856e5 100644
--- a/doc/tor.md
+++ b/doc/tor.md
@@ -1,6 +1,6 @@
# TOR SUPPORT IN BITCOIN
-It is possible to run Bitcoin Core as a Tor hidden service, and connect to such services.
+It is possible to run Bitcoin Core as a Tor onion service, and connect to such services.
The following directions assume you have a Tor proxy running on port 9050. Many distributions default to having a SOCKS proxy listening on port 9050, but others may not. In particular, the Tor Browser Bundle defaults to listening on port 9150. See [Tor Project FAQ:TBBSocksPort](https://www.torproject.org/docs/faq.html.en#TBBSocksPort) for how to properly
configure Tor.
@@ -14,12 +14,12 @@ outgoing connections, but more is possible.
-proxy=ip:port Set the proxy server. If SOCKS5 is selected (default), this proxy
server will be used to try to reach .onion addresses as well.
- -onion=ip:port Set the proxy server to use for Tor hidden services. You do not
+ -onion=ip:port Set the proxy server to use for Tor onion services. You do not
need to set this if it's the same as -proxy. You can use -noonion
- to explicitly disable access to hidden services.
+ to explicitly disable access to onion services.
-listen When using -proxy, listening is disabled by default. If you want
- to run a hidden service (see next section), you'll need to enable
+ to run an onion service (see next section), you'll need to enable
it explicitly.
-connect=X When behind a Tor proxy, you can specify .onion addresses instead
@@ -94,11 +94,11 @@ for normal IPv4/IPv6 communication, use:
## 3. Automatically listen on Tor
Starting with Tor version 0.2.7.1 it is possible, through Tor's control socket
-API, to create and destroy 'ephemeral' hidden services programmatically.
+API, to create and destroy 'ephemeral' onion services programmatically.
Bitcoin Core has been updated to make use of this.
This means that if Tor is running (and proper authentication has been configured),
-Bitcoin Core automatically creates a hidden service to listen on. This will positively
+Bitcoin Core automatically creates an onion service to listen on. This will positively
affect the number of available .onion nodes.
This new feature is enabled by default if Bitcoin Core is listening (`-listen`), and
@@ -110,7 +110,7 @@ Connecting to Tor's control socket API requires one of two authentication method
configured. It also requires the control socket to be enabled, e.g. put `ControlPort 9051`
in `torrc` config file. For cookie authentication the user running bitcoind must have read
access to the `CookieAuthFile` specified in Tor configuration. In some cases this is
-preconfigured and the creation of a hidden service is automatic. If permission problems
+preconfigured and the creation of an onion service is automatic. If permission problems
are seen with `-debug=tor` they can be resolved by adding both the user running Tor and
the user running bitcoind to the same group and setting permissions appropriately. On
Debian-based systems the user running bitcoind can be added to the debian-tor group,
@@ -127,8 +127,8 @@ in the tor configuration file. The hashed password can be obtained with the comm
## 4. Privacy recommendations
-- Do not add anything but Bitcoin Core ports to the hidden service created in section 2.
- If you run a web service too, create a new hidden service for that.
+- Do not add anything but Bitcoin Core ports to the onion service created in section 2.
+ If you run a web service too, create a new onion service for that.
Otherwise it is trivial to link them, which may reduce privacy. Hidden
services created automatically (as in section 3) always have only one port
open.
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index b961f8d5b9..c3e46c0def 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -265,6 +265,7 @@ BITCOIN_TESTS =\
test/skiplist_tests.cpp \
test/streams_tests.cpp \
test/sync_tests.cpp \
+ test/system_tests.cpp \
test/util_threadnames_tests.cpp \
test/timedata_tests.cpp \
test/torcontrol_tests.cpp \
@@ -1207,7 +1208,7 @@ nodist_test_test_bitcoin_SOURCES = $(GENERATED_TEST_FILES)
$(BITCOIN_TESTS): $(GENERATED_TEST_FILES)
-CLEAN_BITCOIN_TEST = test/*.gcda test/*.gcno $(GENERATED_TEST_FILES) $(BITCOIN_TESTS:=.log)
+CLEAN_BITCOIN_TEST = test/*.gcda test/*.gcno test/fuzz/*.gcda test/fuzz/*.gcno $(GENERATED_TEST_FILES) $(BITCOIN_TESTS:=.log)
CLEANFILES += $(CLEAN_BITCOIN_TEST)
diff --git a/src/chainparams.cpp b/src/chainparams.cpp
index a7c9e33f07..ffd2076c9a 100644
--- a/src/chainparams.cpp
+++ b/src/chainparams.cpp
@@ -110,7 +110,7 @@ public:
// Note that of those which support the service bits prefix, most only support a subset of
// possible options.
- // This is fine at runtime as we'll fall back to using them as a oneshot if they don't support the
+ // This is fine at runtime as we'll fall back to using them as an addrfetch if they don't support the
// service bits we want, but we should get them updated to support all service bits wanted by any
// release ASAP to avoid it where possible.
vSeeds.emplace_back("seed.bitcoin.sipa.be"); // Pieter Wuille, only supports x1, x5, x9, and xd
diff --git a/src/consensus/validation.h b/src/consensus/validation.h
index 8de7a8f2d8..2a93a090d6 100644
--- a/src/consensus/validation.h
+++ b/src/consensus/validation.h
@@ -26,7 +26,8 @@ enum class TxValidationResult {
* is uninteresting.
*/
TX_RECENT_CONSENSUS_CHANGE,
- TX_NOT_STANDARD, //!< didn't meet our local policy rules
+ TX_INPUTS_NOT_STANDARD, //!< inputs (covered by txid) failed policy rules
+ TX_NOT_STANDARD, //!< otherwise didn't meet our local policy rules
TX_MISSING_INPUTS, //!< transaction was missing some of its inputs
TX_PREMATURE_SPEND, //!< transaction spends a coinbase too early, or violates locktime/sequence locks
/**
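The hunk above splits local policy failures into two result codes: TX_INPUTS_NOT_STANDARD for failures attributable to the inputs (and therefore covered by the txid alone), and TX_NOT_STANDARD for everything else. A hypothetical sketch of branching on that distinction; the enum values come from the diff, the helper function is illustrative only.

```cpp
// Illustrative only: the enum values mirror the diff above,
// DescribePolicyFailure is a hypothetical helper.
#include <string>

enum class TxValidationResult {
    TX_INPUTS_NOT_STANDARD, // inputs (covered by txid) failed policy rules
    TX_NOT_STANDARD,        // otherwise didn't meet our local policy rules
    // ... other results omitted for brevity ...
};

std::string DescribePolicyFailure(TxValidationResult res)
{
    switch (res) {
    case TxValidationResult::TX_INPUTS_NOT_STANDARD:
        return "non-standard inputs (failure determined by the txid alone)";
    case TxValidationResult::TX_NOT_STANDARD:
        return "non-standard transaction";
    default:
        return "not a policy failure";
    }
}
```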
diff --git a/src/core_write.cpp b/src/core_write.cpp
index 34cfeecc6f..f9d918cb6d 100644
--- a/src/core_write.cpp
+++ b/src/core_write.cpp
@@ -48,13 +48,14 @@ std::string FormatScript(const CScript& script)
}
}
if (vch.size() > 0) {
- ret += strprintf("0x%x 0x%x ", HexStr(it2, it - vch.size()), HexStr(it - vch.size(), it));
+ ret += strprintf("0x%x 0x%x ", HexStr(std::vector<uint8_t>(it2, it - vch.size())),
+ HexStr(std::vector<uint8_t>(it - vch.size(), it)));
} else {
- ret += strprintf("0x%x ", HexStr(it2, it));
+ ret += strprintf("0x%x ", HexStr(std::vector<uint8_t>(it2, it)));
}
continue;
}
- ret += strprintf("0x%x ", HexStr(it2, script.end()));
+ ret += strprintf("0x%x ", HexStr(std::vector<uint8_t>(it2, script.end())));
break;
}
return ret.substr(0, ret.size() - 1);
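These call sites change because HexStr now takes a single byte range instead of a begin/end iterator pair, hence the temporary std::vector wrappers. A simplified, hypothetical sketch of a range-taking hex encoder shows why one argument suffices; the real HexStr lives in util/strencodings.h (also touched by this commit) and, judging from the net.cpp hunk below, accepts a Span of bytes.

```cpp
// Simplified, hypothetical sketch of a range-based hex encoder;
// not the project's implementation.
#include <cstdint>
#include <string>
#include <vector>

template <typename Bytes>
std::string HexStrSketch(const Bytes& bytes)
{
    static const char hexmap[] = "0123456789abcdef";
    std::string rv;
    rv.reserve(bytes.size() * 2);
    for (const uint8_t b : bytes) {
        rv.push_back(hexmap[b >> 4]);   // high nibble
        rv.push_back(hexmap[b & 0x0f]); // low nibble
    }
    return rv;
}

// Usage: HexStrSketch(std::vector<uint8_t>{0xde, 0xad, 0xbe, 0xef}) == "deadbeef"
```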
diff --git a/src/init.cpp b/src/init.cpp
index 6cca21f375..08944b79a5 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -442,13 +442,13 @@ void SetupServerArgs(NodeContext& node)
argsman.AddArg("-externalip=<ip>", "Specify your own public address", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-listen", "Accept connections from outside (default: 1 if no -proxy or -connect)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- argsman.AddArg("-listenonion", strprintf("Automatically create Tor hidden service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-listenonion", strprintf("Automatically create Tor onion service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxconnections=<n>", strprintf("Maintain at most <n> connections to peers (default: %u)", DEFAULT_MAX_PEER_CONNECTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxreceivebuffer=<n>", strprintf("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXRECEIVEBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxsendbuffer=<n>", strprintf("Maximum per-connection send buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXSENDBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxtimeadjustment", strprintf("Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by peers forward or backward by this amount. (default: %u seconds)", DEFAULT_MAX_TIME_ADJUSTMENT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxuploadtarget=<n>", strprintf("Tries to keep outbound traffic under the given target (in MiB per 24h). Limit does not apply to peers with 'download' permission. 0 = no limit (default: %d)", DEFAULT_MAX_UPLOAD_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- argsman.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor hidden services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor onion services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-onlynet=<net>", "Make outgoing connections only through network <net> (ipv4, ipv6 or onion). Incoming connections are not affected by this option. This option can be specified multiple times to allow multiple networks.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
diff --git a/src/interfaces/node.cpp b/src/interfaces/node.cpp
index 33f0dac263..969767b90f 100644
--- a/src/interfaces/node.cpp
+++ b/src/interfaces/node.cpp
@@ -56,6 +56,7 @@ namespace {
class NodeImpl : public Node
{
public:
+ NodeImpl(NodeContext* context) { setContext(context); }
void initError(const bilingual_str& message) override { InitError(message); }
bool parseParameters(int argc, const char* const argv[], std::string& error) override
{
@@ -81,13 +82,13 @@ public:
}
bool appInitMain() override
{
- m_context.chain = MakeChain(m_context);
- return AppInitMain(m_context_ref, m_context);
+ m_context->chain = MakeChain(*m_context);
+ return AppInitMain(m_context_ref, *m_context);
}
void appShutdown() override
{
- Interrupt(m_context);
- Shutdown(m_context);
+ Interrupt(*m_context);
+ Shutdown(*m_context);
}
void startShutdown() override
{
@@ -108,19 +109,19 @@ public:
StopMapPort();
}
}
- void setupServerArgs() override { return SetupServerArgs(m_context); }
+ void setupServerArgs() override { return SetupServerArgs(*m_context); }
bool getProxy(Network net, proxyType& proxy_info) override { return GetProxy(net, proxy_info); }
size_t getNodeCount(CConnman::NumConnections flags) override
{
- return m_context.connman ? m_context.connman->GetNodeCount(flags) : 0;
+ return m_context->connman ? m_context->connman->GetNodeCount(flags) : 0;
}
bool getNodesStats(NodesStats& stats) override
{
stats.clear();
- if (m_context.connman) {
+ if (m_context->connman) {
std::vector<CNodeStats> stats_temp;
- m_context.connman->GetNodeStats(stats_temp);
+ m_context->connman->GetNodeStats(stats_temp);
stats.reserve(stats_temp.size());
for (auto& node_stats_temp : stats_temp) {
@@ -141,46 +142,46 @@ public:
}
bool getBanned(banmap_t& banmap) override
{
- if (m_context.banman) {
- m_context.banman->GetBanned(banmap);
+ if (m_context->banman) {
+ m_context->banman->GetBanned(banmap);
return true;
}
return false;
}
bool ban(const CNetAddr& net_addr, int64_t ban_time_offset) override
{
- if (m_context.banman) {
- m_context.banman->Ban(net_addr, ban_time_offset);
+ if (m_context->banman) {
+ m_context->banman->Ban(net_addr, ban_time_offset);
return true;
}
return false;
}
bool unban(const CSubNet& ip) override
{
- if (m_context.banman) {
- m_context.banman->Unban(ip);
+ if (m_context->banman) {
+ m_context->banman->Unban(ip);
return true;
}
return false;
}
bool disconnectByAddress(const CNetAddr& net_addr) override
{
- if (m_context.connman) {
- return m_context.connman->DisconnectNode(net_addr);
+ if (m_context->connman) {
+ return m_context->connman->DisconnectNode(net_addr);
}
return false;
}
bool disconnectById(NodeId id) override
{
- if (m_context.connman) {
- return m_context.connman->DisconnectNode(id);
+ if (m_context->connman) {
+ return m_context->connman->DisconnectNode(id);
}
return false;
}
- int64_t getTotalBytesRecv() override { return m_context.connman ? m_context.connman->GetTotalBytesRecv() : 0; }
- int64_t getTotalBytesSent() override { return m_context.connman ? m_context.connman->GetTotalBytesSent() : 0; }
- size_t getMempoolSize() override { return m_context.mempool ? m_context.mempool->size() : 0; }
- size_t getMempoolDynamicUsage() override { return m_context.mempool ? m_context.mempool->DynamicMemoryUsage() : 0; }
+ int64_t getTotalBytesRecv() override { return m_context->connman ? m_context->connman->GetTotalBytesRecv() : 0; }
+ int64_t getTotalBytesSent() override { return m_context->connman ? m_context->connman->GetTotalBytesSent() : 0; }
+ size_t getMempoolSize() override { return m_context->mempool ? m_context->mempool->size() : 0; }
+ size_t getMempoolDynamicUsage() override { return m_context->mempool ? m_context->mempool->DynamicMemoryUsage() : 0; }
bool getHeaderTip(int& height, int64_t& block_time) override
{
LOCK(::cs_main);
@@ -223,11 +224,11 @@ public:
bool getImporting() override { return ::fImporting; }
void setNetworkActive(bool active) override
{
- if (m_context.connman) {
- m_context.connman->SetNetworkActive(active);
+ if (m_context->connman) {
+ m_context->connman->SetNetworkActive(active);
}
}
- bool getNetworkActive() override { return m_context.connman && m_context.connman->GetNetworkActive(); }
+ bool getNetworkActive() override { return m_context->connman && m_context->connman->GetNetworkActive(); }
CFeeRate estimateSmartFee(int num_blocks, bool conservative, int* returned_target = nullptr) override
{
FeeCalculation fee_calc;
@@ -269,7 +270,7 @@ public:
std::vector<std::unique_ptr<Wallet>> getWallets() override
{
std::vector<std::unique_ptr<Wallet>> wallets;
- for (auto& client : m_context.chain_clients) {
+ for (auto& client : m_context->chain_clients) {
auto client_wallets = client->getWallets();
std::move(client_wallets.begin(), client_wallets.end(), std::back_inserter(wallets));
}
@@ -277,12 +278,12 @@ public:
}
std::unique_ptr<Wallet> loadWallet(const std::string& name, bilingual_str& error, std::vector<bilingual_str>& warnings) override
{
- return MakeWallet(LoadWallet(*m_context.chain, name, error, warnings));
+ return MakeWallet(LoadWallet(*m_context->chain, name, error, warnings));
}
std::unique_ptr<Wallet> createWallet(const SecureString& passphrase, uint64_t wallet_creation_flags, const std::string& name, bilingual_str& error, std::vector<bilingual_str>& warnings, WalletCreationStatus& status) override
{
std::shared_ptr<CWallet> wallet;
- status = CreateWallet(*m_context.chain, passphrase, wallet_creation_flags, name, error, warnings, wallet);
+ status = CreateWallet(*m_context->chain, passphrase, wallet_creation_flags, name, error, warnings, wallet);
return MakeWallet(wallet);
}
std::unique_ptr<Handler> handleInitMessage(InitMessageFn fn) override
@@ -336,13 +337,22 @@ public:
/* verification progress is unused when a header was received */ 0);
}));
}
- NodeContext* context() override { return &m_context; }
- NodeContext m_context;
- util::Ref m_context_ref{m_context};
+ NodeContext* context() override { return m_context; }
+ void setContext(NodeContext* context) override
+ {
+ m_context = context;
+ if (context) {
+ m_context_ref.Set(*context);
+ } else {
+ m_context_ref.Clear();
+ }
+ }
+ NodeContext* m_context{nullptr};
+ util::Ref m_context_ref;
};
} // namespace
-std::unique_ptr<Node> MakeNode() { return MakeUnique<NodeImpl>(); }
+std::unique_ptr<Node> MakeNode(NodeContext* context) { return MakeUnique<NodeImpl>(context); }
} // namespace interfaces
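With the change above, NodeImpl no longer owns a NodeContext; it wraps a pointer supplied at construction and exposes setContext() to swap it. A hypothetical usage sketch, assuming this tree's headers; the assertion is illustrative.

```cpp
// Hypothetical usage sketch; assumes Bitcoin Core headers from this tree.
#include <interfaces/node.h>
#include <node/context.h>

#include <cassert>
#include <memory>

void ContextWiringSketch()
{
    NodeContext node_context; // externally owned context
    std::unique_ptr<interfaces::Node> node = interfaces::MakeNode(&node_context);
    assert(node->context() == &node_context); // same object, not a copy
    node->setContext(nullptr);                // detach before teardown
}
```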
diff --git a/src/interfaces/node.h b/src/interfaces/node.h
index a9680c42b5..cd3cfe487d 100644
--- a/src/interfaces/node.h
+++ b/src/interfaces/node.h
@@ -268,12 +268,14 @@ public:
std::function<void(SynchronizationState, interfaces::BlockTip tip, double verification_progress)>;
virtual std::unique_ptr<Handler> handleNotifyHeaderTip(NotifyHeaderTipFn fn) = 0;
- //! Return pointer to internal chain interface, useful for testing.
+ //! Get and set internal node context. Useful for testing, but not
+ //! accessible across processes.
virtual NodeContext* context() { return nullptr; }
+ virtual void setContext(NodeContext* context) { }
};
//! Return implementation of Node interface.
-std::unique_ptr<Node> MakeNode();
+std::unique_ptr<Node> MakeNode(NodeContext* context = nullptr);
//! Block tip (could be a header or not, depends on the subscribed signal).
struct BlockTip {
diff --git a/src/net.cpp b/src/net.cpp
index 3fd3f19b5c..6c1980735c 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -105,10 +105,10 @@ std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(cs_mapLocalHost);
static bool vfLimited[NET_MAX] GUARDED_BY(cs_mapLocalHost) = {};
std::string strSubVersion;
-void CConnman::AddOneShot(const std::string& strDest)
+void CConnman::AddAddrFetch(const std::string& strDest)
{
- LOCK(cs_vOneShots);
- vOneShots.push_back(strDest);
+ LOCK(m_addr_fetches_mutex);
+ m_addr_fetches.push_back(strDest);
}
uint16_t GetListenPort()
@@ -346,7 +346,7 @@ bool CConnman::CheckIncomingNonce(uint64_t nonce)
{
LOCK(cs_vNodes);
for (const CNode* pnode : vNodes) {
- if (!pnode->fSuccessfullyConnected && !pnode->fInbound && pnode->GetLocalNonce() == nonce)
+ if (!pnode->fSuccessfullyConnected && !pnode->IsInboundConn() && pnode->GetLocalNonce() == nonce)
return false;
}
return true;
@@ -368,8 +368,10 @@ static CAddress GetBindAddress(SOCKET sock)
return addr_bind;
}
-CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, bool manual_connection, bool block_relay_only)
+CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type)
{
+ assert(conn_type != ConnectionType::INBOUND);
+
if (pszDest == nullptr) {
if (IsLocal(addrConnect))
return nullptr;
@@ -432,7 +434,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
if (hSocket == INVALID_SOCKET) {
return nullptr;
}
- connected = ConnectSocketDirectly(addrConnect, hSocket, nConnectTimeout, manual_connection);
+ connected = ConnectSocketDirectly(addrConnect, hSocket, nConnectTimeout, conn_type == ConnectionType::MANUAL);
}
if (!proxyConnectionFailed) {
// If a connection to the node was attempted, and failure (if any) is not caused by a problem connecting to
@@ -459,7 +461,7 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo
NodeId id = GetNewNodeId();
uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize();
CAddress addr_bind = GetBindAddress(hSocket);
- CNode* pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? pszDest : "", false, block_relay_only);
+ CNode* pnode = new CNode(id, nLocalServices, GetBestHeight(), hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? pszDest : "", conn_type);
pnode->AddRef();
// We're making a new connection, harvest entropy from the time (and our peer count)
@@ -536,8 +538,8 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap)
LOCK(cs_SubVer);
X(cleanSubVer);
}
- X(fInbound);
- X(m_manual_connection);
+ stats.fInbound = IsInboundConn();
+ stats.m_manual_connection = IsManualConn();
X(nStartingHeight);
{
LOCK(cs_vSend);
@@ -722,8 +724,8 @@ CNetMessage V1TransportDeserializer::GetMessage(const CMessageHeader::MessageSta
if (!msg.m_valid_checksum) {
LogPrint(BCLog::NET, "CHECKSUM ERROR (%s, %u bytes), expected %s was %s\n",
SanitizeString(msg.m_command), msg.m_message_size,
- HexStr(hash.begin(), hash.begin()+CMessageHeader::CHECKSUM_SIZE),
- HexStr(hdr.pchChecksum, hdr.pchChecksum+CMessageHeader::CHECKSUM_SIZE));
+ HexStr(Span<uint8_t>(hash.begin(), hash.begin() + CMessageHeader::CHECKSUM_SIZE)),
+ HexStr(hdr.pchChecksum));
}
// store receive time
@@ -872,7 +874,7 @@ bool CConnman::AttemptToEvictConnection()
for (const CNode* node : vNodes) {
if (node->HasPermission(PF_NOBAN))
continue;
- if (!node->fInbound)
+ if (!node->IsInboundConn())
continue;
if (node->fDisconnect)
continue;
@@ -983,7 +985,7 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
{
LOCK(cs_vNodes);
for (const CNode* pnode : vNodes) {
- if (pnode->fInbound) nInbound++;
+ if (pnode->IsInboundConn()) nInbound++;
}
}
@@ -1048,7 +1050,7 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
if (NetPermissions::HasFlag(permissionFlags, PF_BLOOMFILTER)) {
nodeServices = static_cast<ServiceFlags>(nodeServices | NODE_BLOOM);
}
- CNode* pnode = new CNode(id, nodeServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, addr_bind, "", true);
+ CNode* pnode = new CNode(id, nodeServices, GetBestHeight(), hSocket, addr, CalculateKeyedNetGroup(addr), nonce, addr_bind, "", ConnectionType::INBOUND);
pnode->AddRef();
pnode->m_permissionFlags = permissionFlags;
// If this flag is present, the user probably expect that RPC and QT report it as whitelisted (backward compatibility)
@@ -1646,7 +1648,7 @@ void CConnman::ThreadDNSAddressSeed()
{
LOCK(cs_vNodes);
for (const CNode* pnode : vNodes) {
- nRelevant += pnode->fSuccessfullyConnected && !pnode->fFeeler && !pnode->fOneShot && !pnode->m_manual_connection && !pnode->fInbound;
+ if (pnode->fSuccessfullyConnected && pnode->IsOutboundOrBlockRelayConn()) ++nRelevant;
}
}
if (nRelevant >= 2) {
@@ -1674,7 +1676,7 @@ void CConnman::ThreadDNSAddressSeed()
LogPrintf("Loading addresses from DNS seed %s\n", seed);
if (HaveNameProxy()) {
- AddOneShot(seed);
+ AddAddrFetch(seed);
} else {
std::vector<CNetAddr> vIPs;
std::vector<CAddress> vAdd;
@@ -1696,8 +1698,8 @@ void CConnman::ThreadDNSAddressSeed()
addrman.Add(vAdd, resolveSource);
} else {
// We now avoid directly using results from DNS Seeds which do not support service bit filtering,
- // instead using them as a oneshot to get nodes with our desired service bits.
- AddOneShot(seed);
+ // instead using them as a addrfetch to get nodes with our desired service bits.
+ AddAddrFetch(seed);
}
}
--seeds_right_now;
@@ -1705,17 +1707,6 @@ void CConnman::ThreadDNSAddressSeed()
LogPrintf("%d addresses found from DNS seeds\n", found);
}
-
-
-
-
-
-
-
-
-
-
-
void CConnman::DumpAddresses()
{
int64_t nStart = GetTimeMillis();
@@ -1727,20 +1718,20 @@ void CConnman::DumpAddresses()
addrman.size(), GetTimeMillis() - nStart);
}
-void CConnman::ProcessOneShot()
+void CConnman::ProcessAddrFetch()
{
std::string strDest;
{
- LOCK(cs_vOneShots);
- if (vOneShots.empty())
+ LOCK(m_addr_fetches_mutex);
+ if (m_addr_fetches.empty())
return;
- strDest = vOneShots.front();
- vOneShots.pop_front();
+ strDest = m_addr_fetches.front();
+ m_addr_fetches.pop_front();
}
CAddress addr;
CSemaphoreGrant grant(*semOutbound, true);
if (grant) {
- OpenNetworkConnection(addr, false, &grant, strDest.c_str(), true);
+ OpenNetworkConnection(addr, false, &grant, strDest.c_str(), ConnectionType::ADDR_FETCH);
}
}
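ProcessAddrFetch() above follows a common pattern: pop one destination while holding m_addr_fetches_mutex, then perform the slow connection attempt with the lock released. A standalone, hypothetical sketch of that pattern; the class and member names are illustrative, not the project's.

```cpp
// Hypothetical sketch of the addr-fetch queue pattern: take one destination
// under the mutex, then do the slow work with the lock released.
#include <deque>
#include <mutex>
#include <string>

class AddrFetchQueueSketch
{
public:
    void Add(const std::string& dest)
    {
        std::lock_guard<std::mutex> lock(m_mutex);
        m_fetches.push_back(dest);
    }

    // Returns false if there was nothing to process.
    bool ProcessOne()
    {
        std::string dest;
        {
            std::lock_guard<std::mutex> lock(m_mutex);
            if (m_fetches.empty()) return false;
            dest = m_fetches.front();
            m_fetches.pop_front();
        }
        // ... open an ADDR_FETCH connection to `dest` here, outside the lock ...
        return true;
    }

private:
    std::mutex m_mutex;
    std::deque<std::string> m_fetches;
};
```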
@@ -1767,7 +1758,7 @@ int CConnman::GetExtraOutboundCount()
{
LOCK(cs_vNodes);
for (const CNode* pnode : vNodes) {
- if (!pnode->fInbound && !pnode->m_manual_connection && !pnode->fFeeler && !pnode->fDisconnect && !pnode->fOneShot && pnode->fSuccessfullyConnected) {
+ if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsOutboundOrBlockRelayConn()) {
++nOutbound;
}
}
@@ -1782,11 +1773,11 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
{
for (int64_t nLoop = 0;; nLoop++)
{
- ProcessOneShot();
+ ProcessAddrFetch();
for (const std::string& strAddr : connect)
{
CAddress addr(CService(), NODE_NONE);
- OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(), false, false, true);
+ OpenNetworkConnection(addr, false, nullptr, strAddr.c_str(), ConnectionType::MANUAL);
for (int i = 0; i < 10 && i < nLoop; i++)
{
if (!interruptNet.sleep_for(std::chrono::milliseconds(500)))
@@ -1805,7 +1796,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
int64_t nNextFeeler = PoissonNextSend(nStart*1000*1000, FEELER_INTERVAL);
while (!interruptNet)
{
- ProcessOneShot();
+ ProcessAddrFetch();
if (!interruptNet.sleep_for(std::chrono::milliseconds(500)))
return;
@@ -1838,21 +1829,27 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
int nOutboundFullRelay = 0;
int nOutboundBlockRelay = 0;
std::set<std::vector<unsigned char> > setConnected;
+
{
LOCK(cs_vNodes);
for (const CNode* pnode : vNodes) {
- if (!pnode->fInbound && !pnode->m_manual_connection) {
- // Netgroups for inbound and addnode peers are not excluded because our goal here
- // is to not use multiple of our limited outbound slots on a single netgroup
- // but inbound and addnode peers do not use our outbound slots. Inbound peers
- // also have the added issue that they're attacker controlled and could be used
- // to prevent us from connecting to particular hosts if we used them here.
- setConnected.insert(pnode->addr.GetGroup(addrman.m_asmap));
- if (pnode->m_tx_relay == nullptr) {
- nOutboundBlockRelay++;
- } else if (!pnode->fFeeler) {
- nOutboundFullRelay++;
- }
+ if (pnode->IsFullOutboundConn()) nOutboundFullRelay++;
+ if (pnode->IsBlockOnlyConn()) nOutboundBlockRelay++;
+
+ // Netgroups for inbound and manual peers are not excluded because our goal here
+ // is to not use multiple of our limited outbound slots on a single netgroup
+ // but inbound and manual peers do not use our outbound slots. Inbound peers
+ // also have the added issue that they could be attacker controlled and used
+ // to prevent us from connecting to particular hosts if we used them here.
+ switch (pnode->m_conn_type) {
+ case ConnectionType::INBOUND:
+ case ConnectionType::MANUAL:
+ break;
+ case ConnectionType::OUTBOUND:
+ case ConnectionType::BLOCK_RELAY:
+ case ConnectionType::ADDR_FETCH:
+ case ConnectionType::FEELER:
+ setConnected.insert(pnode->addr.GetGroup(addrman.m_asmap));
}
}
}
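
The switch above populates setConnected with the netgroups of our own automatic outbound peers; candidates from an already-used netgroup are skipped later in the loop so the limited outbound slots stay spread across the network. A simplified sketch of that check, with GroupId standing in for the CNetAddr::GetGroup(asmap) result:

    #include <set>
    #include <vector>

    using GroupId = std::vector<unsigned char>; // stand-in for CNetAddr::GetGroup(asmap)

    // A candidate address is only considered for an automatic outbound slot if
    // no existing outbound/block-relay/feeler/addr-fetch peer already occupies
    // its netgroup.
    bool NetgroupAlreadyUsed(const std::set<GroupId>& connected_groups, const GroupId& candidate_group)
    {
        return connected_groups.count(candidate_group) > 0;
    }
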
@@ -1945,14 +1942,24 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
LogPrint(BCLog::NET, "Making feeler connection to %s\n", addrConnect.ToString());
}
- // Open this connection as block-relay-only if we're already at our
- // full-relay capacity, but not yet at our block-relay peer limit.
- // (It should not be possible for fFeeler to be set if we're not
- // also at our block-relay peer limit, but check against that as
- // well for sanity.)
- bool block_relay_only = nOutboundBlockRelay < m_max_outbound_block_relay && !fFeeler && nOutboundFullRelay >= m_max_outbound_full_relay;
+ ConnectionType conn_type;
+ // Determine what type of connection to open. If fFeeler is not
+ // set, open OUTBOUND connections until we meet our full-relay
+ // capacity. Then open BLOCK_RELAY connections until we hit our
+ // block-relay peer limit. Otherwise, default to opening an
+ // OUTBOUND connection.
+ if (fFeeler) {
+ conn_type = ConnectionType::FEELER;
+ } else if (nOutboundFullRelay < m_max_outbound_full_relay) {
+ conn_type = ConnectionType::OUTBOUND;
+ } else if (nOutboundBlockRelay < m_max_outbound_block_relay) {
+ conn_type = ConnectionType::BLOCK_RELAY;
+ } else {
+ // GetTryNewOutboundPeer() is true
+ conn_type = ConnectionType::OUTBOUND;
+ }
- OpenNetworkConnection(addrConnect, (int)setConnected.size() >= std::min(nMaxConnections - 1, 2), &grant, nullptr, false, fFeeler, false, block_relay_only);
+ OpenNetworkConnection(addrConnect, (int)setConnected.size() >= std::min(nMaxConnections - 1, 2), &grant, nullptr, conn_type);
}
}
}
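
Read as a standalone decision, the branch above is: feelers first, then fill full-relay slots, then block-relay-only slots, and anything past both limits (reachable only when extra outbound peers were requested) opens as full relay. A sketch restating that order under those assumptions, not the actual CConnman member:

    // Assumes the ConnectionType enum added in this change is in scope.
    ConnectionType ChooseAutomaticConnType(bool feeler,
                                           int outbound_full_relay, int max_full_relay,
                                           int outbound_block_relay, int max_block_relay)
    {
        if (feeler) return ConnectionType::FEELER;
        if (outbound_full_relay < max_full_relay) return ConnectionType::OUTBOUND;
        if (outbound_block_relay < max_block_relay) return ConnectionType::BLOCK_RELAY;
        // Both limits reached: only reachable when extra outbound peers were
        // requested (GetTryNewOutboundPeer()), so open another full-relay peer.
        return ConnectionType::OUTBOUND;
    }
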
@@ -1976,11 +1983,11 @@ std::vector<AddedNodeInfo> CConnman::GetAddedNodeInfo()
LOCK(cs_vNodes);
for (const CNode* pnode : vNodes) {
if (pnode->addr.IsValid()) {
- mapConnected[pnode->addr] = pnode->fInbound;
+ mapConnected[pnode->addr] = pnode->IsInboundConn();
}
std::string addrName = pnode->GetAddrName();
if (!addrName.empty()) {
- mapConnectedByName[std::move(addrName)] = std::make_pair(pnode->fInbound, static_cast<const CService&>(pnode->addr));
+ mapConnectedByName[std::move(addrName)] = std::make_pair(pnode->IsInboundConn(), static_cast<const CService&>(pnode->addr));
}
}
}
@@ -2027,7 +2034,7 @@ void CConnman::ThreadOpenAddedConnections()
}
tried = true;
CAddress addr(CService(), NODE_NONE);
- OpenNetworkConnection(addr, false, &grant, info.strAddedNode.c_str(), false, false, true);
+ OpenNetworkConnection(addr, false, &grant, info.strAddedNode.c_str(), ConnectionType::MANUAL);
if (!interruptNet.sleep_for(std::chrono::milliseconds(500)))
return;
}
@@ -2039,8 +2046,10 @@ void CConnman::ThreadOpenAddedConnections()
}
// if successful, this moves the passed grant to the constructed node
-void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, bool fOneShot, bool fFeeler, bool manual_connection, bool block_relay_only)
+void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound, const char *pszDest, ConnectionType conn_type)
{
+ assert(conn_type != ConnectionType::INBOUND);
+
//
// Initiate outbound network connection
//
@@ -2058,18 +2067,12 @@ void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai
} else if (FindNode(std::string(pszDest)))
return;
- CNode* pnode = ConnectNode(addrConnect, pszDest, fCountFailure, manual_connection, block_relay_only);
+ CNode* pnode = ConnectNode(addrConnect, pszDest, fCountFailure, conn_type);
if (!pnode)
return;
if (grantOutbound)
grantOutbound->MoveTo(pnode->grantOutbound);
- if (fOneShot)
- pnode->fOneShot = true;
- if (fFeeler)
- pnode->fFeeler = true;
- if (manual_connection)
- pnode->m_manual_connection = true;
m_msgproc->InitializeNode(pnode);
{
@@ -2127,11 +2130,6 @@ void CConnman::ThreadMessageHandler()
}
}
-
-
-
-
-
bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError, NetPermissionFlags permissions)
{
int nOne = 1;
@@ -2337,7 +2335,7 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
}
for (const auto& strDest : connOptions.vSeedNodes) {
- AddOneShot(strDest);
+ AddAddrFetch(strDest);
}
if (clientInterface) {
@@ -2390,7 +2388,7 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
else
threadDNSAddressSeed = std::thread(&TraceThread<std::function<void()> >, "dnsseed", std::function<void()>(std::bind(&CConnman::ThreadDNSAddressSeed, this)));
- // Initiate outbound connections from -addnode
+ // Initiate manual connections
threadOpenAddedConnections = std::thread(&TraceThread<std::function<void()> >, "addcon", std::function<void()>(std::bind(&CConnman::ThreadOpenAddedConnections, this)));
if (connOptions.m_use_addrman_outgoing && !connOptions.m_specified_outgoing.empty()) {
@@ -2581,7 +2579,7 @@ size_t CConnman::GetNodeCount(NumConnections flags)
int nNum = 0;
for (const auto& pnode : vNodes) {
- if (flags & (pnode->fInbound ? CONNECTIONS_IN : CONNECTIONS_OUT)) {
+ if (flags & (pnode->IsInboundConn() ? CONNECTIONS_IN : CONNECTIONS_OUT)) {
nNum++;
}
}
@@ -2765,26 +2763,26 @@ int CConnman::GetBestHeight() const
unsigned int CConnman::GetReceiveFloodSize() const { return nReceiveFloodSize; }
-CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress& addrBindIn, const std::string& addrNameIn, bool fInboundIn, bool block_relay_only)
+CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress& addrBindIn, const std::string& addrNameIn, ConnectionType conn_type_in)
: nTimeConnected(GetSystemTimeInSeconds()),
addr(addrIn),
addrBind(addrBindIn),
- fInbound(fInboundIn),
nKeyedNetGroup(nKeyedNetGroupIn),
// Don't relay addr messages to peers that we connect to as block-relay-only
// peers (to prevent adversaries from inferring these links from addr
// traffic).
- m_addr_known{block_relay_only ? nullptr : MakeUnique<CRollingBloomFilter>(5000, 0.001)},
id(idIn),
nLocalHostNonce(nLocalHostNonceIn),
+ m_conn_type(conn_type_in),
nLocalServices(nLocalServicesIn),
nMyStartingHeight(nMyStartingHeightIn)
{
hSocket = hSocketIn;
addrName = addrNameIn == "" ? addr.ToStringIPPort() : addrNameIn;
hashContinue = uint256();
- if (!block_relay_only) {
+ if (conn_type_in != ConnectionType::BLOCK_RELAY) {
m_tx_relay = MakeUnique<TxRelay>();
+ m_addr_known = MakeUnique<CRollingBloomFilter>(5000, 0.001);
}
for (const std::string &msg : getAllNetMessageTypes())
diff --git a/src/net.h b/src/net.h
index fa2299f012..7a8abb401d 100644
--- a/src/net.h
+++ b/src/net.h
@@ -114,6 +114,17 @@ struct CSerializedNetMsg
std::string m_type;
};
+/** Different types of connections to a peer. This enum encapsulates the
+ * information we have available at the time of opening or accepting the
+ * connection. Aside from INBOUND, all types are initiated by us. */
+enum class ConnectionType {
+ INBOUND, /**< peer initiated connections */
+ OUTBOUND, /**< full relay connections (blocks, addrs, txns) made automatically. Addresses selected from AddrMan. */
+ MANUAL, /**< connections to addresses added via addnode or the connect command line argument */
+ FEELER, /**< short lived connections used to test address validity */
+ BLOCK_RELAY, /**< only relay blocks to these automatic outbound connections. Addresses selected from AddrMan. */
+ ADDR_FETCH, /**< short lived connections used to solicit addrs when starting the node without a populated AddrMan */
+};
class NetEventsInterface;
class CConnman
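
As a hypothetical companion to the enum (not part of this diff), a stringifier illustrates the exhaustive-switch style the rest of the change relies on; omitting the default case means that adding a ConnectionType later produces a -Wswitch warning at every such site until the new value is handled:

    #include <cassert>
    #include <string>

    // Assumes the ConnectionType enum declared above is in scope.
    std::string ConnectionTypeLabel(ConnectionType conn_type)
    {
        switch (conn_type) {
        case ConnectionType::INBOUND:     return "inbound";
        case ConnectionType::OUTBOUND:    return "outbound-full-relay";
        case ConnectionType::MANUAL:      return "manual";
        case ConnectionType::FEELER:      return "feeler";
        case ConnectionType::BLOCK_RELAY: return "block-relay-only";
        case ConnectionType::ADDR_FETCH:  return "addr-fetch";
        } // no default on purpose

        assert(false);
    }
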
@@ -198,7 +209,7 @@ public:
bool GetNetworkActive() const { return fNetworkActive; };
bool GetUseAddrmanOutgoing() const { return m_use_addrman_outgoing; };
void SetNetworkActive(bool active);
- void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = nullptr, const char *strDest = nullptr, bool fOneShot = false, bool fFeeler = false, bool manual_connection = false, bool block_relay_only = false);
+ void OpenNetworkConnection(const CAddress& addrConnect, bool fCountFailure, CSemaphoreGrant *grantOutbound = nullptr, const char *strDest = nullptr, ConnectionType conn_type = ConnectionType::OUTBOUND);
bool CheckIncomingNonce(uint64_t nonce);
bool ForNode(NodeId id, std::function<bool(CNode* pnode)> func);
@@ -348,8 +359,8 @@ private:
bool Bind(const CService& addr, unsigned int flags, NetPermissionFlags permissions);
bool InitBinds(const std::vector<CService>& binds, const std::vector<NetWhitebindPermissions>& whiteBinds);
void ThreadOpenAddedConnections();
- void AddOneShot(const std::string& strDest);
- void ProcessOneShot();
+ void AddAddrFetch(const std::string& strDest);
+ void ProcessAddrFetch();
void ThreadOpenConnections(std::vector<std::string> connect);
void ThreadMessageHandler();
void AcceptConnection(const ListenSocket& hListenSocket);
@@ -370,7 +381,7 @@ private:
CNode* FindNode(const CService& addr);
bool AttemptToEvictConnection();
- CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, bool manual_connection, bool block_relay_only);
+ CNode* ConnectNode(CAddress addrConnect, const char *pszDest, bool fCountFailure, ConnectionType conn_type);
void AddWhitelistPermissionFlags(NetPermissionFlags& flags, const CNetAddr &addr) const;
void DeleteNode(CNode* pnode);
@@ -413,8 +424,8 @@ private:
std::atomic<bool> fNetworkActive{true};
bool fAddressesInitialized{false};
CAddrMan addrman;
- std::deque<std::string> vOneShots GUARDED_BY(cs_vOneShots);
- RecursiveMutex cs_vOneShots;
+ std::deque<std::string> m_addr_fetches GUARDED_BY(m_addr_fetches_mutex);
+ RecursiveMutex m_addr_fetches_mutex;
std::vector<std::string> vAddedNodes GUARDED_BY(cs_vAddedNodes);
RecursiveMutex cs_vAddedNodes;
std::vector<CNode*> vNodes GUARDED_BY(cs_vNodes);
@@ -795,12 +806,8 @@ public:
}
// This boolean is unused in actual processing, only present for backward compatibility at RPC/QT level
bool m_legacyWhitelisted{false};
- bool fFeeler{false}; // If true this node is being used as a short lived feeler.
- bool fOneShot{false};
- bool m_manual_connection{false};
bool fClient{false}; // set by version message
bool m_limited_node{false}; //after BIP159, set by version message
- const bool fInbound;
std::atomic_bool fSuccessfullyConnected{false};
// Setting fDisconnect to true will cause the node to be disconnected the
// next time DisconnectNodes() runs
@@ -813,6 +820,60 @@ public:
std::atomic_bool fPauseRecv{false};
std::atomic_bool fPauseSend{false};
+ bool IsOutboundOrBlockRelayConn() const {
+ switch (m_conn_type) {
+ case ConnectionType::OUTBOUND:
+ case ConnectionType::BLOCK_RELAY:
+ return true;
+ case ConnectionType::INBOUND:
+ case ConnectionType::MANUAL:
+ case ConnectionType::ADDR_FETCH:
+ case ConnectionType::FEELER:
+ return false;
+ }
+
+ assert(false);
+ }
+
+ bool IsFullOutboundConn() const {
+ return m_conn_type == ConnectionType::OUTBOUND;
+ }
+
+ bool IsManualConn() const {
+ return m_conn_type == ConnectionType::MANUAL;
+ }
+
+ bool IsBlockOnlyConn() const {
+ return m_conn_type == ConnectionType::BLOCK_RELAY;
+ }
+
+ bool IsFeelerConn() const {
+ return m_conn_type == ConnectionType::FEELER;
+ }
+
+ bool IsAddrFetchConn() const {
+ return m_conn_type == ConnectionType::ADDR_FETCH;
+ }
+
+ bool IsInboundConn() const {
+ return m_conn_type == ConnectionType::INBOUND;
+ }
+
+ bool ExpectServicesFromConn() const {
+ switch (m_conn_type) {
+ case ConnectionType::INBOUND:
+ case ConnectionType::MANUAL:
+ case ConnectionType::FEELER:
+ return false;
+ case ConnectionType::OUTBOUND:
+ case ConnectionType::BLOCK_RELAY:
+ case ConnectionType::ADDR_FETCH:
+ return true;
+ }
+
+ assert(false);
+ }
+
protected:
mapMsgCmdSize mapSendBytesPerMsgCmd;
mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv);
@@ -823,7 +884,7 @@ public:
// flood relay
std::vector<CAddress> vAddrToSend;
- const std::unique_ptr<CRollingBloomFilter> m_addr_known;
+ std::unique_ptr<CRollingBloomFilter> m_addr_known = nullptr;
bool fGetAddr{false};
std::chrono::microseconds m_next_addr_send GUARDED_BY(cs_sendProcessing){0};
std::chrono::microseconds m_next_local_addr_send GUARDED_BY(cs_sendProcessing){0};
@@ -887,7 +948,7 @@ public:
std::set<uint256> orphan_work_set;
- CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn = "", bool fInboundIn = false, bool block_relay_only = false);
+ CNode(NodeId id, ServiceFlags nLocalServicesIn, int nMyStartingHeightIn, SOCKET hSocketIn, const CAddress &addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress &addrBindIn, const std::string &addrNameIn, ConnectionType conn_type_in);
~CNode();
CNode(const CNode&) = delete;
CNode& operator=(const CNode&) = delete;
@@ -895,6 +956,7 @@ public:
private:
const NodeId id;
const uint64_t nLocalHostNonce;
+ const ConnectionType m_conn_type;
//! Services offered to this peer.
//!
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 1530da8d7f..3e06f1fff0 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -189,7 +189,7 @@ namespace {
* million to make it highly unlikely for users to have issues with this
* filter.
*
- * We only need to add wtxids to this filter. For non-segwit
+ * We typically only add wtxids to this filter. For non-segwit
* transactions, the txid == wtxid, so this only prevents us from
* re-downloading non-segwit transactions when communicating with
* non-wtxidrelay peers -- which is important for avoiding malleation
@@ -198,6 +198,12 @@ namespace {
* the reject filter store wtxids is exactly what we want to avoid
* redownload of a rejected transaction.
*
+ * In cases where we can tell that a segwit transaction will fail
+ * validation no matter the witness, we may add the txid of such a
+ * transaction to the filter as well. This can be helpful when
+ * communicating with txid-relay peers or if we were to otherwise fetch a
+ * transaction via txid (e.g. in our orphan handling).
+ *
* Memory used: 1.3 MB
*/
std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
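
recentRejects is a rolling bloom filter, so membership is probabilistic and old entries age out within bounded memory. A minimal sketch of the insertion policy the comment above describes (the filter itself is constructed elsewhere; witness_independent_failure stands in for the TX_INPUTS_NOT_STANDARD case):

    #include <bloom.h>    // CRollingBloomFilter
    #include <uint256.h>

    void RememberReject(CRollingBloomFilter& recent_rejects, const uint256& wtxid,
                        const uint256& txid, bool witness_independent_failure)
    {
        recent_rejects.insert(wtxid);
        // Only spend a second filter entry on the txid when it differs from the
        // wtxid and the failure did not depend on the witness.
        if (witness_independent_failure && txid != wtxid) {
            recent_rejects.insert(txid);
        }
    }
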
@@ -475,7 +481,7 @@ static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUS
nPreferredDownload -= state->fPreferredDownload;
// Whether this node should be marked as a preferred download node.
- state->fPreferredDownload = (!node.fInbound || node.HasPermission(PF_NOBAN)) && !node.fOneShot && !node.fClient;
+ state->fPreferredDownload = (!node.IsInboundConn() || node.HasPermission(PF_NOBAN)) && !node.IsAddrFetchConn() && !node.fClient;
nPreferredDownload += state->fPreferredDownload;
}
@@ -829,22 +835,15 @@ void UpdateLastBlockAnnounceTime(NodeId node, int64_t time_in_seconds)
if (state) state->m_last_block_announcement = time_in_seconds;
}
-// Returns true for outbound peers, excluding manual connections, feelers, and
-// one-shots.
-static bool IsOutboundDisconnectionCandidate(const CNode& node)
-{
- return !(node.fInbound || node.m_manual_connection || node.fFeeler || node.fOneShot);
-}
-
void PeerLogicValidation::InitializeNode(CNode *pnode) {
CAddress addr = pnode->addr;
std::string addrName = pnode->GetAddrName();
NodeId nodeid = pnode->GetId();
{
LOCK(cs_main);
- mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName), pnode->fInbound, pnode->m_manual_connection));
+ mapNodeState.emplace_hint(mapNodeState.end(), std::piecewise_construct, std::forward_as_tuple(nodeid), std::forward_as_tuple(addr, std::move(addrName), pnode->IsInboundConn(), pnode->IsManualConn()));
}
- if(!pnode->fInbound)
+ if(!pnode->IsInboundConn())
PushNodeVersion(*pnode, *connman, GetTime());
}
@@ -1163,6 +1162,7 @@ static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state,
}
// Conflicting (but not necessarily invalid) data or different policy:
case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE:
+ case TxValidationResult::TX_INPUTS_NOT_STANDARD:
case TxValidationResult::TX_NOT_STANDARD:
case TxValidationResult::TX_MISSING_INPUTS:
case TxValidationResult::TX_PREMATURE_SPEND:
@@ -1736,16 +1736,27 @@ void static ProcessGetData(CNode& pfrom, const CChainParams& chainparams, CConnm
connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx));
mempool.RemoveUnbroadcastTx(tx->GetHash());
// As we're going to send tx, make sure its unconfirmed parents are made requestable.
- for (const auto& txin : tx->vin) {
- auto txinfo = mempool.info(txin.prevout.hash);
- if (txinfo.tx && txinfo.m_time > now - UNCONDITIONAL_RELAY_DELAY) {
- // Relaying a transaction with a recent but unconfirmed parent.
- if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory, return !pfrom.m_tx_relay->filterInventoryKnown.contains(txin.prevout.hash))) {
- LOCK(cs_main);
- State(pfrom.GetId())->m_recently_announced_invs.insert(txin.prevout.hash);
+ std::vector<uint256> parent_ids_to_add;
+ {
+ LOCK(mempool.cs);
+ auto txiter = mempool.GetIter(tx->GetHash());
+ if (txiter) {
+ const CTxMemPool::setEntries& parents = mempool.GetMemPoolParents(*txiter);
+ parent_ids_to_add.reserve(parents.size());
+ for (CTxMemPool::txiter parent_iter : parents) {
+ if (parent_iter->GetTime() > now - UNCONDITIONAL_RELAY_DELAY) {
+ parent_ids_to_add.push_back(parent_iter->GetTx().GetHash());
+ }
}
}
}
+ for (const uint256& parent_txid : parent_ids_to_add) {
+ // Relaying a transaction with a recent but unconfirmed parent.
+ if (WITH_LOCK(pfrom.m_tx_relay->cs_tx_inventory, return !pfrom.m_tx_relay->filterInventoryKnown.contains(parent_txid))) {
+ LOCK(cs_main);
+ State(pfrom.GetId())->m_recently_announced_invs.insert(parent_txid);
+ }
+ }
} else {
vNotFound.push_back(inv);
}
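
The rewritten block gathers the parent txids while holding only mempool.cs and takes cs_main afterwards, so the two locks are never held together on this path. A generic sketch of that collect-then-apply pattern with placeholder locks and data:

    #include <mutex>
    #include <vector>

    std::mutex mutex_a;             // stands in for mempool.cs
    std::mutex mutex_b;             // stands in for cs_main
    std::vector<int> shared_source; // guarded by mutex_a
    std::vector<int> shared_sink;   // guarded by mutex_b

    void CopyInterestingItems()
    {
        std::vector<int> to_add;
        {
            // Phase 1: gather candidates under the first lock only.
            std::lock_guard<std::mutex> lock(mutex_a);
            for (int item : shared_source) {
                if (item % 2 == 0) to_add.push_back(item);
            }
        }
        // Phase 2: apply under the second lock; the two mutexes are never held
        // at the same time, which keeps lock-order constraints simple.
        std::lock_guard<std::mutex> lock(mutex_b);
        shared_sink.insert(shared_sink.end(), to_add.begin(), to_add.end());
    }
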
@@ -1966,14 +1977,14 @@ static void ProcessHeadersMessage(CNode& pfrom, CConnman& connman, ChainstateMan
// until we have a headers chain that has at least
// nMinimumChainWork, even if a peer has a chain past our tip,
// as an anti-DoS measure.
- if (IsOutboundDisconnectionCandidate(pfrom)) {
+ if (pfrom.IsOutboundOrBlockRelayConn()) {
LogPrintf("Disconnecting outbound peer %d -- headers chain has insufficient work\n", pfrom.GetId());
pfrom.fDisconnect = true;
}
}
}
- if (!pfrom.fDisconnect && IsOutboundDisconnectionCandidate(pfrom) && nodestate->pindexBestKnownBlock != nullptr && pfrom.m_tx_relay != nullptr) {
+ if (!pfrom.fDisconnect && pfrom.IsOutboundOrBlockRelayConn() && nodestate->pindexBestKnownBlock != nullptr && pfrom.m_tx_relay != nullptr) {
// If this is an outbound full-relay peer, check to see if we should protect
// it from the bad/lagging chain logic.
// Note that block-relay-only peers are already implicitly protected, so we
@@ -2054,6 +2065,19 @@ void static ProcessOrphanTx(CConnman& connman, CTxMemPool& mempool, std::set<uin
// if we start doing this too early.
assert(recentRejects);
recentRejects->insert(orphanTx.GetWitnessHash());
+ // If the transaction failed for TX_INPUTS_NOT_STANDARD,
+ // then we know that the witness was irrelevant to the policy
+ // failure, since this check depends only on the txid
+ // (the scriptPubKey being spent is covered by the txid).
+ // Add the txid to the reject filter to prevent repeated
+ // processing of this transaction in the event that child
+ // transactions are later received (resulting in
+ // parent-fetching by txid via the orphan-handling logic).
+ if (orphan_state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && orphanTx.GetWitnessHash() != orphanTx.GetHash()) {
+ // We only add the txid if it differs from the wtxid, to
+ // avoid wasting entries in the rolling bloom filter.
+ recentRejects->insert(orphanTx.GetHash());
+ }
}
EraseOrphanTx(orphanHash);
done = true;
@@ -2323,11 +2347,11 @@ void ProcessMessage(
vRecv >> nVersion >> nServiceInt >> nTime >> addrMe;
nSendVersion = std::min(nVersion, PROTOCOL_VERSION);
nServices = ServiceFlags(nServiceInt);
- if (!pfrom.fInbound)
+ if (!pfrom.IsInboundConn())
{
connman.SetServices(pfrom.addr, nServices);
}
- if (!pfrom.fInbound && !pfrom.fFeeler && !pfrom.m_manual_connection && !HasAllDesirableServiceFlags(nServices))
+ if (pfrom.ExpectServicesFromConn() && !HasAllDesirableServiceFlags(nServices))
{
LogPrint(BCLog::NET, "peer=%d does not offer the expected services (%08x offered, %08x expected); disconnecting\n", pfrom.GetId(), nServices, GetDesirableServiceFlags(nServices));
pfrom.fDisconnect = true;
@@ -2354,20 +2378,20 @@ void ProcessMessage(
if (!vRecv.empty())
vRecv >> fRelay;
// Disconnect if we connected to ourself
- if (pfrom.fInbound && !connman.CheckIncomingNonce(nNonce))
+ if (pfrom.IsInboundConn() && !connman.CheckIncomingNonce(nNonce))
{
LogPrintf("connected to self at %s, disconnecting\n", pfrom.addr.ToString());
pfrom.fDisconnect = true;
return;
}
- if (pfrom.fInbound && addrMe.IsRoutable())
+ if (pfrom.IsInboundConn() && addrMe.IsRoutable())
{
SeenLocal(addrMe);
}
// Be shy and don't send version until we hear
- if (pfrom.fInbound)
+ if (pfrom.IsInboundConn())
PushNodeVersion(pfrom, connman, GetAdjustedTime());
if (nVersion >= WTXID_RELAY_VERSION) {
@@ -2411,7 +2435,7 @@ void ProcessMessage(
UpdatePreferredDownload(pfrom, State(pfrom.GetId()));
}
- if (!pfrom.fInbound && pfrom.IsAddrRelayPeer())
+ if (!pfrom.IsInboundConn() && pfrom.IsAddrRelayPeer())
{
// Advertise our address
if (fListen && !::ChainstateActive().IsInitialBlockDownload())
@@ -2455,8 +2479,7 @@ void ProcessMessage(
}
// Feeler connections exist only to verify if address is online.
- if (pfrom.fFeeler) {
- assert(pfrom.fInbound == false);
+ if (pfrom.IsFeelerConn()) {
pfrom.fDisconnect = true;
}
return;
@@ -2476,7 +2499,7 @@ void ProcessMessage(
{
pfrom.SetRecvVersion(std::min(pfrom.nVersion.load(), PROTOCOL_VERSION));
- if (!pfrom.fInbound) {
+ if (!pfrom.IsInboundConn()) {
// Mark this node as currently connected, so we update its timestamp later.
LOCK(cs_main);
State(pfrom.GetId())->fCurrentlyConnected = true;
@@ -2585,7 +2608,7 @@ void ProcessMessage(
connman.AddNewAddresses(vAddrOk, pfrom.addr, 2 * 60 * 60);
if (vAddr.size() < 1000)
pfrom.fGetAddr = false;
- if (pfrom.fOneShot)
+ if (pfrom.IsAddrFetchConn())
pfrom.fDisconnect = true;
return;
}
@@ -2945,7 +2968,7 @@ void ProcessMessage(
// We do the AlreadyHave() check using wtxid, rather than txid - in the
// absence of witness malleation, this is strictly better, because the
- // recent rejects filter may contain the wtxid but will never contain
+ // recent rejects filter may contain the wtxid but rarely contains
// the txid of a segwit transaction that has been rejected.
// In the presence of witness malleation, it's possible that by only
// doing the check with wtxid, we could overlook a transaction which
@@ -2981,8 +3004,19 @@ void ProcessMessage(
else if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS)
{
bool fRejectedParents = false; // It may be the case that the orphan's parents have all been rejected
+
+ // Deduplicate parent txids, so that we don't have to loop over
+ // the same parent txid more than once down below.
+ std::vector<uint256> unique_parents;
+ unique_parents.reserve(tx.vin.size());
for (const CTxIn& txin : tx.vin) {
- if (recentRejects->contains(txin.prevout.hash)) {
+ // We start with all parents, and then remove duplicates below.
+ unique_parents.push_back(txin.prevout.hash);
+ }
+ std::sort(unique_parents.begin(), unique_parents.end());
+ unique_parents.erase(std::unique(unique_parents.begin(), unique_parents.end()), unique_parents.end());
+ for (const uint256& parent_txid : unique_parents) {
+ if (recentRejects->contains(parent_txid)) {
fRejectedParents = true;
break;
}
@@ -2991,14 +3025,14 @@ void ProcessMessage(
uint32_t nFetchFlags = GetFetchFlags(pfrom);
const auto current_time = GetTime<std::chrono::microseconds>();
- for (const CTxIn& txin : tx.vin) {
+ for (const uint256& parent_txid : unique_parents) {
// Here, we only have the txid (and not wtxid) of the
// inputs, so we only request in txid mode, even for
// wtxidrelay peers.
// Eventually we should replace this with an improved
// protocol for getting all unconfirmed parents.
- CInv _inv(MSG_TX | nFetchFlags, txin.prevout.hash);
- pfrom.AddKnownTx(txin.prevout.hash);
+ CInv _inv(MSG_TX | nFetchFlags, parent_txid);
+ pfrom.AddKnownTx(parent_txid);
if (!AlreadyHave(_inv, mempool)) RequestTx(State(pfrom.GetId()), ToGenTxid(_inv), current_time);
}
AddOrphanTx(ptx, pfrom.GetId());
@@ -3037,6 +3071,17 @@ void ProcessMessage(
// if we start doing this too early.
assert(recentRejects);
recentRejects->insert(tx.GetWitnessHash());
+ // If the transaction failed for TX_INPUTS_NOT_STANDARD,
+ // then we know that the witness was irrelevant to the policy
+ // failure, since this check depends only on the txid
+ // (the scriptPubKey being spent is covered by the txid).
+ // Add the txid to the reject filter to prevent repeated
+ // processing of this transaction in the event that child
+ // transactions are later received (resulting in
+ // parent-fetching by txid via the orphan-handling logic).
+ if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) {
+ recentRejects->insert(tx.GetHash());
+ }
if (RecursiveDynamicUsage(*ptx) < 100000) {
AddToCompactExtraTransactions(ptx);
}
@@ -3458,7 +3503,7 @@ void ProcessMessage(
// to users' AddrMan and later request them by sending getaddr messages.
// Making nodes which are behind NAT and can only make outgoing connections ignore
// the getaddr message mitigates the attack.
- if (!pfrom.fInbound) {
+ if (!pfrom.IsInboundConn()) {
LogPrint(BCLog::NET, "Ignoring \"getaddr\" from outbound connection. peer=%d\n", pfrom.GetId());
return;
}
@@ -3741,7 +3786,7 @@ bool PeerLogicValidation::MaybeDiscourageAndDisconnect(CNode& pnode)
return false;
}
- if (pnode.m_manual_connection) {
+ if (pnode.IsManualConn()) {
// We never disconnect or discourage manual peers for bad behavior
LogPrintf("Warning: not punishing manually connected peer %d!\n", peer_id);
return false;
@@ -3862,7 +3907,7 @@ void PeerLogicValidation::ConsiderEviction(CNode& pto, int64_t time_in_seconds)
CNodeState &state = *State(pto.GetId());
const CNetMsgMaker msgMaker(pto.GetSendVersion());
- if (!state.m_chain_sync.m_protect && IsOutboundDisconnectionCandidate(pto) && state.fSyncStarted) {
+ if (!state.m_chain_sync.m_protect && pto.IsOutboundOrBlockRelayConn() && state.fSyncStarted) {
// This is an outbound peer subject to disconnection if they don't
// announce a block with as much work as the current tip within
// CHAIN_SYNC_TIMEOUT + HEADERS_RESPONSE_TIME seconds (note: if
@@ -3924,7 +3969,7 @@ void PeerLogicValidation::EvictExtraOutboundPeers(int64_t time_in_seconds)
AssertLockHeld(cs_main);
// Ignore non-outbound peers, or nodes marked for disconnect already
- if (!IsOutboundDisconnectionCandidate(*pnode) || pnode->fDisconnect) return;
+ if (!pnode->IsOutboundOrBlockRelayConn() || pnode->fDisconnect) return;
CNodeState *state = State(pnode->GetId());
if (state == nullptr) return; // shouldn't be possible, but just in case
// Don't evict our protected peers
@@ -4102,7 +4147,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
// Start block sync
if (pindexBestHeader == nullptr)
pindexBestHeader = ::ChainActive().Tip();
- bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->fOneShot); // Download if this is a nice peer, or we have no nice peers and this one might do.
+ bool fFetch = state.fPreferredDownload || (nPreferredDownload == 0 && !pto->fClient && !pto->IsAddrFetchConn()); // Download if this is a nice peer, or we have no nice peers and this one might do.
if (!state.fSyncStarted && !pto->fClient && !fImporting && !fReindex) {
// Only actively request headers from a single peer, unless we're close to today.
if ((nSyncStarted == 0 && fFetch) || pindexBestHeader->GetBlockTime() > GetAdjustedTime() - 24 * 60 * 60) {
@@ -4287,7 +4332,7 @@ bool PeerLogicValidation::SendMessages(CNode* pto)
bool fSendTrickle = pto->HasPermission(PF_NOBAN);
if (pto->m_tx_relay->nNextInvSend < current_time) {
fSendTrickle = true;
- if (pto->fInbound) {
+ if (pto->IsInboundConn()) {
pto->m_tx_relay->nNextInvSend = std::chrono::microseconds{connman->PoissonNextSendInbound(nNow, INVENTORY_BROADCAST_INTERVAL)};
} else {
// Use half the delay for outbound peers, as there is less privacy concern for them.
diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp
index c56abaf6c9..0e9820da1e 100644
--- a/src/policy/policy.cpp
+++ b/src/policy/policy.cpp
@@ -152,6 +152,8 @@ bool IsStandardTx(const CTransaction& tx, bool permit_bare_multisig, const CFeeR
* script can be anything; an attacker could use a very
* expensive-to-check-upon-redemption script like:
* DUP CHECKSIG DROP ... repeated 100 times... OP_1
+ *
+ * Note that only the non-witness portion of the transaction is checked here.
*/
bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs)
{
@@ -164,7 +166,11 @@ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs)
std::vector<std::vector<unsigned char> > vSolutions;
TxoutType whichType = Solver(prev.scriptPubKey, vSolutions);
- if (whichType == TxoutType::NONSTANDARD) {
+ if (whichType == TxoutType::NONSTANDARD || whichType == TxoutType::WITNESS_UNKNOWN) {
+ // WITNESS_UNKNOWN failures are typically also caught with a policy
+ // flag in the script interpreter, but it can be helpful to catch
+ // this type of NONSTANDARD transaction earlier in transaction
+ // validation.
return false;
} else if (whichType == TxoutType::SCRIPTHASH) {
std::vector<std::vector<unsigned char> > stack;
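
A reduced restatement of the early-exit being extended here, using only Solver() from script/standard.h; the real AreInputsStandard() additionally enforces the P2SH sigop limit on SCRIPTHASH inputs:

    #include <script/script.h>
    #include <script/standard.h> // Solver, TxoutType

    #include <vector>

    bool InputScriptTypeIsStandard(const CScript& prev_script_pubkey)
    {
        std::vector<std::vector<unsigned char>> solutions;
        const TxoutType which_type = Solver(prev_script_pubkey, solutions);
        return which_type != TxoutType::NONSTANDARD && which_type != TxoutType::WITNESS_UNKNOWN;
    }
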
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index 523f5c429b..4f1e0056be 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -29,6 +29,7 @@
#include <interfaces/handler.h>
#include <interfaces/node.h>
+#include <node/context.h>
#include <noui.h>
#include <uint256.h>
#include <util/system.h>
@@ -430,7 +431,8 @@ int GuiMain(int argc, char* argv[])
SetupEnvironment();
util::ThreadSetInternalName("main");
- std::unique_ptr<interfaces::Node> node = interfaces::MakeNode();
+ NodeContext node_context;
+ std::unique_ptr<interfaces::Node> node = interfaces::MakeNode(&node_context);
// Subscribe to global signals from core
std::unique_ptr<interfaces::Handler> handler_message_box = node->handleMessageBox(noui_ThreadSafeMessageBox);
diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui
index fea759dee0..0016fb9739 100644
--- a/src/qt/forms/optionsdialog.ui
+++ b/src/qt/forms/optionsdialog.ui
@@ -459,10 +459,10 @@
<item>
<widget class="QCheckBox" name="connectSocksTor">
<property name="toolTip">
- <string>Connect to the Bitcoin network through a separate SOCKS5 proxy for Tor hidden services.</string>
+ <string>Connect to the Bitcoin network through a separate SOCKS5 proxy for Tor onion services.</string>
</property>
<property name="text">
- <string>Use separate SOCKS&amp;5 proxy to reach peers via Tor hidden services:</string>
+ <string>Use separate SOCKS&amp;5 proxy to reach peers via Tor onion services:</string>
</property>
</widget>
</item>
diff --git a/src/qt/test/addressbooktests.cpp b/src/qt/test/addressbooktests.cpp
index 9347ff9e42..035c8196bc 100644
--- a/src/qt/test/addressbooktests.cpp
+++ b/src/qt/test/addressbooktests.cpp
@@ -18,6 +18,7 @@
#include <key.h>
#include <key_io.h>
#include <wallet/wallet.h>
+#include <walletinitinterface.h>
#include <QApplication>
#include <QTimer>
@@ -59,6 +60,7 @@ void EditAddressAndSubmit(
void TestAddAddressesToSendBook(interfaces::Node& node)
{
TestChain100Setup test;
+ node.setContext(&test.m_node);
std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), WalletLocation(), CreateMockWalletDatabase());
wallet->SetupLegacyScriptPubKeyMan();
bool firstRun;
diff --git a/src/qt/test/test_main.cpp b/src/qt/test/test_main.cpp
index 12efca2503..031913bd02 100644
--- a/src/qt/test/test_main.cpp
+++ b/src/qt/test/test_main.cpp
@@ -52,7 +52,8 @@ int main(int argc, char* argv[])
BasicTestingSetup dummy{CBaseChainParams::REGTEST};
}
- std::unique_ptr<interfaces::Node> node = interfaces::MakeNode();
+ NodeContext node_context;
+ std::unique_ptr<interfaces::Node> node = interfaces::MakeNode(&node_context);
bool fInvalid = false;
diff --git a/src/qt/test/wallettests.cpp b/src/qt/test/wallettests.cpp
index 6648029bae..475fd589af 100644
--- a/src/qt/test/wallettests.cpp
+++ b/src/qt/test/wallettests.cpp
@@ -138,8 +138,7 @@ void TestGUI(interfaces::Node& node)
for (int i = 0; i < 5; ++i) {
test.CreateAndProcessBlock({}, GetScriptForRawPubKey(test.coinbaseKey.GetPubKey()));
}
- node.context()->connman = std::move(test.m_node.connman);
- node.context()->mempool = std::move(test.m_node.mempool);
+ node.setContext(&test.m_node);
std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), WalletLocation(), CreateMockWalletDatabase());
bool firstRun;
wallet->LoadWallet(firstRun);
diff --git a/src/random.cpp b/src/random.cpp
index 9c9a35709a..af9504e0ce 100644
--- a/src/random.cpp
+++ b/src/random.cpp
@@ -315,12 +315,16 @@ void GetOSRand(unsigned char *ent32)
if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) {
RandFailure();
}
+ // Silence a compiler warning about unused function.
+ (void)GetDevURandom;
#elif defined(HAVE_GETENTROPY_RAND) && defined(MAC_OSX)
/* getentropy() is available on macOS 10.12 and later.
*/
if (getentropy(ent32, NUM_OS_RANDOM_BYTES) != 0) {
RandFailure();
}
+ // Silence a compiler warning about unused function.
+ (void)GetDevURandom;
#elif defined(HAVE_SYSCTL_ARND)
/* FreeBSD, NetBSD and similar. It is possible for the call to return less
* bytes than requested, so need to read in a loop.
@@ -334,6 +338,8 @@ void GetOSRand(unsigned char *ent32)
}
have += len;
} while (have < NUM_OS_RANDOM_BYTES);
+ // Silence a compiler warning about unused function.
+ (void)GetDevURandom;
#else
/* Fall back to /dev/urandom if there is no specific method implemented to
* get system entropy for this OS.
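
The (void) cast is the conventional way to mark a name as intentionally unused: it counts as a use for -Wunused-function but generates no code, which keeps GetDevURandom compiled for the /dev/urandom fallback on platforms whose primary entropy source never calls it. A self-contained illustration with a stand-in helper:

    static int RarelyUsedHelper() { return 42; } // stand-in for GetDevURandom

    void MarkHelperAsIntentionallyUnused()
    {
        // Naming the function (without calling it) and casting to void silences
        // the unused-function warning without emitting any code.
        (void)RarelyUsedHelper;
    }
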
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index abd02bd2c3..0dfd560896 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -264,7 +264,7 @@ static UniValue addnode(const JSONRPCRequest& request)
if (strCommand == "onetry")
{
CAddress addr;
- node.connman->OpenNetworkConnection(addr, false, nullptr, strNode.c_str(), false, false, true);
+ node.connman->OpenNetworkConnection(addr, false, nullptr, strNode.c_str(), ConnectionType::MANUAL);
return NullUniValue;
}
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index cf856af6e9..9b6ef15785 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -1348,7 +1348,7 @@ UniValue finalizepsbt(const JSONRPCRequest& request)
if (complete && extract) {
ssTx << mtx;
- result_str = HexStr(ssTx.str());
+ result_str = HexStr(ssTx);
result.pushKV("hex", result_str);
} else {
ssTx << psbtx;
diff --git a/src/rpc/request.cpp b/src/rpc/request.cpp
index 7fef45f50e..d9ad70fa37 100644
--- a/src/rpc/request.cpp
+++ b/src/rpc/request.cpp
@@ -78,7 +78,7 @@ bool GenerateAuthCookie(std::string *cookie_out)
const size_t COOKIE_SIZE = 32;
unsigned char rand_pwd[COOKIE_SIZE];
GetRandBytes(rand_pwd, COOKIE_SIZE);
- std::string cookie = COOKIEAUTH_USER + ":" + HexStr(rand_pwd, rand_pwd+COOKIE_SIZE);
+ std::string cookie = COOKIEAUTH_USER + ":" + HexStr(rand_pwd);
/** the umask determines what permissions are used to create this file -
* these are set to 077 in init.cpp unless overridden with -sysperms.
diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp
index 9f4c7bee9c..073a7688a9 100644
--- a/src/rpc/util.cpp
+++ b/src/rpc/util.cpp
@@ -260,7 +260,7 @@ public:
UniValue obj(UniValue::VOBJ);
obj.pushKV("iswitness", true);
obj.pushKV("witness_version", (int)id.version);
- obj.pushKV("witness_program", HexStr(id.program, id.program + id.length));
+ obj.pushKV("witness_program", HexStr(Span<const unsigned char>(id.program, id.length)));
return obj;
}
};
diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp
index 9978d084d5..6c0a98cca2 100644
--- a/src/script/descriptor.cpp
+++ b/src/script/descriptor.cpp
@@ -190,7 +190,7 @@ class OriginPubkeyProvider final : public PubkeyProvider
std::string OriginString() const
{
- return HexStr(std::begin(m_origin.fingerprint), std::end(m_origin.fingerprint)) + FormatHDKeypath(m_origin.path);
+ return HexStr(m_origin.fingerprint) + FormatHDKeypath(m_origin.path);
}
public:
diff --git a/src/sync.cpp b/src/sync.cpp
index 10f0483189..4be13a3c48 100644
--- a/src/sync.cpp
+++ b/src/sync.cpp
@@ -149,12 +149,17 @@ static void push_lock(void* c, const CLockLocation& locklocation)
const LockPair p1 = std::make_pair(i.first, c);
if (lockdata.lockorders.count(p1))
continue;
- lockdata.lockorders.emplace(p1, lock_stack);
const LockPair p2 = std::make_pair(c, i.first);
+ if (lockdata.lockorders.count(p2)) {
+ auto lock_stack_copy = lock_stack;
+ lock_stack.pop_back();
+ potential_deadlock_detected(p1, lockdata.lockorders[p2], lock_stack_copy);
+ // potential_deadlock_detected() does not return.
+ }
+
+ lockdata.lockorders.emplace(p1, lock_stack);
lockdata.invlockorders.insert(p2);
- if (lockdata.lockorders.count(p2))
- potential_deadlock_detected(p1, lockdata.lockorders[p2], lockdata.lockorders[p1]);
}
}
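
The fix checks for an inverted pair before recording the new ordering and pops the current lock from the per-thread stack before reporting, so the tracking state stays usable after the exception. A stripped-down sketch of lock-order tracking with placeholder types (the real code also records file/line, handles try-locks, and guards its global state with dd_mutex):

    #include <set>
    #include <stdexcept>
    #include <utility>
    #include <vector>

    using LockId = const void*;
    using LockPair = std::pair<LockId, LockId>;

    std::set<LockPair> g_seen_orders;         // (first-taken, second-taken) pairs ever observed
    thread_local std::vector<LockId> g_stack; // locks currently held by this thread

    void PushLock(LockId entering)
    {
        for (LockId held : g_stack) {
            // If (entering, held) was ever recorded, taking `held` before
            // `entering` now inverts a previously seen order.
            if (g_seen_orders.count(LockPair{entering, held})) {
                // Throwing before push_back keeps g_stack consistent with the
                // locks actually held, mirroring the pop_back in the fix above.
                throw std::logic_error("potential deadlock detected");
            }
            g_seen_orders.insert(LockPair{held, entering});
        }
        g_stack.push_back(entering);
    }

    void PopLock() { g_stack.pop_back(); }
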
@@ -259,6 +264,17 @@ void DeleteLock(void* cs)
}
}
+bool LockStackEmpty()
+{
+ LockData& lockdata = GetLockData();
+ std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
+ const auto it = lockdata.m_lock_stacks.find(std::this_thread::get_id());
+ if (it == lockdata.m_lock_stacks.end()) {
+ return true;
+ }
+ return it->second.empty();
+}
+
bool g_debug_lockorder_abort = true;
#endif /* DEBUG_LOCKORDER */
diff --git a/src/sync.h b/src/sync.h
index 77327d8bfe..05ff2ee8a9 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -56,6 +56,7 @@ template <typename MutexType>
void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs);
void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs);
void DeleteLock(void* cs);
+bool LockStackEmpty();
/**
* Call abort() if a potential lock order deadlock bug is detected, instead of
@@ -64,13 +65,14 @@ void DeleteLock(void* cs);
*/
extern bool g_debug_lockorder_abort;
#else
-void static inline EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
-void static inline LeaveCritical() {}
-void static inline CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {}
+inline void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
+inline void LeaveCritical() {}
+inline void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {}
template <typename MutexType>
-void static inline AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs) {}
-void static inline AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
-void static inline DeleteLock(void* cs) {}
+inline void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs) {}
+inline void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
+inline void DeleteLock(void* cs) {}
+inline bool LockStackEmpty() { return true; }
#endif
#define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
#define AssertLockNotHeld(cs) AssertLockNotHeldInternal(#cs, __FILE__, __LINE__, &cs)
diff --git a/src/test/crypto_tests.cpp b/src/test/crypto_tests.cpp
index bf5c587774..b3cc8cefd9 100644
--- a/src/test/crypto_tests.cpp
+++ b/src/test/crypto_tests.cpp
@@ -183,7 +183,7 @@ static void TestHKDF_SHA256_32(const std::string &ikm_hex, const std::string &sa
CHKDF_HMAC_SHA256_L32 hkdf32(initial_key_material.data(), initial_key_material.size(), salt_stringified);
unsigned char out[32];
hkdf32.Expand32(info_stringified, out);
- BOOST_CHECK(HexStr(out, out + 32) == okm_check_hex);
+ BOOST_CHECK(HexStr(out) == okm_check_hex);
}
static std::string LongTestString()
diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp
index b1a635d9da..0115803e58 100644
--- a/src/test/denialofservice_tests.cpp
+++ b/src/test/denialofservice_tests.cpp
@@ -84,7 +84,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction)
// Mock an outbound peer
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
- CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", /*fInboundIn=*/ false);
+ CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", ConnectionType::OUTBOUND);
dummyNode1.SetSendVersion(PROTOCOL_VERSION);
peerLogic->InitializeNode(&dummyNode1);
@@ -136,7 +136,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction)
static void AddRandomOutboundPeer(std::vector<CNode *> &vNodes, PeerLogicValidation &peerLogic, CConnmanTest* connman)
{
CAddress addr(ip(g_insecure_rand_ctx.randbits(32)), NODE_NONE);
- vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr, 0, 0, CAddress(), "", /*fInboundIn=*/ false));
+ vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK|NODE_WITNESS), 0, INVALID_SOCKET, addr, 0, 0, CAddress(), "", ConnectionType::OUTBOUND));
CNode &node = *vNodes.back();
node.SetSendVersion(PROTOCOL_VERSION);
@@ -227,7 +227,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
banman->ClearBanned();
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
- CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", true);
+ CNode dummyNode1(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", ConnectionType::INBOUND);
dummyNode1.SetSendVersion(PROTOCOL_VERSION);
peerLogic->InitializeNode(&dummyNode1);
dummyNode1.nVersion = 1;
@@ -244,7 +244,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
BOOST_CHECK(!banman->IsDiscouraged(ip(0xa0b0c001|0x0000ff00))); // Different IP, not discouraged
CAddress addr2(ip(0xa0b0c002), NODE_NONE);
- CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, 1, CAddress(), "", true);
+ CNode dummyNode2(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr2, 1, 1, CAddress(), "", ConnectionType::INBOUND);
dummyNode2.SetSendVersion(PROTOCOL_VERSION);
peerLogic->InitializeNode(&dummyNode2);
dummyNode2.nVersion = 1;
@@ -286,7 +286,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
SetMockTime(nStartTime); // Overrides future calls to GetTime()
CAddress addr(ip(0xa0b0c001), NODE_NONE);
- CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, 4, CAddress(), "", true);
+ CNode dummyNode(id++, NODE_NETWORK, 0, INVALID_SOCKET, addr, 4, 4, CAddress(), "", ConnectionType::INBOUND);
dummyNode.SetSendVersion(PROTOCOL_VERSION);
peerLogic->InitializeNode(&dummyNode);
dummyNode.nVersion = 1;
diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp
index 9e40d5cd55..677b87a47a 100644
--- a/src/test/fuzz/process_message.cpp
+++ b/src/test/fuzz/process_message.cpp
@@ -80,7 +80,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
return;
}
CDataStream random_bytes_data_stream{fuzzed_data_provider.ConsumeRemainingBytes<unsigned char>(), SER_NETWORK, PROTOCOL_VERSION};
- CNode& p2p_node = *MakeUnique<CNode>(0, ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_BLOOM), 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, false).release();
+ CNode& p2p_node = *MakeUnique<CNode>(0, ServiceFlags(NODE_NETWORK | NODE_WITNESS | NODE_BLOOM), 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, ConnectionType::OUTBOUND).release();
p2p_node.fSuccessfullyConnected = true;
p2p_node.nVersion = PROTOCOL_VERSION;
p2p_node.SetSendVersion(PROTOCOL_VERSION);
diff --git a/src/test/fuzz/process_messages.cpp b/src/test/fuzz/process_messages.cpp
index 91ebf9fb1b..ef427442e9 100644
--- a/src/test/fuzz/process_messages.cpp
+++ b/src/test/fuzz/process_messages.cpp
@@ -44,9 +44,8 @@ void test_one_input(const std::vector<uint8_t>& buffer)
const auto num_peers_to_add = fuzzed_data_provider.ConsumeIntegralInRange(1, 3);
for (int i = 0; i < num_peers_to_add; ++i) {
const ServiceFlags service_flags = ServiceFlags(fuzzed_data_provider.ConsumeIntegral<uint64_t>());
- const bool inbound{fuzzed_data_provider.ConsumeBool()};
- const bool block_relay_only{fuzzed_data_provider.ConsumeBool()};
- peers.push_back(MakeUnique<CNode>(i, service_flags, 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, inbound, block_relay_only).release());
+ const ConnectionType conn_type = fuzzed_data_provider.PickValueInArray({ConnectionType::INBOUND, ConnectionType::OUTBOUND, ConnectionType::MANUAL, ConnectionType::FEELER, ConnectionType::BLOCK_RELAY, ConnectionType::ADDR_FETCH});
+ peers.push_back(MakeUnique<CNode>(i, service_flags, 0, INVALID_SOCKET, CAddress{CService{in_addr{0x0100007f}, 7777}, NODE_NETWORK}, 0, 0, CAddress{}, std::string{}, conn_type).release());
CNode& p2p_node = *peers.back();
p2p_node.fSuccessfullyConnected = true;
diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp
index ab42be21bd..317000c771 100644
--- a/src/test/net_tests.cpp
+++ b/src/test/net_tests.cpp
@@ -180,17 +180,12 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test)
CAddress addr = CAddress(CService(ipv4Addr, 7777), NODE_NETWORK);
std::string pszDest;
- bool fInboundIn = false;
- // Test that fFeeler is false by default.
- std::unique_ptr<CNode> pnode1 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 0, 0, CAddress(), pszDest, fInboundIn);
- BOOST_CHECK(pnode1->fInbound == false);
- BOOST_CHECK(pnode1->fFeeler == false);
+ std::unique_ptr<CNode> pnode1 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 0, 0, CAddress(), pszDest, ConnectionType::OUTBOUND);
+ BOOST_CHECK(pnode1->IsInboundConn() == false);
- fInboundIn = true;
- std::unique_ptr<CNode> pnode2 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 1, 1, CAddress(), pszDest, fInboundIn);
- BOOST_CHECK(pnode2->fInbound == true);
- BOOST_CHECK(pnode2->fFeeler == false);
+ std::unique_ptr<CNode> pnode2 = MakeUnique<CNode>(id++, NODE_NETWORK, height, hSocket, addr, 1, 1, CAddress(), pszDest, ConnectionType::INBOUND);
+ BOOST_CHECK(pnode2->IsInboundConn() == true);
}
// prior to PR #14728, this test triggers an undefined behavior
@@ -214,7 +209,7 @@ BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test)
in_addr ipv4AddrPeer;
ipv4AddrPeer.s_addr = 0xa0b0c001;
CAddress addr = CAddress(CService(ipv4AddrPeer, 7777), NODE_NETWORK);
- std::unique_ptr<CNode> pnode = MakeUnique<CNode>(0, NODE_NETWORK, 0, INVALID_SOCKET, addr, 0, 0, CAddress{}, std::string{}, false);
+ std::unique_ptr<CNode> pnode = MakeUnique<CNode>(0, NODE_NETWORK, 0, INVALID_SOCKET, addr, 0, 0, CAddress{}, std::string{}, ConnectionType::OUTBOUND);
pnode->fSuccessfullyConnected.store(true);
// the peer claims to be reaching us via IPv6
diff --git a/src/test/settings_tests.cpp b/src/test/settings_tests.cpp
index 91e039416c..548fd020a6 100644
--- a/src/test/settings_tests.cpp
+++ b/src/test/settings_tests.cpp
@@ -241,7 +241,7 @@ BOOST_FIXTURE_TEST_CASE(Merge, MergeTestingSetup)
unsigned char out_sha_bytes[CSHA256::OUTPUT_SIZE];
out_sha.Finalize(out_sha_bytes);
- std::string out_sha_hex = HexStr(std::begin(out_sha_bytes), std::end(out_sha_bytes));
+ std::string out_sha_hex = HexStr(out_sha_bytes);
// If check below fails, should manually dump the results with:
//
diff --git a/src/test/sync_tests.cpp b/src/test/sync_tests.cpp
index 3ea8714f3a..19029ebd3c 100644
--- a/src/test/sync_tests.cpp
+++ b/src/test/sync_tests.cpp
@@ -14,6 +14,7 @@ void TestPotentialDeadLockDetected(MutexType& mutex1, MutexType& mutex2)
{
LOCK2(mutex1, mutex2);
}
+ BOOST_CHECK(LockStackEmpty());
bool error_thrown = false;
try {
LOCK2(mutex2, mutex1);
@@ -21,6 +22,7 @@ void TestPotentialDeadLockDetected(MutexType& mutex1, MutexType& mutex2)
BOOST_CHECK_EQUAL(e.what(), "potential deadlock detected: mutex1 -> mutex2 -> mutex1");
error_thrown = true;
}
+ BOOST_CHECK(LockStackEmpty());
#ifdef DEBUG_LOCKORDER
BOOST_CHECK(error_thrown);
#else
@@ -40,9 +42,13 @@ BOOST_AUTO_TEST_CASE(potential_deadlock_detected)
RecursiveMutex rmutex1, rmutex2;
TestPotentialDeadLockDetected(rmutex1, rmutex2);
+ // The second call ensures that the lock tracking data was not corrupted by the exception thrown in the first call.
+ TestPotentialDeadLockDetected(rmutex1, rmutex2);
Mutex mutex1, mutex2;
TestPotentialDeadLockDetected(mutex1, mutex2);
+ // The second call ensures that the lock tracking data was not corrupted by the exception thrown in the first call.
+ TestPotentialDeadLockDetected(mutex1, mutex2);
#ifdef DEBUG_LOCKORDER
g_debug_lockorder_abort = prev;
diff --git a/src/test/system_tests.cpp b/src/test/system_tests.cpp
new file mode 100644
index 0000000000..a55145c738
--- /dev/null
+++ b/src/test/system_tests.cpp
@@ -0,0 +1,95 @@
+// Copyright (c) 2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+//
+#include <test/util/setup_common.h>
+#include <util/system.h>
+#include <univalue.h>
+
+#ifdef HAVE_BOOST_PROCESS
+#include <boost/process.hpp>
+#endif // HAVE_BOOST_PROCESS
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(system_tests, BasicTestingSetup)
+
+// At least one test is required (in case HAVE_BOOST_PROCESS is not defined).
+// Workaround for https://github.com/bitcoin/bitcoin/issues/19128
+BOOST_AUTO_TEST_CASE(dummy)
+{
+ BOOST_CHECK(true);
+}
+
+#ifdef HAVE_BOOST_PROCESS
+
+bool checkMessage(const std::runtime_error& ex)
+{
+ // On Linux & Mac: "No such file or directory"
+ // On Windows: "The system cannot find the file specified."
+ const std::string what(ex.what());
+ BOOST_CHECK(what.find("file") != std::string::npos);
+ return true;
+}
+
+bool checkMessageFalse(const std::runtime_error& ex)
+{
+ BOOST_CHECK_EQUAL(ex.what(), std::string("RunCommandParseJSON error: process(false) returned 1: \n"));
+ return true;
+}
+
+bool checkMessageStdErr(const std::runtime_error& ex)
+{
+ const std::string what(ex.what());
+ BOOST_CHECK(what.find("RunCommandParseJSON error:") != std::string::npos);
+ return checkMessage(ex);
+}
+
+BOOST_AUTO_TEST_CASE(run_command)
+{
+ {
+ const UniValue result = RunCommandParseJSON("");
+ BOOST_CHECK(result.isNull());
+ }
+ {
+#ifdef WIN32
+ // Windows requires single quotes to prevent escaping double quotes from the JSON...
+ const UniValue result = RunCommandParseJSON("echo '{\"success\": true}'");
+#else
+ // ... but Linux and macOS echo a single quote if it's used
+ const UniValue result = RunCommandParseJSON("echo \"{\"success\": true}\"");
+#endif
+ BOOST_CHECK(result.isObject());
+ const UniValue& success = find_value(result, "success");
+ BOOST_CHECK(!success.isNull());
+ BOOST_CHECK_EQUAL(success.getBool(), true);
+ }
+ {
+ // An invalid command is handled by Boost
+ BOOST_CHECK_EXCEPTION(RunCommandParseJSON("invalid_command"), boost::process::process_error, checkMessage); // Command failed
+ }
+ {
+ // Return non-zero exit code, no output to stderr
+ BOOST_CHECK_EXCEPTION(RunCommandParseJSON("false"), std::runtime_error, checkMessageFalse);
+ }
+ {
+ // Return non-zero exit code, with error message for stderr
+ BOOST_CHECK_EXCEPTION(RunCommandParseJSON("ls nosuchfile"), std::runtime_error, checkMessageStdErr);
+ }
+ {
+ BOOST_REQUIRE_THROW(RunCommandParseJSON("echo \"{\""), std::runtime_error); // Unable to parse JSON
+ }
+ // Test passing input to the child process via std_in, except on Windows
+#ifndef WIN32
+ {
+ const UniValue result = RunCommandParseJSON("cat", "{\"success\": true}");
+ BOOST_CHECK(result.isObject());
+ const UniValue& success = find_value(result, "success");
+ BOOST_CHECK(!success.isNull());
+ BOOST_CHECK_EQUAL(success.getBool(), true);
+ }
+#endif
+}
+#endif // HAVE_BOOST_PROCESS
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 14f65dcb7c..b2ae1cb845 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -11,6 +11,7 @@
#include <consensus/validation.h>
#include <crypto/sha256.h>
#include <init.h>
+#include <interfaces/chain.h>
#include <miner.h>
#include <net.h>
#include <net_processing.h>
@@ -32,6 +33,7 @@
#include <util/vector.h>
#include <validation.h>
#include <validationinterface.h>
+#include <walletinitinterface.h>
#include <functional>
@@ -104,6 +106,8 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve
SetupNetworking();
InitSignatureCache();
InitScriptExecutionCache();
+ m_node.chain = interfaces::MakeChain(m_node);
+ g_wallet_init_interface.Construct(m_node);
fCheckBlockIndex = true;
static bool noui_connected = false;
if (!noui_connected) {
diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp
index 15a2c1e300..b49370c967 100644
--- a/src/test/util_tests.cpp
+++ b/src/test/util_tests.cpp
@@ -105,47 +105,24 @@ BOOST_AUTO_TEST_CASE(util_ParseHex)
BOOST_AUTO_TEST_CASE(util_HexStr)
{
BOOST_CHECK_EQUAL(
- HexStr(ParseHex_expected, ParseHex_expected + sizeof(ParseHex_expected)),
+ HexStr(ParseHex_expected),
"04678afdb0fe5548271967f1a67130b7105cd6a828e03909a67962e0ea1f61deb649f6bc3f4cef38c4f35504e51ec112de5c384df7ba0b8d578a4c702b6bf11d5f");
BOOST_CHECK_EQUAL(
- HexStr(ParseHex_expected + sizeof(ParseHex_expected),
- ParseHex_expected + sizeof(ParseHex_expected)),
+ HexStr(Span<const unsigned char>(
+ ParseHex_expected + sizeof(ParseHex_expected),
+ ParseHex_expected + sizeof(ParseHex_expected))),
"");
BOOST_CHECK_EQUAL(
- HexStr(ParseHex_expected, ParseHex_expected),
+ HexStr(Span<const unsigned char>(ParseHex_expected, ParseHex_expected)),
"");
std::vector<unsigned char> ParseHex_vec(ParseHex_expected, ParseHex_expected + 5);
BOOST_CHECK_EQUAL(
- HexStr(ParseHex_vec.rbegin(), ParseHex_vec.rend()),
- "b0fd8a6704"
- );
-
- BOOST_CHECK_EQUAL(
- HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected),
- std::reverse_iterator<const uint8_t *>(ParseHex_expected)),
- ""
- );
-
- BOOST_CHECK_EQUAL(
- HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected + 1),
- std::reverse_iterator<const uint8_t *>(ParseHex_expected)),
- "04"
- );
-
- BOOST_CHECK_EQUAL(
- HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected + 5),
- std::reverse_iterator<const uint8_t *>(ParseHex_expected)),
- "b0fd8a6704"
- );
-
- BOOST_CHECK_EQUAL(
- HexStr(std::reverse_iterator<const uint8_t *>(ParseHex_expected + 65),
- std::reverse_iterator<const uint8_t *>(ParseHex_expected)),
- "5f1df16b2b704c8a578d0bbaf74d385cde12c11ee50455f3c438ef4c3fbcf649b6de611feae06279a60939e028a8d65c10b73071a6f16719274855feb0fd8a6704"
+ HexStr(ParseHex_vec),
+ "04678afdb0"
);
}
@@ -1022,7 +999,7 @@ BOOST_FIXTURE_TEST_CASE(util_ArgsMerge, ArgsMergeTestingSetup)
unsigned char out_sha_bytes[CSHA256::OUTPUT_SIZE];
out_sha.Finalize(out_sha_bytes);
- std::string out_sha_hex = HexStr(std::begin(out_sha_bytes), std::end(out_sha_bytes));
+ std::string out_sha_hex = HexStr(out_sha_bytes);
// If check below fails, should manually dump the results with:
//
@@ -1125,7 +1102,7 @@ BOOST_FIXTURE_TEST_CASE(util_ChainMerge, ChainMergeTestingSetup)
unsigned char out_sha_bytes[CSHA256::OUTPUT_SIZE];
out_sha.Finalize(out_sha_bytes);
- std::string out_sha_hex = HexStr(std::begin(out_sha_bytes), std::end(out_sha_bytes));
+ std::string out_sha_hex = HexStr(out_sha_bytes);
// If check below fails, should manually dump the results with:
//
diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp
index 84118b36ef..5d56d1ff89 100644
--- a/src/torcontrol.cpp
+++ b/src/torcontrol.cpp
@@ -405,7 +405,7 @@ static bool WriteBinaryFile(const fs::path &filename, const std::string &data)
/****** Bitcoin specific TorController implementation ********/
/** Controller that connects to Tor control socket, authenticate, then create
- * and maintain an ephemeral hidden service.
+ * and maintain an ephemeral onion service.
*/
class TorController
{
@@ -534,7 +534,7 @@ void TorController::auth_cb(TorControlConnection& _conn, const TorControlReply&
// Finally - now create the service
if (private_key.empty()) // No private key, generate one
private_key = "NEW:RSA1024"; // Explicitly request RSA1024 - see issue #9214
- // Request hidden service, redirect port.
+ // Request onion service, redirect port.
// Note that the 'virtual' port is always the default port to avoid decloaking nodes using other ports.
_conn.Command(strprintf("ADD_ONION %s Port=%i,127.0.0.1:%i", private_key, Params().GetDefaultPort(), GetListenPort()),
std::bind(&TorController::add_onion_cb, this, std::placeholders::_1, std::placeholders::_2));
diff --git a/src/uint256.cpp b/src/uint256.cpp
index a5dfba41e2..ee1b34eadd 100644
--- a/src/uint256.cpp
+++ b/src/uint256.cpp
@@ -19,7 +19,11 @@ base_blob<BITS>::base_blob(const std::vector<unsigned char>& vch)
template <unsigned int BITS>
std::string base_blob<BITS>::GetHex() const
{
- return HexStr(std::reverse_iterator<const uint8_t*>(m_data + sizeof(m_data)), std::reverse_iterator<const uint8_t*>(m_data));
+ uint8_t m_data_rev[WIDTH];
+ for (int i = 0; i < WIDTH; ++i) {
+ m_data_rev[i] = m_data[WIDTH - 1 - i];
+ }
+ return HexStr(m_data_rev);
}
template <unsigned int BITS>
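The rewrite above replaces the reverse_iterator call with an explicit byte reversal into a stack buffer before hex-encoding. A minimal sketch of the resulting behaviour (illustrative only, not part of this diff; assumes uint256S() from uint256.h):

#include <uint256.h>

#include <cassert>

void GetHexRoundTrip()
{
    // uint256S() parses the big-endian text into little-endian storage;
    // GetHex() reverses the bytes again, so the text round-trips unchanged.
    const uint256 h = uint256S("1122334455667788990011223344556677889900112233445566778899001122");
    assert(h.GetHex() == "1122334455667788990011223344556677889900112233445566778899001122");
}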
diff --git a/src/util/strencodings.cpp b/src/util/strencodings.cpp
index 3a903b6897..d10f92ffe6 100644
--- a/src/util/strencodings.cpp
+++ b/src/util/strencodings.cpp
@@ -569,3 +569,16 @@ std::string Capitalize(std::string str)
str[0] = ToUpper(str.front());
return str;
}
+
+std::string HexStr(const Span<const uint8_t> s)
+{
+ std::string rv;
+ static constexpr char hexmap[16] = { '0', '1', '2', '3', '4', '5', '6', '7',
+ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
+ rv.reserve(s.size() * 2);
+ for (uint8_t v: s) {
+ rv.push_back(hexmap[v >> 4]);
+ rv.push_back(hexmap[v & 15]);
+ }
+ return rv;
+}
diff --git a/src/util/strencodings.h b/src/util/strencodings.h
index bd988f1410..eaa0fa9992 100644
--- a/src/util/strencodings.h
+++ b/src/util/strencodings.h
@@ -10,6 +10,7 @@
#define BITCOIN_UTIL_STRENCODINGS_H
#include <attributes.h>
+#include <span.h>
#include <cstdint>
#include <iterator>
@@ -119,27 +120,11 @@ NODISCARD bool ParseUInt64(const std::string& str, uint64_t *out);
*/
NODISCARD bool ParseDouble(const std::string& str, double *out);
-template<typename T>
-std::string HexStr(const T itbegin, const T itend)
-{
- std::string rv;
- static const char hexmap[16] = { '0', '1', '2', '3', '4', '5', '6', '7',
- '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' };
- rv.reserve(std::distance(itbegin, itend) * 2);
- for(T it = itbegin; it < itend; ++it)
- {
- unsigned char val = (unsigned char)(*it);
- rv.push_back(hexmap[val>>4]);
- rv.push_back(hexmap[val&15]);
- }
- return rv;
-}
-
-template<typename T>
-inline std::string HexStr(const T& vch)
-{
- return HexStr(vch.begin(), vch.end());
-}
+/**
+ * Convert a span of bytes to a lower-case hexadecimal string.
+ */
+std::string HexStr(const Span<const uint8_t> s);
+inline std::string HexStr(const Span<const char> s) { return HexStr(MakeUCharSpan(s)); }
/**
* Format a paragraph of text to a fixed width, adding spaces for
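With the iterator-pair template gone, HexStr() takes a Span<const uint8_t>, so containers and C arrays are passed directly. A short usage sketch (illustrative values, not part of this diff):

#include <span.h>
#include <util/strencodings.h>

#include <cassert>
#include <vector>

void HexStrExamples()
{
    const std::vector<unsigned char> vec{0x04, 0x67, 0x8a, 0xfd, 0xb0};
    assert(HexStr(vec) == "04678afdb0"); // containers convert to Span implicitly

    const unsigned char arr[2]{0xde, 0xad};
    assert(HexStr(arr) == "dead"); // C arrays convert as well

    // Sub-ranges still need an explicit Span (data pointer plus size):
    assert(HexStr(Span<const unsigned char>(vec.data(), 2)) == "0467");
}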
diff --git a/src/util/system.cpp b/src/util/system.cpp
index 8164e884b1..7b74789b32 100644
--- a/src/util/system.cpp
+++ b/src/util/system.cpp
@@ -6,6 +6,10 @@
#include <sync.h>
#include <util/system.h>
+#ifdef HAVE_BOOST_PROCESS
+#include <boost/process.hpp>
+#endif // HAVE_BOOST_PROCESS
+
#include <chainparamsbase.h>
#include <util/strencodings.h>
#include <util/string.h>
@@ -1021,7 +1025,7 @@ bool FileCommit(FILE *file)
return false;
}
#else
- #if defined(__linux__) || defined(__NetBSD__)
+ #if defined(HAVE_FDATASYNC)
if (fdatasync(fileno(file)) != 0 && errno != EINVAL) { // Ignore EINVAL for filesystems that don't support sync
LogPrintf("%s: fdatasync failed: %d\n", __func__, errno);
return false;
@@ -1161,6 +1165,43 @@ void runCommand(const std::string& strCommand)
}
#endif
+#ifdef HAVE_BOOST_PROCESS
+UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in)
+{
+ namespace bp = boost::process;
+
+ UniValue result_json;
+ bp::opstream stdin_stream;
+ bp::ipstream stdout_stream;
+ bp::ipstream stderr_stream;
+
+ if (str_command.empty()) return UniValue::VNULL;
+
+ bp::child c(
+ str_command,
+ bp::std_out > stdout_stream,
+ bp::std_err > stderr_stream,
+ bp::std_in < stdin_stream
+ );
+ if (!str_std_in.empty()) {
+ stdin_stream << str_std_in << std::endl;
+ }
+ stdin_stream.pipe().close();
+
+ std::string result;
+ std::string error;
+ std::getline(stdout_stream, result);
+ std::getline(stderr_stream, error);
+
+ c.wait();
+ const int n_error = c.exit_code();
+ if (n_error) throw std::runtime_error(strprintf("RunCommandParseJSON error: process(%s) returned %d: %s\n", str_command, n_error, error));
+ if (!result_json.read(result)) throw std::runtime_error("Unable to parse JSON: " + result);
+
+ return result_json;
+}
+#endif // HAVE_BOOST_PROCESS
+
void SetupEnvironment()
{
#ifdef HAVE_MALLOPT_ARENA_MAX
diff --git a/src/util/system.h b/src/util/system.h
index 0bd14cc9ea..1df194ca84 100644
--- a/src/util/system.h
+++ b/src/util/system.h
@@ -37,6 +37,8 @@
#include <boost/thread/condition_variable.hpp> // for boost::thread_interrupted
+class UniValue;
+
// Application startup time (used for uptime calculation)
int64_t GetStartupTime();
@@ -96,6 +98,16 @@ std::string ShellEscape(const std::string& arg);
#if HAVE_SYSTEM
void runCommand(const std::string& strCommand);
#endif
+#ifdef HAVE_BOOST_PROCESS
+/**
+ * Execute a command which returns JSON, and parse the result.
+ *
+ * @param str_command The command to execute, including any arguments
+ * @param str_std_in string to pass to stdin
+ * @return parsed JSON
+ */
+UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in="");
+#endif // HAVE_BOOST_PROCESS
/**
* Most paths passed as configuration arguments are treated as relative to
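RunCommandParseJSON() spawns the command via Boost::Process, feeds it the optional stdin string, and reads a single line of JSON from its stdout; a non-zero exit code or unparseable output throws std::runtime_error. A hedged usage sketch (the echo command line is illustrative, not taken from this diff):

#ifdef HAVE_BOOST_PROCESS
#include <univalue.h>
#include <util/system.h>

void RunCommandParseJSONExample()
{
    // The child prints {"success":true}; the helper parses it into a UniValue.
    const UniValue result = RunCommandParseJSON("echo {\"success\":true}");
    const bool ok = result["success"].get_bool();
    (void)ok; // true on success
}
#endif // HAVE_BOOST_PROCESS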
diff --git a/src/validation.cpp b/src/validation.cpp
index 81af972777..cf2f9dde62 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -688,8 +688,9 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
}
// Check for non-standard pay-to-script-hash in inputs
- if (fRequireStandard && !AreInputsStandard(tx, m_view))
- return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-nonstandard-inputs");
+ if (fRequireStandard && !AreInputsStandard(tx, m_view)) {
+ return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs");
+ }
// Check for non-standard witness in P2WSH
if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view))
@@ -1198,8 +1199,8 @@ bool ReadRawBlockFromDisk(std::vector<uint8_t>& block, const FlatFilePos& pos, c
if (memcmp(blk_start, message_start, CMessageHeader::MESSAGE_START_SIZE)) {
return error("%s: Block magic mismatch for %s: %s versus expected %s", __func__, pos.ToString(),
- HexStr(blk_start, blk_start + CMessageHeader::MESSAGE_START_SIZE),
- HexStr(message_start, message_start + CMessageHeader::MESSAGE_START_SIZE));
+ HexStr(blk_start),
+ HexStr(message_start));
}
if (blk_size > MAX_SIZE) {
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
index a04311fdf5..24eb2ee34c 100644
--- a/src/wallet/bdb.cpp
+++ b/src/wallet/bdb.cpp
@@ -38,7 +38,7 @@ void CheckUniqueFileid(const BerkeleyEnvironment& env, const std::string& filena
for (const auto& item : env.m_fileids) {
if (fileid == item.second && &fileid != &item.second) {
throw std::runtime_error(strprintf("BerkeleyDatabase: Can't open database %s (duplicates fileid %s from %s)", filename,
- HexStr(std::begin(item.second.value), std::end(item.second.value)), item.first));
+ HexStr(item.second.value), item.first));
}
}
}
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index 3b752ca936..e0c3a1287a 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -34,7 +34,7 @@ std::string static EncodeDumpString(const std::string &str) {
std::stringstream ret;
for (const unsigned char c : str) {
if (c <= 32 || c >= 128 || c == '%') {
- ret << '%' << HexStr(&c, &c + 1);
+ ret << '%' << HexStr(Span<const unsigned char>(&c, 1));
} else {
ret << c;
}
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 39d1f49e9e..58eaf54175 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -3688,7 +3688,7 @@ UniValue getaddressinfo(const JSONRPCRequest& request)
if (meta->has_key_origin) {
ret.pushKV("hdkeypath", WriteHDKeypath(meta->key_origin.path));
ret.pushKV("hdseedid", meta->hd_seed_id.GetHex());
- ret.pushKV("hdmasterfingerprint", HexStr(meta->key_origin.fingerprint, meta->key_origin.fingerprint + 4));
+ ret.pushKV("hdmasterfingerprint", HexStr(meta->key_origin.fingerprint));
}
}
}
diff --git a/test/functional/example_test.py b/test/functional/example_test.py
index 5d782026dc..34e4999329 100755
--- a/test/functional/example_test.py
+++ b/test/functional/example_test.py
@@ -166,7 +166,7 @@ class ExampleTest(BitcoinTestFramework):
height = self.nodes[0].getblockcount()
- for i in range(10):
+ for _ in range(10):
# Use the mininode and blocktools functionality to manually build a block
# Calling the generate() rpc is easier, but this allows us to exactly
# control the blocks and transactions.
diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py
index 79777f5582..f19ee12f95 100755
--- a/test/functional/feature_assumevalid.py
+++ b/test/functional/feature_assumevalid.py
@@ -123,7 +123,7 @@ class AssumeValidTest(BitcoinTestFramework):
height += 1
# Bury the block 100 deep so the coinbase output is spendable
- for i in range(100):
+ for _ in range(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.blocks.append(block)
@@ -149,7 +149,7 @@ class AssumeValidTest(BitcoinTestFramework):
height += 1
# Bury the assumed valid block 2100 deep
- for i in range(2100):
+ for _ in range(2100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.nVersion = 4
block.solve()
diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py
index 19cdc10935..1253c45418 100755
--- a/test/functional/feature_bip68_sequence.py
+++ b/test/functional/feature_bip68_sequence.py
@@ -141,7 +141,7 @@ class BIP68Test(BitcoinTestFramework):
# some of those inputs to be sequence locked (and randomly choose
# between height/time locking). Small random chance of making the locks
# all pass.
- for i in range(400):
+ for _ in range(400):
# Randomly choose up to 10 inputs
num_inputs = random.randint(1, 10)
random.shuffle(utxos)
@@ -260,7 +260,7 @@ class BIP68Test(BitcoinTestFramework):
# Use prioritisetransaction to lower the effective feerate to 0
self.nodes[0].prioritisetransaction(txid=tx2.hash, fee_delta=int(-self.relayfee*COIN))
cur_time = int(time.time())
- for i in range(10):
+ for _ in range(10):
self.nodes[0].setmocktime(cur_time + 600)
self.nodes[0].generate(1)
cur_time += 600
diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py
index 6619d83dc4..c74761869b 100755
--- a/test/functional/feature_block.py
+++ b/test/functional/feature_block.py
@@ -125,7 +125,7 @@ class FullBlockTest(BitcoinTestFramework):
# collect spendable outputs now to avoid cluttering the code later on
out = []
- for i in range(NUM_OUTPUTS_TO_COLLECT):
+ for _ in range(NUM_OUTPUTS_TO_COLLECT):
out.append(self.get_spendable_output())
# Start by building a couple of blocks on top (which output is spent is
diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py
index c6852ef017..dfb3683143 100755
--- a/test/functional/feature_csv_activation.py
+++ b/test/functional/feature_csv_activation.py
@@ -161,7 +161,7 @@ class BIP68_112_113Test(BitcoinTestFramework):
def generate_blocks(self, number):
test_blocks = []
- for i in range(number):
+ for _ in range(number):
block = self.create_test_block([])
test_blocks.append(block)
self.last_block_time += 600
@@ -209,22 +209,22 @@ class BIP68_112_113Test(BitcoinTestFramework):
# Note we reuse inputs for v1 and v2 txs so must test these separately
# 16 normal inputs
bip68inputs = []
- for i in range(16):
+ for _ in range(16):
bip68inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
# 2 sets of 16 inputs with 10 OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112basicinputs = []
- for j in range(2):
+ for _ in range(2):
inputs = []
- for i in range(16):
+ for _ in range(16):
inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
bip112basicinputs.append(inputs)
# 2 sets of 16 varied inputs with (relative_lock_time) OP_CSV OP_DROP (actually will be prepended to spending scriptSig)
bip112diverseinputs = []
- for j in range(2):
+ for _ in range(2):
inputs = []
- for i in range(16):
+ for _ in range(16):
inputs.append(send_generic_input_tx(self.nodes[0], self.coinbase_blocks, self.nodeaddress))
bip112diverseinputs.append(inputs)
diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py
index 7b38e09bf9..7a2e35c095 100755
--- a/test/functional/feature_dbcrash.py
+++ b/test/functional/feature_dbcrash.py
@@ -195,7 +195,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework):
while len(utxo_list) >= 2 and num_transactions < count:
tx = CTransaction()
input_amount = 0
- for i in range(2):
+ for _ in range(2):
utxo = utxo_list.pop()
tx.vin.append(CTxIn(COutPoint(int(utxo['txid'], 16), utxo['vout'])))
input_amount += int(utxo['amount'] * COIN)
@@ -205,7 +205,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework):
# Sanity check -- if we chose inputs that are too small, skip
continue
- for i in range(3):
+ for _ in range(3):
tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey'])))
# Sign and send the transaction to get into the mempool
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
index 3cf0fb8f7b..702a1d9995 100755
--- a/test/functional/feature_fee_estimation.py
+++ b/test/functional/feature_fee_estimation.py
@@ -176,9 +176,9 @@ class EstimateFeeTest(BitcoinTestFramework):
# We shuffle our confirmed txout set before each set of transactions
# small_txpuzzle_randfee will use the transactions that have inputs already in the chain when possible
# resorting to tx's that depend on the mempool when those run out
- for i in range(numblocks):
+ for _ in range(numblocks):
random.shuffle(self.confutxo)
- for j in range(random.randrange(100 - 50, 100 + 50)):
+ for _ in range(random.randrange(100 - 50, 100 + 50)):
from_index = random.randint(1, 2)
(txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
self.memutxo, Decimal("0.005"), min_fee, min_fee)
@@ -243,7 +243,7 @@ class EstimateFeeTest(BitcoinTestFramework):
self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
- for i in range(2):
+ for _ in range(2):
self.log.info("Creating transactions and mining them with a block size that can't keep up")
# Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(10, self.nodes[2])
diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py
index 5538d6d3b4..0dc2839191 100755
--- a/test/functional/feature_maxuploadtarget.py
+++ b/test/functional/feature_maxuploadtarget.py
@@ -104,7 +104,7 @@ class MaxUploadTest(BitcoinTestFramework):
assert_equal(len(self.nodes[0].getpeerinfo()), 3)
# At most a couple more tries should succeed (depending on how long
# the test has been running so far).
- for i in range(3):
+ for _ in range(3):
p2p_conns[0].send_message(getdata_request)
p2p_conns[0].wait_for_disconnect()
assert_equal(len(self.nodes[0].getpeerinfo()), 2)
diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
index e46e5aacc8..02fa88f7c8 100755
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -147,7 +147,7 @@ class PruneTest(BitcoinTestFramework):
# Create stale blocks in manageable sized chunks
self.log.info("Mine 24 (stale) blocks on Node 1, followed by 25 (main chain) block reorg from Node 0, for 12 rounds")
- for j in range(12):
+ for _ in range(12):
# Disconnect node 0 so it can mine a longer reorg chain without knowing about node 1's soon-to-be-stale chain
# Node 2 stays connected, so it hears about the stale blocks and then reorg's when node0 reconnects
disconnect_nodes(self.nodes[0], 1)
diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py
index acf551ef69..1b531ad51d 100755
--- a/test/functional/feature_rbf.py
+++ b/test/functional/feature_rbf.py
@@ -376,7 +376,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1))
outputs = []
- for i in range(MAX_REPLACEMENT_LIMIT+1):
+ for _ in range(MAX_REPLACEMENT_LIMIT+1):
outputs.append(CTxOut(split_value, CScript([1])))
splitting_tx = CTransaction()
diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py
index 5195d20dcb..0842972779 100755
--- a/test/functional/feature_segwit.py
+++ b/test/functional/feature_segwit.py
@@ -126,11 +126,11 @@ class SegWitTest(BitcoinTestFramework):
assert_equal(bip173_ms_addr, script_to_p2wsh(multiscript))
p2sh_ids.append([])
wit_ids.append([])
- for v in range(2):
+ for _ in range(2):
p2sh_ids[i].append([])
wit_ids[i].append([])
- for i in range(5):
+ for _ in range(5):
for n in range(3):
for v in range(2):
wit_ids[n][v].append(send_to_witness(v, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[n], False, Decimal("49.999")))
diff --git a/test/functional/mempool_package_onemore.py b/test/functional/mempool_package_onemore.py
index 0739d7e29b..e956fe07d2 100755
--- a/test/functional/mempool_package_onemore.py
+++ b/test/functional/mempool_package_onemore.py
@@ -31,7 +31,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
for (txid, vout) in zip(parent_txids, vouts):
inputs.append({'txid' : txid, 'vout' : vout})
outputs = {}
- for i in range(num_outputs):
+ for _ in range(num_outputs):
outputs[node.getnewaddress()] = send_value
rawtx = node.createrawtransaction(inputs, outputs, 0, True)
signedtx = node.signrawtransactionwithwallet(rawtx)
diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py
index 542d24f4be..98dac30ace 100755
--- a/test/functional/mempool_packages.py
+++ b/test/functional/mempool_packages.py
@@ -48,7 +48,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
send_value = satoshi_round((value - fee)/num_outputs)
inputs = [ {'txid' : parent_txid, 'vout' : vout} ]
outputs = {}
- for i in range(num_outputs):
+ for _ in range(num_outputs):
outputs[node.getnewaddress()] = send_value
rawtx = node.createrawtransaction(inputs, outputs)
signedtx = node.signrawtransactionwithwallet(rawtx)
@@ -70,7 +70,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
# MAX_ANCESTORS transactions off a confirmed tx should be fine
chain = []
witness_chain = []
- for i in range(MAX_ANCESTORS):
+ for _ in range(MAX_ANCESTORS):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1)
value = sent_value
chain.append(txid)
@@ -245,7 +245,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
# Sign and send up to MAX_DESCENDANT transactions chained off the parent tx
chain = [] # save sent txs for the purpose of checking node1's mempool later (see below)
- for i in range(MAX_DESCENDANTS - 1):
+ for _ in range(MAX_DESCENDANTS - 1):
utxo = transaction_package.pop(0)
(txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10)
chain.append(txid)
@@ -312,7 +312,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
send_value = satoshi_round((value - fee)/2)
inputs = [ {'txid' : txid, 'vout' : vout} ]
outputs = {}
- for i in range(2):
+ for _ in range(2):
outputs[self.nodes[0].getnewaddress()] = send_value
rawtx = self.nodes[0].createrawtransaction(inputs, outputs)
signedtx = self.nodes[0].signrawtransactionwithwallet(rawtx)
@@ -326,7 +326,7 @@ class MempoolPackagesTest(BitcoinTestFramework):
# Create tx2-7
vout = 1
txid = tx0_id
- for i in range(6):
+ for _ in range(6):
(txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1)
vout = 0
value = sent_value
diff --git a/test/functional/mempool_persist.py b/test/functional/mempool_persist.py
index 5d00648aed..85c4d6d570 100755
--- a/test/functional/mempool_persist.py
+++ b/test/functional/mempool_persist.py
@@ -62,7 +62,7 @@ class MempoolPersistTest(BitcoinTestFramework):
def run_test(self):
self.log.debug("Send 5 transactions from node2 (to its own address)")
tx_creation_time_lower = int(time.time())
- for i in range(5):
+ for _ in range(5):
last_txid = self.nodes[2].sendtoaddress(self.nodes[2].getnewaddress(), Decimal("10"))
node2_balance = self.nodes[2].getbalance()
self.sync_all()
diff --git a/test/functional/mempool_updatefromblock.py b/test/functional/mempool_updatefromblock.py
index 8a703ef009..8baf974a0a 100755
--- a/test/functional/mempool_updatefromblock.py
+++ b/test/functional/mempool_updatefromblock.py
@@ -73,7 +73,7 @@ class MempoolUpdateFromBlockTest(BitcoinTestFramework):
n_outputs = size - tx_count
output_value = ((inputs_value - fee) / Decimal(n_outputs)).quantize(Decimal('0.00000001'))
outputs = {}
- for n in range(0, n_outputs):
+ for _ in range(n_outputs):
outputs[self.nodes[0].getnewaddress()] = output_value
else:
output_value = (inputs_value - fee).quantize(Decimal('0.00000001'))
diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py
index 0b3738b572..225d393e1b 100755
--- a/test/functional/p2p_compactblocks.py
+++ b/test/functional/p2p_compactblocks.py
@@ -125,7 +125,7 @@ class CompactBlocksTest(BitcoinTestFramework):
out_value = total_value // 10
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(block.vtx[0].sha256, 0), b''))
- for i in range(10):
+ for _ in range(10):
tx.vout.append(CTxOut(out_value, CScript([OP_TRUE])))
tx.rehash()
@@ -266,7 +266,7 @@ class CompactBlocksTest(BitcoinTestFramework):
address = node.getnewaddress()
segwit_tx_generated = False
- for i in range(num_transactions):
+ for _ in range(num_transactions):
txid = node.sendtoaddress(address, 0.1)
hex_tx = node.gettransaction(txid)["hex"]
tx = FromHex(CTransaction(), hex_tx)
@@ -294,12 +294,11 @@ class CompactBlocksTest(BitcoinTestFramework):
block.rehash()
# Wait until the block was announced (via compact blocks)
- wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
+ wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
- assert "cmpctblock" in test_node.last_message
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
@@ -309,12 +308,11 @@ class CompactBlocksTest(BitcoinTestFramework):
inv = CInv(MSG_CMPCT_BLOCK, block_hash)
test_node.send_message(msg_getdata([inv]))
- wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
+ wait_until(lambda: "cmpctblock" in test_node.last_message, timeout=30, lock=mininode_lock)
# Now fetch and check the compact block
header_and_shortids = None
with mininode_lock:
- assert "cmpctblock" in test_node.last_message
# Convert the on-the-wire representation to absolute indexes
header_and_shortids = HeaderAndShortIDs(test_node.last_message["cmpctblock"].header_and_shortids)
self.check_compactblock_construction_from_block(version, header_and_shortids, block_hash, block)
@@ -418,7 +416,7 @@ class CompactBlocksTest(BitcoinTestFramework):
def build_block_with_transactions(self, node, utxo, num_transactions):
block = self.build_block_on_tip(node)
- for i in range(num_transactions):
+ for _ in range(num_transactions):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(utxo[0], utxo[1]), b''))
tx.vout.append(CTxOut(utxo[2] - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE])))
@@ -627,7 +625,7 @@ class CompactBlocksTest(BitcoinTestFramework):
# Test that requesting old compactblocks doesn't work.
MAX_CMPCTBLOCK_DEPTH = 5
new_blocks = []
- for i in range(MAX_CMPCTBLOCK_DEPTH + 1):
+ for _ in range(MAX_CMPCTBLOCK_DEPTH + 1):
test_node.clear_block_announcement()
new_blocks.append(node.generate(1)[0])
wait_until(test_node.received_block_announcement, timeout=30, lock=mininode_lock)
@@ -691,10 +689,9 @@ class CompactBlocksTest(BitcoinTestFramework):
node.submitblock(ToHex(block))
for l in listeners:
- wait_until(lambda: l.received_block_announcement(), timeout=30, lock=mininode_lock)
+ wait_until(lambda: "cmpctblock" in l.last_message, timeout=30, lock=mininode_lock)
with mininode_lock:
for l in listeners:
- assert "cmpctblock" in l.last_message
l.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
assert_equal(l.last_message["cmpctblock"].header_and_shortids.header.sha256, block.sha256)
diff --git a/test/functional/p2p_feefilter.py b/test/functional/p2p_feefilter.py
index 73afe9adc4..0b51d8f4bb 100755
--- a/test/functional/p2p_feefilter.py
+++ b/test/functional/p2p_feefilter.py
@@ -19,7 +19,7 @@ def hashToHex(hash):
# Wait up to 60 secs to see if the testnode has received all the expected invs
def allInvsMatch(invsExpected, testnode):
- for x in range(60):
+ for _ in range(60):
with mininode_lock:
if (sorted(invsExpected) == sorted(testnode.txinvs)):
return True
@@ -91,7 +91,7 @@ class FeeFilterTest(BitcoinTestFramework):
# Test that invs are received by test connection for all txs at
# feerate of .2 sat/byte
node1.settxfee(Decimal("0.00000200"))
- txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
+ txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)]
assert allInvsMatch(txids, conn)
conn.clear_invs()
@@ -100,14 +100,14 @@ class FeeFilterTest(BitcoinTestFramework):
# Test that txs are still being received by test connection (paying .15 sat/byte)
node1.settxfee(Decimal("0.00000150"))
- txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
+ txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)]
assert allInvsMatch(txids, conn)
conn.clear_invs()
# Change tx fee rate to .1 sat/byte and test they are no longer received
# by the test connection
node1.settxfee(Decimal("0.00000100"))
- [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
+ [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)]
self.sync_mempools() # must be sure node 0 has received all txs
# Send one transaction from node0 that should be received, so that we
@@ -124,7 +124,7 @@ class FeeFilterTest(BitcoinTestFramework):
# Remove fee filter and check that txs are received again
conn.send_and_ping(msg_feefilter(0))
- txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)]
+ txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for _ in range(3)]
assert allInvsMatch(txids, conn)
conn.clear_invs()
diff --git a/test/functional/p2p_filter.py b/test/functional/p2p_filter.py
index c8e2616b79..ce3856fc95 100755
--- a/test/functional/p2p_filter.py
+++ b/test/functional/p2p_filter.py
@@ -218,11 +218,6 @@ class FilterTest(BitcoinTestFramework):
# Add peer but do not send version yet
filter_peer_without_nrelay = self.nodes[0].add_p2p_connection(P2PBloomFilter(), send_version=False, wait_for_verack=False)
# Send version with fRelay=False
- filter_peer_without_nrelay.wait_until(
- lambda: filter_peer_without_nrelay.is_connected,
- timeout=10,
- check_connected=False,
- )
version_without_fRelay = msg_version()
version_without_fRelay.nRelay = 0
filter_peer_without_nrelay.send_message(version_without_fRelay)
diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py
index fe6e236fc4..2fc5245241 100755
--- a/test/functional/p2p_leak.py
+++ b/test/functional/p2p_leak.py
@@ -63,16 +63,12 @@ class CLazyNode(P2PInterface):
def on_getblocktxn(self, message): self.bad_message(message)
def on_blocktxn(self, message): self.bad_message(message)
+
# Node that never sends a version. We'll use this to send a bunch of messages
# anyway, and eventually get disconnected.
class CNodeNoVersionMisbehavior(CLazyNode):
- # Send enough veracks without a message to reach the peer discouragement
- # threshold. This should get us disconnected. NOTE: implementation-specific
- # test; update if our discouragement policy for peer misbehavior changes.
- def on_open(self):
- super().on_open()
- for _ in range(DISCOURAGEMENT_THRESHOLD):
- self.send_message(msg_verack())
+ pass
+
# Node that never sends a version. This one just sits idle and hopes to receive
# any message (it shouldn't!)
@@ -80,6 +76,7 @@ class CNodeNoVersionIdle(CLazyNode):
def __init__(self):
super().__init__()
+
# Node that sends a version but not a verack.
class CNodeNoVerackIdle(CLazyNode):
def __init__(self):
@@ -114,6 +111,11 @@ class P2PLeakTest(BitcoinTestFramework):
no_version_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVersionIdle(), send_version=False, wait_for_verack=False)
no_verack_idlenode = self.nodes[0].add_p2p_connection(CNodeNoVerackIdle(), wait_for_verack=False)
+ # Send enough veracks without a message to reach the peer discouragement
+ # threshold. This should get us disconnected.
+ for _ in range(DISCOURAGEMENT_THRESHOLD):
+ no_version_disconnect_node.send_message(msg_verack())
+
# Wait until we got the verack in response to the version. Though, don't wait for the other node to receive the
# verack, since we never sent one
no_verack_idlenode.wait_for_verack()
@@ -153,7 +155,6 @@ class P2PLeakTest(BitcoinTestFramework):
p2p_old_node = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False)
old_version_msg = msg_version()
old_version_msg.nVersion = 31799
- wait_until(lambda: p2p_old_node.is_connected)
with self.nodes[0].assert_debug_log(['peer=4 using obsolete version 31799; disconnecting']):
p2p_old_node.send_message(old_version_msg)
p2p_old_node.wait_for_disconnect()
diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py
index 728212ca23..564e49f3d8 100755
--- a/test/functional/p2p_segwit.py
+++ b/test/functional/p2p_segwit.py
@@ -174,6 +174,9 @@ class TestP2PConn(P2PInterface):
self.last_wtxidrelay.append(message)
def announce_tx_and_wait_for_getdata(self, tx, timeout=60, success=True, use_wtxid=False):
+ if success:
+ # sanity check
+ assert (self.wtxidrelay and use_wtxid) or (not self.wtxidrelay and not use_wtxid)
with mininode_lock:
self.last_message.pop("getdata", None)
if use_wtxid:
@@ -259,6 +262,8 @@ class SegWitTest(BitcoinTestFramework):
self.old_node = self.nodes[0].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
self.std_node = self.nodes[1].add_p2p_connection(TestP2PConn(), services=NODE_NETWORK | NODE_WITNESS)
+ # self.std_wtx_node is for testing node1 with wtxid relay
+ self.std_wtx_node = self.nodes[1].add_p2p_connection(TestP2PConn(wtxidrelay=True), services=NODE_NETWORK | NODE_WITNESS)
assert self.test_node.nServices & NODE_WITNESS != 0
@@ -942,7 +947,7 @@ class SegWitTest(BitcoinTestFramework):
parent_tx = CTransaction()
parent_tx.vin.append(CTxIn(prevout, b""))
child_value = int(value / NUM_OUTPUTS)
- for i in range(NUM_OUTPUTS):
+ for _ in range(NUM_OUTPUTS):
parent_tx.vout.append(CTxOut(child_value, script_pubkey))
parent_tx.vout[0].nValue -= 50000
assert parent_tx.vout[0].nValue > 0
@@ -952,7 +957,7 @@ class SegWitTest(BitcoinTestFramework):
for i in range(NUM_OUTPUTS):
child_tx.vin.append(CTxIn(COutPoint(parent_tx.sha256, i), b""))
child_tx.vout = [CTxOut(value - 100000, CScript([OP_TRUE]))]
- for i in range(NUM_OUTPUTS):
+ for _ in range(NUM_OUTPUTS):
child_tx.wit.vtxinwit.append(CTxInWitness())
child_tx.wit.vtxinwit[-1].scriptWitness.stack = [b'a' * 195] * (2 * NUM_DROPS) + [witness_program]
child_tx.rehash()
@@ -1199,7 +1204,7 @@ class SegWitTest(BitcoinTestFramework):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
value = self.utxo[0].nValue
- for i in range(10):
+ for _ in range(10):
tx.vout.append(CTxOut(int(value / 10), script_pubkey))
tx.vout[0].nValue -= 1000
assert tx.vout[0].nValue >= 0
@@ -1319,9 +1324,14 @@ class SegWitTest(BitcoinTestFramework):
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program2]
tx3.rehash()
- # Node will not be blinded to the transaction
+ # Node will not be blinded to the transaction, requesting it any number of times
+ # if it is being announced via txid relay.
+ # Node will be blinded to the transaction via wtxid, however.
self.std_node.announce_tx_and_wait_for_getdata(tx3)
+ self.std_wtx_node.announce_tx_and_wait_for_getdata(tx3, use_wtxid=True)
test_transaction_acceptance(self.nodes[1], self.std_node, tx3, True, False, 'tx-size')
+ self.std_node.announce_tx_and_wait_for_getdata(tx3)
+ self.std_wtx_node.announce_tx_and_wait_for_getdata(tx3, use_wtxid=True, success=False)
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue - 1000, CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))
@@ -1372,7 +1382,7 @@ class SegWitTest(BitcoinTestFramework):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
split_value = (self.utxo[0].nValue - 4000) // NUM_SEGWIT_VERSIONS
- for i in range(NUM_SEGWIT_VERSIONS):
+ for _ in range(NUM_SEGWIT_VERSIONS):
tx.vout.append(CTxOut(split_value, CScript([OP_TRUE])))
tx.rehash()
block = self.build_next_block()
@@ -1418,7 +1428,7 @@ class SegWitTest(BitcoinTestFramework):
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
- # Spend everything in temp_utxo back to an OP_TRUE output.
+        # Spend everything in temp_utxo into a segwit v1 output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
@@ -1426,8 +1436,16 @@ class SegWitTest(BitcoinTestFramework):
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
- tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
+ tx3.vout.append(CTxOut(total_value - 1000, script_pubkey))
tx3.rehash()
+
+ # First we test this transaction against fRequireStandard=true node
+ # making sure the txid is added to the reject filter
+ self.std_node.announce_tx_and_wait_for_getdata(tx3)
+ test_transaction_acceptance(self.nodes[1], self.std_node, tx3, with_witness=True, accepted=False, reason="bad-txns-nonstandard-inputs")
+ # Now the node will no longer ask for getdata of this transaction when advertised by same txid
+ self.std_node.announce_tx_and_wait_for_getdata(tx3, timeout=5, success=False)
+
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")
@@ -1646,7 +1664,7 @@ class SegWitTest(BitcoinTestFramework):
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(prev_utxo.sha256, prev_utxo.n), b""))
split_value = prev_utxo.nValue // NUM_SIGHASH_TESTS
- for i in range(NUM_SIGHASH_TESTS):
+ for _ in range(NUM_SIGHASH_TESTS):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.wit.vtxinwit.append(CTxInWitness())
sign_p2pk_witness_input(witness_program, tx, 0, SIGHASH_ALL, prev_utxo.nValue, key)
@@ -1676,7 +1694,7 @@ class SegWitTest(BitcoinTestFramework):
tx.wit.vtxinwit.append(CTxInWitness())
total_value += temp_utxos[i].nValue
split_value = total_value // num_outputs
- for i in range(num_outputs):
+ for _ in range(num_outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
for i in range(num_inputs):
# Now try to sign each input, using a random hashtype.
@@ -1974,7 +1992,7 @@ class SegWitTest(BitcoinTestFramework):
split_value = self.utxo[0].nValue // outputs
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b""))
- for i in range(outputs):
+ for _ in range(outputs):
tx.vout.append(CTxOut(split_value, script_pubkey))
tx.vout[-2].scriptPubKey = script_pubkey_toomany
tx.vout[-1].scriptPubKey = script_pubkey_justright
@@ -2060,7 +2078,7 @@ class SegWitTest(BitcoinTestFramework):
if (len(tx.wit.vtxinwit) != len(tx.vin)):
# vtxinwit must have the same length as vin
tx.wit.vtxinwit = tx.wit.vtxinwit[:len(tx.vin)]
- for i in range(len(tx.wit.vtxinwit), len(tx.vin)):
+ for _ in range(len(tx.wit.vtxinwit), len(tx.vin)):
tx.wit.vtxinwit.append(CTxInWitness())
r += tx.wit.serialize()
r += struct.pack("<I", tx.nLockTime)
diff --git a/test/functional/p2p_sendheaders.py b/test/functional/p2p_sendheaders.py
index 481b1c1841..126a46bd53 100755
--- a/test/functional/p2p_sendheaders.py
+++ b/test/functional/p2p_sendheaders.py
@@ -328,7 +328,7 @@ class SendHeadersTest(BitcoinTestFramework):
for j in range(2):
self.log.debug("Part 2.{}.{}: starting...".format(i, j))
blocks = []
- for b in range(i + 1):
+ for _ in range(i + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
@@ -443,7 +443,7 @@ class SendHeadersTest(BitcoinTestFramework):
# Create 2 blocks. Send the blocks, then send the headers.
blocks = []
- for b in range(2):
+ for _ in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
@@ -461,7 +461,7 @@ class SendHeadersTest(BitcoinTestFramework):
# This time, direct fetch should work
blocks = []
- for b in range(3):
+ for _ in range(3):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
@@ -482,7 +482,7 @@ class SendHeadersTest(BitcoinTestFramework):
blocks = []
# Create extra blocks for later
- for b in range(20):
+ for _ in range(20):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
@@ -529,7 +529,7 @@ class SendHeadersTest(BitcoinTestFramework):
test_node.last_message.pop("getdata", None)
blocks = []
# Create two more blocks.
- for j in range(2):
+ for _ in range(2):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
@@ -550,7 +550,7 @@ class SendHeadersTest(BitcoinTestFramework):
# Now we test that if we repeatedly don't send connecting headers, we
# don't go into an infinite loop trying to get them to connect.
MAX_UNCONNECTING_HEADERS = 10
- for j in range(MAX_UNCONNECTING_HEADERS + 1):
+ for _ in range(MAX_UNCONNECTING_HEADERS + 1):
blocks.append(create_block(tip, create_coinbase(height), block_time))
blocks[-1].solve()
tip = blocks[-1].sha256
diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py
index 2527edc135..3ea1c6e5e7 100755
--- a/test/functional/p2p_tx_download.py
+++ b/test/functional/p2p_tx_download.py
@@ -162,7 +162,7 @@ class TxDownloadTest(BitcoinTestFramework):
# Setup the p2p connections
self.peers = []
for node in self.nodes:
- for i in range(NUM_INBOUND):
+ for _ in range(NUM_INBOUND):
self.peers.append(node.add_p2p_connection(TestP2PConn()))
self.log.info("Nodes are setup with {} incoming connections each".format(NUM_INBOUND))
diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py
index 57c8f511ac..2a0971b808 100755
--- a/test/functional/rpc_fundrawtransaction.py
+++ b/test/functional/rpc_fundrawtransaction.py
@@ -554,7 +554,7 @@ class RawTransactionsTest(BitcoinTestFramework):
self.nodes[1].generate(1)
self.sync_all()
- for i in range(0,20):
+ for _ in range(20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
@@ -582,7 +582,7 @@ class RawTransactionsTest(BitcoinTestFramework):
self.nodes[1].generate(1)
self.sync_all()
- for i in range(0,20):
+ for _ in range(20):
self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 0.01)
self.nodes[0].generate(1)
self.sync_all()
diff --git a/test/functional/rpc_generateblock.py b/test/functional/rpc_generateblock.py
index aa58c0af9d..08ff0fba50 100755
--- a/test/functional/rpc_generateblock.py
+++ b/test/functional/rpc_generateblock.py
@@ -55,7 +55,7 @@ class GenerateBlockTest(BitcoinTestFramework):
node.generatetoaddress(110, address)
# Generate some extra mempool transactions to verify they don't get mined
- for i in range(10):
+ for _ in range(10):
node.sendtoaddress(address, 0.001)
self.log.info('Generate block with txid')
diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py
index 4d985dd1b1..f7f23bc8f4 100755
--- a/test/functional/rpc_psbt.py
+++ b/test/functional/rpc_psbt.py
@@ -430,7 +430,7 @@ class PSBTTest(BitcoinTestFramework):
# Check that joining shuffles the inputs and outputs
# 10 attempts should be enough to get a shuffled join
shuffled = False
- for i in range(0, 10):
+ for _ in range(10):
shuffled_joined = self.nodes[0].joinpsbts([psbt, psbt2])
shuffled |= joined != shuffled_joined
if shuffled:
diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py
index 2462a9a6db..da956a94de 100755
--- a/test/functional/test_framework/messages.py
+++ b/test/functional/test_framework/messages.py
@@ -111,7 +111,7 @@ def deser_uint256(f):
def ser_uint256(u):
rs = b""
- for i in range(8):
+ for _ in range(8):
rs += struct.pack("<I", u & 0xFFFFFFFF)
u >>= 32
return rs
@@ -134,7 +134,7 @@ def uint256_from_compact(c):
def deser_vector(f, c):
nit = deser_compact_size(f)
r = []
- for i in range(nit):
+ for _ in range(nit):
t = c()
t.deserialize(f)
r.append(t)
@@ -157,7 +157,7 @@ def ser_vector(l, ser_function_name=None):
def deser_uint256_vector(f):
nit = deser_compact_size(f)
r = []
- for i in range(nit):
+ for _ in range(nit):
t = deser_uint256(f)
r.append(t)
return r
@@ -173,7 +173,7 @@ def ser_uint256_vector(l):
def deser_string_vector(f):
nit = deser_compact_size(f)
r = []
- for i in range(nit):
+ for _ in range(nit):
t = deser_string(f)
r.append(t)
return r
@@ -467,7 +467,7 @@ class CTransaction:
else:
self.vout = deser_vector(f, CTxOut)
if flags != 0:
- self.wit.vtxinwit = [CTxInWitness() for i in range(len(self.vin))]
+ self.wit.vtxinwit = [CTxInWitness() for _ in range(len(self.vin))]
self.wit.deserialize(f)
else:
self.wit = CTxWitness()
@@ -500,7 +500,7 @@ class CTransaction:
if (len(self.wit.vtxinwit) != len(self.vin)):
# vtxinwit must have the same length as vin
self.wit.vtxinwit = self.wit.vtxinwit[:len(self.vin)]
- for i in range(len(self.wit.vtxinwit), len(self.vin)):
+ for _ in range(len(self.wit.vtxinwit), len(self.vin)):
self.wit.vtxinwit.append(CTxInWitness())
r += self.wit.serialize()
r += struct.pack("<I", self.nLockTime)
@@ -735,7 +735,7 @@ class P2PHeaderAndShortIDs:
self.header.deserialize(f)
self.nonce = struct.unpack("<Q", f.read(8))[0]
self.shortids_length = deser_compact_size(f)
- for i in range(self.shortids_length):
+ for _ in range(self.shortids_length):
# shortids are defined to be 6 bytes in the spec, so append
# two zero bytes and read it in as an 8-byte number
self.shortids.append(struct.unpack("<Q", f.read(6) + b'\x00\x00')[0])
@@ -852,7 +852,7 @@ class BlockTransactionsRequest:
def deserialize(self, f):
self.blockhash = deser_uint256(f)
indexes_length = deser_compact_size(f)
- for i in range(indexes_length):
+ for _ in range(indexes_length):
self.indexes.append(deser_compact_size(f))
def serialize(self):
diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py
index 53d6e474d9..eaf637fbb8 100755
--- a/test/functional/test_framework/mininode.py
+++ b/test/functional/test_framework/mininode.py
@@ -474,7 +474,7 @@ class P2PInterface(P2PConnection):
def test_function():
return "verack" in self.last_message
- self.wait_until(test_function, timeout=timeout, check_connected=False)
+ self.wait_until(test_function, timeout=timeout)
# Message sending helper functions
diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py
index cc5f8307d3..5e35ba0fce 100644
--- a/test/functional/test_framework/script.py
+++ b/test/functional/test_framework/script.py
@@ -646,7 +646,7 @@ def LegacySignatureHash(script, txTo, inIdx, hashtype):
tmp = txtmp.vout[outIdx]
txtmp.vout = []
- for i in range(outIdx):
+ for _ in range(outIdx):
txtmp.vout.append(CTxOut(-1))
txtmp.vout.append(tmp)
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 66bb2c89b5..8f0d45c7f9 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -524,6 +524,7 @@ class TestNode():
p2p_conn.peer_connect(**kwargs, net=self.chain, timeout_factor=self.timeout_factor)()
self.p2ps.append(p2p_conn)
+ p2p_conn.wait_until(lambda: p2p_conn.is_connected, check_connected=False)
if wait_for_verack:
# Wait for the node to send us the version and verack
p2p_conn.wait_for_verack()
@@ -637,7 +638,7 @@ class TestNodeCLI():
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
- except json.JSONDecodeError:
+ except (json.JSONDecodeError, decimal.InvalidOperation):
return cli_stdout.rstrip("\n")
class RPCOverloadWrapper():
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index 506057f1fa..3362b41209 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -529,7 +529,7 @@ def create_confirmed_utxos(fee, node, count):
addr2 = node.getnewaddress()
if iterations <= 0:
return utxos
- for i in range(iterations):
+ for _ in range(iterations):
t = utxos.pop()
inputs = []
inputs.append({"txid": t["txid"], "vout": t["vout"]})
@@ -556,7 +556,7 @@ def gen_return_txouts():
# So we have big transactions (and therefore can't fit very many into each block)
# create one script_pubkey
script_pubkey = "6a4d0200" # OP_RETURN OP_PUSH2 512 bytes
- for i in range(512):
+ for _ in range(512):
script_pubkey = script_pubkey + "01"
# concatenate 128 txouts of above script_pubkey which we'll insert before the txout for change
txouts = []
@@ -564,7 +564,7 @@ def gen_return_txouts():
txout = CTxOut()
txout.nValue = 0
txout.scriptPubKey = hex_str_to_bytes(script_pubkey)
- for k in range(128):
+ for _ in range(128):
txouts.append(txout)
return txouts
diff --git a/test/functional/wallet_backup.py b/test/functional/wallet_backup.py
index 9dd91b2495..4766355335 100755
--- a/test/functional/wallet_backup.py
+++ b/test/functional/wallet_backup.py
@@ -129,7 +129,7 @@ class WalletBackupTest(BitcoinTestFramework):
self.log.info("Creating transactions")
# Five rounds of sending each other transactions.
- for i in range(5):
+ for _ in range(5):
self.do_one_round()
self.log.info("Backing up")
@@ -142,7 +142,7 @@ class WalletBackupTest(BitcoinTestFramework):
self.nodes[2].dumpwallet(os.path.join(self.nodes[2].datadir, 'wallet.dump'))
self.log.info("More transactions")
- for i in range(5):
+ for _ in range(5):
self.do_one_round()
# Generate 101 more blocks, so any fees paid mature
diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py
index 81382d94ad..d9a8b58a84 100755
--- a/test/functional/wallet_basic.py
+++ b/test/functional/wallet_basic.py
@@ -569,7 +569,7 @@ class WalletTest(BitcoinTestFramework):
# So we should be able to generate exactly chainlimit txs for each original output
sending_addr = self.nodes[1].getnewaddress()
txid_list = []
- for i in range(chainlimit * 2):
+ for _ in range(chainlimit * 2):
txid_list.append(self.nodes[0].sendtoaddress(sending_addr, Decimal('0.0001')))
assert_equal(self.nodes[0].getmempoolinfo()['size'], chainlimit * 2)
assert_equal(len(txid_list), chainlimit * 2)
diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py
index 72c85b8832..0ef78b0e1c 100755
--- a/test/functional/wallet_bumpfee.py
+++ b/test/functional/wallet_bumpfee.py
@@ -62,7 +62,7 @@ class BumpFeeTest(BitcoinTestFramework):
self.log.info("Mining blocks...")
peer_node.generate(110)
self.sync_all()
- for i in range(25):
+ for _ in range(25):
peer_node.sendtoaddress(rbf_node_address, 0.001)
self.sync_all()
peer_node.generate(1)
diff --git a/test/functional/wallet_create_tx.py b/test/functional/wallet_create_tx.py
index 330de8b0fc..ed9159726a 100755
--- a/test/functional/wallet_create_tx.py
+++ b/test/functional/wallet_create_tx.py
@@ -45,7 +45,7 @@ class CreateTxWalletTest(BitcoinTestFramework):
def test_tx_size_too_large(self):
# More than 10kB of outputs, so that we hit -maxtxfee with a high feerate
- outputs = {self.nodes[0].getnewaddress(address_type='bech32'): 0.000025 for i in range(400)}
+ outputs = {self.nodes[0].getnewaddress(address_type='bech32'): 0.000025 for _ in range(400)}
raw_tx = self.nodes[0].createrawtransaction(inputs=[], outputs=outputs)
for fee_setting in ['-minrelaytxfee=0.01', '-mintxfee=0.01', '-paytxfee=0.01']:
diff --git a/test/functional/wallet_descriptor.py b/test/functional/wallet_descriptor.py
index 289ccf43ec..9c63e8f7d3 100755
--- a/test/functional/wallet_descriptor.py
+++ b/test/functional/wallet_descriptor.py
@@ -107,7 +107,7 @@ class WalletDescriptorTest(BitcoinTestFramework):
assert_equal(info2['desc'], info3['desc'])
self.log.info("Test that getnewaddress still works after keypool is exhausted in an encrypted wallet")
- for i in range(0, 500):
+ for _ in range(500):
send_wrpc.getnewaddress()
self.log.info("Test that unlock is needed when deriving only hardened keys in an encrypted wallet")
@@ -120,7 +120,7 @@ class WalletDescriptorTest(BitcoinTestFramework):
}])
send_wrpc.walletlock()
# Exhaust keypool of 100
- for i in range(0, 100):
+ for _ in range(100):
send_wrpc.getnewaddress(address_type='bech32')
# This should now error
assert_raises_rpc_error(-12, "Keypool ran out, please call keypoolrefill first", send_wrpc.getnewaddress, '', 'bech32')
diff --git a/test/functional/wallet_dump.py b/test/functional/wallet_dump.py
index 6bfb468823..06f01ef191 100755
--- a/test/functional/wallet_dump.py
+++ b/test/functional/wallet_dump.py
@@ -116,7 +116,7 @@ class WalletDumpTest(BitcoinTestFramework):
test_addr_count = 10
addrs = []
for address_type in ['legacy', 'p2sh-segwit', 'bech32']:
- for i in range(0, test_addr_count):
+ for _ in range(test_addr_count):
addr = self.nodes[0].getnewaddress(address_type=address_type)
vaddr = self.nodes[0].getaddressinfo(addr) # required to get hd keypath
addrs.append(vaddr)
diff --git a/test/functional/wallet_groups.py b/test/functional/wallet_groups.py
index 9dd55b4ab1..b6fe295127 100755
--- a/test/functional/wallet_groups.py
+++ b/test/functional/wallet_groups.py
@@ -27,8 +27,8 @@ class WalletGroupTest(BitcoinTestFramework):
self.nodes[0].generate(110)
# Get some addresses from the two nodes
- addr1 = [self.nodes[1].getnewaddress() for i in range(3)]
- addr2 = [self.nodes[2].getnewaddress() for i in range(3)]
+ addr1 = [self.nodes[1].getnewaddress() for _ in range(3)]
+ addr2 = [self.nodes[2].getnewaddress() for _ in range(3)]
addrs = addr1 + addr2
# Send 1 + 0.5 coin to each address
@@ -71,7 +71,7 @@ class WalletGroupTest(BitcoinTestFramework):
# Fill node2's wallet with 10000 outputs corresponding to the same
# scriptPubKey
- for i in range(5):
+ for _ in range(5):
raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}])
tx = FromHex(CTransaction(), raw_tx)
tx.vin = []
diff --git a/test/functional/wallet_labels.py b/test/functional/wallet_labels.py
index fb4a1f9792..cff59bd1c1 100755
--- a/test/functional/wallet_labels.py
+++ b/test/functional/wallet_labels.py
@@ -118,7 +118,7 @@ class WalletLabelsTest(BitcoinTestFramework):
if not self.options.descriptors:
for label in labels:
addresses = []
- for x in range(10):
+ for _ in range(10):
addresses.append(node.getnewaddress())
multisig_address = node.addmultisigaddress(5, addresses, label.name)['address']
label.add_address(multisig_address)
diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py
index a54396cad3..1872545cdb 100755
--- a/test/functional/wallet_multiwallet.py
+++ b/test/functional/wallet_multiwallet.py
@@ -26,7 +26,7 @@ FEATURE_LATEST = 169900
got_loading_error = False
def test_load_unload(node, name):
global got_loading_error
- for i in range(10):
+ for _ in range(10):
if got_loading_error:
return
try:
diff --git a/test/lint/lint-git-commit-check.sh b/test/lint/lint-git-commit-check.sh
index 7cffd267dd..8947f67bf6 100755
--- a/test/lint/lint-git-commit-check.sh
+++ b/test/lint/lint-git-commit-check.sh
@@ -14,21 +14,22 @@ while getopts "?" opt; do
case $opt in
?)
echo "Usage: $0 [N]"
- echo " TRAVIS_COMMIT_RANGE='<commit range>' $0"
+ echo " COMMIT_RANGE='<commit range>' $0"
echo " $0 -?"
echo "Checks unmerged commits, the previous N commits, or a commit range."
- echo "TRAVIS_COMMIT_RANGE='47ba2c3...ee50c9e' $0"
+ echo "COMMIT_RANGE='47ba2c3...ee50c9e' $0"
exit ${EXIT_CODE}
;;
esac
done
-if [ -z "${TRAVIS_COMMIT_RANGE}" ]; then
- if [ -n "$1" ]; then
- TRAVIS_COMMIT_RANGE="HEAD~$1...HEAD"
- else
- TRAVIS_COMMIT_RANGE="origin/master..HEAD"
- fi
+if [ -z "${COMMIT_RANGE}" ]; then
+ if [ -n "$1" ]; then
+ COMMIT_RANGE="HEAD~$1...HEAD"
+ else
+ MERGE_BASE=$(git merge-base HEAD master)
+ COMMIT_RANGE="$MERGE_BASE..HEAD"
+ fi
fi
while IFS= read -r commit_hash || [[ -n "$commit_hash" ]]; do
@@ -41,6 +42,6 @@ while IFS= read -r commit_hash || [[ -n "$commit_hash" ]]; do
EXIT_CODE=1
fi
done < <(git log --format=%B -n 1 "$commit_hash")
-done < <(git log "${TRAVIS_COMMIT_RANGE}" --format=%H)
+done < <(git log "${COMMIT_RANGE}" --format=%H)
exit ${EXIT_CODE}
diff --git a/test/lint/lint-includes.sh b/test/lint/lint-includes.sh
index 611bd4a8c4..fde77aea2d 100755
--- a/test/lint/lint-includes.sh
+++ b/test/lint/lint-includes.sh
@@ -63,6 +63,7 @@ EXPECTED_BOOST_INCLUDES=(
boost/optional.hpp
boost/preprocessor/cat.hpp
boost/preprocessor/stringize.hpp
+ boost/process.hpp
boost/signals2/connection.hpp
boost/signals2/optional_last_value.hpp
boost/signals2/signal.hpp