-rw-r--r--  .travis.yml | 2
-rw-r--r--  CONTRIBUTING.md | 2
-rw-r--r--  build-aux/m4/ax_boost_process.m4 | 121
-rw-r--r--  build_msvc/bitcoin_config.h | 3
-rw-r--r--  build_msvc/vcpkg-packages.txt | 2
-rw-r--r--  ci/test/00_setup_env_arm.sh | 2
-rw-r--r--  ci/test/00_setup_env_i686_centos.sh | 2
-rw-r--r--  ci/test/00_setup_env_mac.sh | 2
-rw-r--r--  ci/test/00_setup_env_mac_host.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_asan.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_fuzz.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_multiprocess.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_nowallet.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_qt5.sh | 2
-rw-r--r--  ci/test/00_setup_env_native_tsan.sh | 2
-rw-r--r--  ci/test/00_setup_env_s390x.sh | 2
-rw-r--r--  ci/test/00_setup_env_win64.sh | 2
-rw-r--r--  configure.ac | 71
-rw-r--r--  contrib/valgrind.supp | 1
-rw-r--r--  depends/packages/boost.mk | 2
-rw-r--r--  doc/Doxyfile.in | 2
-rw-r--r--  doc/JSON-RPC-interface.md | 2
-rw-r--r--  doc/dependencies.md | 2
-rw-r--r--  doc/files.md | 2
-rw-r--r--  doc/tor.md | 18
-rw-r--r--  src/Makefile.test.include | 3
-rw-r--r--  src/consensus/validation.h | 3
-rw-r--r--  src/init.cpp | 4
-rw-r--r--  src/interfaces/node.cpp | 74
-rw-r--r--  src/interfaces/node.h | 6
-rw-r--r--  src/net_processing.cpp | 35
-rw-r--r--  src/policy/policy.cpp | 8
-rw-r--r--  src/qt/bitcoin.cpp | 4
-rw-r--r--  src/qt/forms/optionsdialog.ui | 4
-rw-r--r--  src/qt/test/addressbooktests.cpp | 2
-rw-r--r--  src/qt/test/test_main.cpp | 3
-rw-r--r--  src/qt/test/wallettests.cpp | 3
-rw-r--r--  src/sync.cpp | 22
-rw-r--r--  src/sync.h | 14
-rw-r--r--  src/test/sync_tests.cpp | 6
-rw-r--r--  src/test/system_tests.cpp | 95
-rw-r--r--  src/test/util/setup_common.cpp | 4
-rw-r--r--  src/torcontrol.cpp | 4
-rw-r--r--  src/util/system.cpp | 43
-rw-r--r--  src/util/system.h | 12
-rw-r--r--  src/validation.cpp | 5
-rw-r--r--  src/wallet/rpcwallet.cpp | 2
-rwxr-xr-x  test/functional/p2p_filter.py | 6
-rwxr-xr-x  test/functional/p2p_ping.py | 21
-rwxr-xr-x  test/functional/p2p_segwit.py | 12
-rwxr-xr-x  test/functional/test_framework/mininode.py | 29
-rwxr-xr-x  test/functional/test_framework/test_node.py | 2
-rwxr-xr-x  test/lint/lint-git-commit-check.sh | 19
-rwxr-xr-x  test/lint/lint-includes.sh | 1
54 files changed, 504 insertions, 196 deletions
diff --git a/.travis.yml b/.travis.yml
index f9932cfaca..f1cee7133f 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -80,7 +80,7 @@ jobs:
QEMU_USER_CMD=""
- stage: test
- name: 'Win64 [GOAL: deploy] [unit tests, no gui, no functional tests]'
+ name: 'Win64 [GOAL: deploy] [unit tests, no gui, no boost::process, no functional tests]'
env: >-
FILE_ENV="./ci/test/00_setup_env_win64.sh"
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 11a0f4bac7..2e11474382 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -158,7 +158,7 @@ the pull request affects. Valid areas as:
Examples:
consensus: Add new opcode for BIP-XXXX OP_CHECKAWESOMESIG
- net: Automatically create hidden service, listen on Tor
+ net: Automatically create onion service, listen on Tor
qt: Add feed bump button
log: Fix typo in log message
diff --git a/build-aux/m4/ax_boost_process.m4 b/build-aux/m4/ax_boost_process.m4
new file mode 100644
index 0000000000..5d20e67464
--- /dev/null
+++ b/build-aux/m4/ax_boost_process.m4
@@ -0,0 +1,121 @@
+# ===========================================================================
+# https://www.gnu.org/software/autoconf-archive/ax_boost_process.html
+# ===========================================================================
+#
+# SYNOPSIS
+#
+# AX_BOOST_PROCESS
+#
+# DESCRIPTION
+#
+# Test for Process library from the Boost C++ libraries. The macro
+# requires a preceding call to AX_BOOST_BASE. Further documentation is
+# available at <http://randspringer.de/boost/index.html>.
+#
+# This macro calls:
+#
+# AC_SUBST(BOOST_PROCESS_LIB)
+#
+# And sets:
+#
+# HAVE_BOOST_PROCESS
+#
+# LICENSE
+#
+# Copyright (c) 2008 Thomas Porschberg <thomas@randspringer.de>
+# Copyright (c) 2008 Michael Tindal
+# Copyright (c) 2008 Daniel Casimiro <dan.casimiro@gmail.com>
+#
+# Copying and distribution of this file, with or without modification, are
+# permitted in any medium without royalty provided the copyright notice
+# and this notice are preserved. This file is offered as-is, without any
+# warranty.
+
+#serial 2
+
+AC_DEFUN([AX_BOOST_PROCESS],
+[
+ AC_ARG_WITH([boost-process],
+ AS_HELP_STRING([--with-boost-process@<:@=special-lib@:>@],
+ [use the Process library from boost - it is possible to specify a certain library for the linker
+ e.g. --with-boost-process=boost_process-gcc-mt ]),
+ [
+ if test "$withval" = "no"; then
+ want_boost_process="no"
+ elif test "$withval" = "yes"; then
+ want_boost_process="yes"
+ ax_boost_user_process_lib=""
+ else
+ want_boost_process="yes"
+ ax_boost_user_process_lib="$withval"
+ fi
+ ],
+ [want_boost_process="yes"]
+ )
+
+ if test "x$want_boost_process" = "xyes"; then
+ AC_REQUIRE([AC_PROG_CC])
+ AC_REQUIRE([AC_CANONICAL_BUILD])
+ CPPFLAGS_SAVED="$CPPFLAGS"
+ CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
+ export CPPFLAGS
+
+ LDFLAGS_SAVED="$LDFLAGS"
+ LDFLAGS="$LDFLAGS $BOOST_LDFLAGS"
+ export LDFLAGS
+
+ AC_CACHE_CHECK(whether the Boost::Process library is available,
+ ax_cv_boost_process,
+ [AC_LANG_PUSH([C++])
+ CXXFLAGS_SAVE=$CXXFLAGS
+ CXXFLAGS=
+
+ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include <boost/process.hpp>]],
+ [[boost::process::child* child = new boost::process::child; delete child;]])],
+ ax_cv_boost_process=yes, ax_cv_boost_process=no)
+ CXXFLAGS=$CXXFLAGS_SAVE
+ AC_LANG_POP([C++])
+ ])
+ if test "x$ax_cv_boost_process" = "xyes"; then
+ AC_SUBST(BOOST_CPPFLAGS)
+
+ AC_DEFINE(HAVE_BOOST_PROCESS,,[define if the Boost::Process library is available])
+ BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'`
+
+ LDFLAGS_SAVE=$LDFLAGS
+ if test "x$ax_boost_user_process_lib" = "x"; then
+ for libextension in `ls -r $BOOSTLIBDIR/libboost_process* 2>/dev/null | sed 's,.*/lib,,' | sed 's,\..*,,'` ; do
+ ax_lib=${libextension}
+ AC_CHECK_LIB($ax_lib, exit,
+ [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break],
+ [link_process="no"])
+ done
+ if test "x$link_process" != "xyes"; then
+ for libextension in `ls -r $BOOSTLIBDIR/boost_process* 2>/dev/null | sed 's,.*/,,' | sed -e 's,\..*,,'` ; do
+ ax_lib=${libextension}
+ AC_CHECK_LIB($ax_lib, exit,
+ [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break],
+ [link_process="no"])
+ done
+ fi
+
+ else
+ for ax_lib in $ax_boost_user_process_lib boost_process-$ax_boost_user_process_lib; do
+ AC_CHECK_LIB($ax_lib, exit,
+ [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break],
+ [link_process="no"])
+ done
+
+ fi
+ if test "x$ax_lib" = "x"; then
+ AC_MSG_ERROR(Could not find a version of the Boost::Process library!)
+ fi
+ if test "x$link_process" = "xno"; then
+ AC_MSG_ERROR(Could not link against $ax_lib !)
+ fi
+ fi
+
+ CPPFLAGS="$CPPFLAGS_SAVED"
+ LDFLAGS="$LDFLAGS_SAVED"
+ fi
+])
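The only outputs of this macro that the C++ code consumes are the HAVE_BOOST_PROCESS define and, where linking is required, BOOST_PROCESS_LIB. A minimal, self-contained sketch of the guard pattern that the define enables, the same pattern the src/util/system.cpp hunk later in this diff follows; the file name and the /bin/true path are illustrative assumptions:

    // guard_sketch.cpp -- illustrative only. HAVE_BOOST_PROCESS is defined (or not) by
    // configure via the AC_DEFINE in the macro above; call sites simply wrap the
    // optional include and code path.
    #include <iostream>

    #ifdef HAVE_BOOST_PROCESS
    #include <boost/process.hpp>
    #endif

    int main()
    {
    #ifdef HAVE_BOOST_PROCESS
        // Optional path: run a child process and report its exit code.
        boost::process::child child("/bin/true"); // assumed path, POSIX only
        child.wait();
        std::cout << "boost::process available, child exited with " << child.exit_code() << "\n";
    #else
        // Fallback when configured with --without-boost-process.
        std::cout << "built without boost::process\n";
    #endif
        return 0;
    }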
diff --git a/build_msvc/bitcoin_config.h b/build_msvc/bitcoin_config.h
index fbbe1a2156..9d0b50a0b4 100644
--- a/build_msvc/bitcoin_config.h
+++ b/build_msvc/bitcoin_config.h
@@ -47,6 +47,9 @@
/* define if the Boost::Filesystem library is available */
#define HAVE_BOOST_FILESYSTEM /**/
+/* define if the Boost::Process library is available */
+#define HAVE_BOOST_PROCESS /**/
+
/* define if the Boost::System library is available */
#define HAVE_BOOST_SYSTEM /**/
diff --git a/build_msvc/vcpkg-packages.txt b/build_msvc/vcpkg-packages.txt
index 307f295f08..edce8576c3 100644
--- a/build_msvc/vcpkg-packages.txt
+++ b/build_msvc/vcpkg-packages.txt
@@ -1 +1 @@
-berkeleydb boost-filesystem boost-multi-index boost-signals2 boost-test boost-thread libevent[thread] zeromq double-conversion
\ No newline at end of file
+berkeleydb boost-filesystem boost-multi-index boost-process boost-signals2 boost-test boost-thread libevent[thread] zeromq double-conversion
diff --git a/ci/test/00_setup_env_arm.sh b/ci/test/00_setup_env_arm.sh
index b70a581532..2e445c126d 100644
--- a/ci/test/00_setup_env_arm.sh
+++ b/ci/test/00_setup_env_arm.sh
@@ -25,4 +25,4 @@ export RUN_FUNCTIONAL_TESTS=true
export GOAL="install"
# -Wno-psabi is to disable ABI warnings: "note: parameter passing for argument of type ... changed in GCC 7.1"
# This could be removed once the ABI change warning does not show up by default
-export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi --enable-werror"
+export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi --enable-werror --with-boost-process"
diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh
index 5688799f9e..e58003ab19 100644
--- a/ci/test/00_setup_env_i686_centos.sh
+++ b/ci/test/00_setup_env_i686_centos.sh
@@ -11,5 +11,5 @@ export CONTAINER_NAME=ci_i686_centos_7
export DOCKER_NAME_TAG=centos:7
export DOCKER_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python3 python36-zmq which patch lbzip2 dash"
export GOAL="install"
-export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports"
+export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports --with-boost-process"
export CONFIG_SHELL="/bin/dash"
diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh
index 7ec66eeb4f..b62f1603f4 100644
--- a/ci/test/00_setup_env_mac.sh
+++ b/ci/test/00_setup_env_mac.sh
@@ -14,4 +14,4 @@ export XCODE_BUILD_ID=11C505
export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
export GOAL="deploy"
-export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror"
+export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror --with-boost-process"
diff --git a/ci/test/00_setup_env_mac_host.sh b/ci/test/00_setup_env_mac_host.sh
index b8a9ccaae5..5fb127b762 100644
--- a/ci/test/00_setup_env_mac_host.sh
+++ b/ci/test/00_setup_env_mac_host.sh
@@ -10,7 +10,7 @@ export HOST=x86_64-apple-darwin16
export DOCKER_NAME_TAG=ubuntu:18.04 # Check that bionic can cross-compile to macos (bionic is used in the gitian build as well)
export PIP_PACKAGES="zmq"
export GOAL="install"
-export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror"
+export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-werror --with-boost-process"
export NO_DEPENDS=1
export OSX_SDK=""
export CCACHE_SIZE=300M
diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh
index d57c673069..5995964f17 100644
--- a/ci/test/00_setup_env_native_asan.sh
+++ b/ci/test/00_setup_env_native_asan.sh
@@ -12,4 +12,4 @@ export DOCKER_NAME_TAG=ubuntu:20.04
export NO_DEPENDS=1
export TEST_RUNNER_EXTRA="--timeout-factor=4" # Increase timeout because sanitizers slow down
export GOAL="install"
-export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=address,integer,undefined CC=clang CXX=clang++"
+export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=address,integer,undefined CC=clang CXX=clang++ --with-boost-process"
diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh
index 31f437f0e8..a32de4a6b5 100644
--- a/ci/test/00_setup_env_native_fuzz.sh
+++ b/ci/test/00_setup_env_native_fuzz.sh
@@ -14,5 +14,5 @@ export RUN_UNIT_TESTS=false
export RUN_FUNCTIONAL_TESTS=false
export RUN_FUZZ_TESTS=true
export GOAL="install"
-export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=clang CXX=clang++"
+export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined CC=clang CXX=clang++ --with-boost-process"
export CCACHE_SIZE=200M
diff --git a/ci/test/00_setup_env_native_multiprocess.sh b/ci/test/00_setup_env_native_multiprocess.sh
index 786f0f927f..522a5d9fc2 100644
--- a/ci/test/00_setup_env_native_multiprocess.sh
+++ b/ci/test/00_setup_env_native_multiprocess.sh
@@ -11,5 +11,5 @@ export DOCKER_NAME_TAG=ubuntu:20.04
export PACKAGES="cmake python3"
export DEP_OPTS="MULTIPROCESS=1"
export GOAL="install"
-export BITCOIN_CONFIG=""
+export BITCOIN_CONFIG="--with-boost-process"
export TEST_RUNNER_ENV="BITCOIND=bitcoin-node"
diff --git a/ci/test/00_setup_env_native_nowallet.sh b/ci/test/00_setup_env_native_nowallet.sh
index 1a0b14b62b..0a09bfe230 100644
--- a/ci/test/00_setup_env_native_nowallet.sh
+++ b/ci/test/00_setup_env_native_nowallet.sh
@@ -11,4 +11,4 @@ export DOCKER_NAME_TAG=ubuntu:16.04 # Use xenial to have one config run the tes
export PACKAGES="python3-zmq clang-3.8 llvm-3.8" # Use clang-3.8 to test C++11 compatibility, see doc/dependencies.md
export DEP_OPTS="NO_WALLET=1"
export GOAL="install"
-export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CC=clang-3.8 CXX=clang++-3.8"
+export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CC=clang-3.8 CXX=clang++-3.8 --with-boost-process"
diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh
index 6e2ff729a2..f9d869b4fd 100644
--- a/ci/test/00_setup_env_native_qt5.sh
+++ b/ci/test/00_setup_env_native_qt5.sh
@@ -16,4 +16,4 @@ export RUN_UNIT_TESTS_SEQUENTIAL="true"
export RUN_UNIT_TESTS="false"
export GOAL="install"
export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.2 v0.16.3 v0.17.1 v0.18.1 v0.19.1"
-export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-c++17 --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\""
+export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-c++17 --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\" --with-boost-process"
diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh
index 5695c43ec3..fc18483425 100644
--- a/ci/test/00_setup_env_native_tsan.sh
+++ b/ci/test/00_setup_env_native_tsan.sh
@@ -12,4 +12,4 @@ export PACKAGES="clang llvm libc++abi-dev libc++-dev python3-zmq"
export DEP_OPTS="CC=clang CXX='clang++ -stdlib=libc++'"
export TEST_RUNNER_EXTRA="--exclude feature_block --timeout-factor=4" # Increase timeout because sanitizers slow down. Low memory on Travis machines, exclude feature_block.
export GOAL="install"
-export BITCOIN_CONFIG="--enable-zmq --with-gui=no CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' CXXFLAGS='-g' --with-sanitizers=thread CC=clang CXX='clang++ -stdlib=libc++'"
+export BITCOIN_CONFIG="--enable-zmq --with-gui=no CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' CXXFLAGS='-g' --with-sanitizers=thread CC=clang CXX='clang++ -stdlib=libc++' --with-boost-process"
diff --git a/ci/test/00_setup_env_s390x.sh b/ci/test/00_setup_env_s390x.sh
index c180d023de..fe330920d0 100644
--- a/ci/test/00_setup_env_s390x.sh
+++ b/ci/test/00_setup_env_s390x.sh
@@ -22,4 +22,4 @@ export DOCKER_NAME_TAG="debian:buster"
export RUN_UNIT_TESTS=true
export RUN_FUNCTIONAL_TESTS=true
export GOAL="install"
-export BITCOIN_CONFIG="--enable-reduce-exports --with-incompatible-bdb"
+export BITCOIN_CONFIG="--enable-reduce-exports --with-incompatible-bdb --with-boost-process"
diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh
index eb8b870dd6..2b351dff6d 100644
--- a/ci/test/00_setup_env_win64.sh
+++ b/ci/test/00_setup_env_win64.sh
@@ -13,4 +13,4 @@ export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64"
export RUN_FUNCTIONAL_TESTS=false
export RUN_SECURITY_TESTS="true"
export GOAL="deploy"
-export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests"
+export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests --without-boost-process"
diff --git a/configure.ac b/configure.ac
index f11f2b2059..2381c5dd08 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1180,9 +1180,9 @@ fi
if test x$use_boost = xyes; then
dnl Minimum required Boost version
-define(MINIMUM_REQUIRED_BOOST, 1.47.0)
+define(MINIMUM_REQUIRED_BOOST, 1.58.0)
-dnl Check for boost libs
+dnl Check for Boost libs
AX_BOOST_BASE([MINIMUM_REQUIRED_BOOST])
if test x$want_boost = xno; then
AC_MSG_ERROR([[only libbitcoinconsensus can be built without boost]])
@@ -1191,30 +1191,15 @@ AX_BOOST_SYSTEM
AX_BOOST_FILESYSTEM
AX_BOOST_THREAD
+dnl Opt-in to boost-process
+AS_IF([ test x$with_boost_process != x ], [ AX_BOOST_PROCESS ], [ ax_cv_boost_process=no ] )
+
dnl Boost 1.56 through 1.62 allow using std::atomic instead of its own atomic
dnl counter implementations. In 1.63 and later the std::atomic approach is default.
m4_pattern_allow(DBOOST_AC_USE_STD_ATOMIC) dnl otherwise it's treated like a macro
BOOST_CPPFLAGS="-DBOOST_SP_USE_STD_ATOMIC -DBOOST_AC_USE_STD_ATOMIC $BOOST_CPPFLAGS"
-if test x$use_reduce_exports = xyes; then
- AC_MSG_CHECKING([for working boost reduced exports])
- TEMP_CPPFLAGS="$CPPFLAGS"
- CPPFLAGS="$BOOST_CPPFLAGS $CPPFLAGS"
- AC_PREPROC_IFELSE([AC_LANG_PROGRAM([[
- @%:@include <boost/version.hpp>
- ]], [[
- #if BOOST_VERSION >= 104900
- // Everything is okay
- #else
- # error Boost version is too old
- #endif
- ]])],[
- AC_MSG_RESULT(yes)
- ],[
- AC_MSG_ERROR([boost versions < 1.49 are known to be broken with reduced exports. Use --disable-reduce-exports.])
- ])
- CPPFLAGS="$TEMP_CPPFLAGS"
-fi
+BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB"
fi
if test x$use_reduce_exports = xyes; then
@@ -1228,7 +1213,6 @@ if test x$use_tests = xyes; then
AC_MSG_ERROR(hexdump is required for tests)
fi
-
if test x$use_boost = xyes; then
AX_BOOST_UNIT_TEST_FRAMEWORK
@@ -1254,48 +1238,6 @@ if test x$use_tests = xyes; then
fi
fi
-if test x$use_boost = xyes; then
-
-BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB"
-
-
-dnl If boost (prior to 1.57) was built without c++11, it emulated scoped enums
-dnl using c++98 constructs. Unfortunately, this implementation detail leaked into
-dnl the abi. This was fixed in 1.57.
-
-dnl When building against that installed version using c++11, the headers pick up
-dnl on the native c++11 scoped enum support and enable it, however it will fail to
-dnl link. This can be worked around by disabling c++11 scoped enums if linking will
-dnl fail.
-dnl BOOST_NO_SCOPED_ENUMS was changed to BOOST_NO_CXX11_SCOPED_ENUMS in 1.51.
-
-TEMP_LIBS="$LIBS"
-LIBS="$BOOST_LIBS $LIBS"
-TEMP_CPPFLAGS="$CPPFLAGS"
-CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS"
-AC_MSG_CHECKING([for mismatched boost c++11 scoped enums])
-AC_LINK_IFELSE([AC_LANG_PROGRAM([[
- #include <boost/config.hpp>
- #include <boost/version.hpp>
- #if !defined(BOOST_NO_SCOPED_ENUMS) && !defined(BOOST_NO_CXX11_SCOPED_ENUMS) && BOOST_VERSION < 105700
- #define BOOST_NO_SCOPED_ENUMS
- #define BOOST_NO_CXX11_SCOPED_ENUMS
- #define CHECK
- #endif
- #include <boost/filesystem.hpp>
- ]],[[
- #if defined(CHECK)
- boost::filesystem::copy_file("foo", "bar");
- #else
- choke;
- #endif
- ]])],
- [AC_MSG_RESULT(mismatched); BOOST_CPPFLAGS="$BOOST_CPPFLAGS -DBOOST_NO_SCOPED_ENUMS -DBOOST_NO_CXX11_SCOPED_ENUMS"], [AC_MSG_RESULT(ok)])
-LIBS="$TEMP_LIBS"
-CPPFLAGS="$TEMP_CPPFLAGS"
-
-fi
-
dnl libevent check
if test x$build_bitcoin_cli$build_bitcoind$bitcoin_enable_qt$use_tests$use_bench != xnonononono; then
@@ -1683,6 +1625,7 @@ esac
echo
echo "Options used to compile and link:"
+echo " boost process = $ax_cv_boost_process"
echo " multiprocess = $build_multiprocess"
echo " with wallet = $enable_wallet"
echo " with gui / qt = $bitcoin_enable_qt"
diff --git a/contrib/valgrind.supp b/contrib/valgrind.supp
index d2652119b4..ece02dc24e 100644
--- a/contrib/valgrind.supp
+++ b/contrib/valgrind.supp
@@ -123,7 +123,6 @@
Memcheck:Cond
...
fun:_ZN5boost10filesystem6detail11unique_pathERKNS0_4pathEPNS_6system10error_codeE
- fun:unique_path
}
{
Suppress boost warning
diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk
index 3a7e605b4f..4f6b543aff 100644
--- a/depends/packages/boost.mk
+++ b/depends/packages/boost.mk
@@ -31,7 +31,9 @@ $(package)_cxxflags_linux=-fPIC
$(package)_cxxflags_android=-fPIC
endef
+# Fix unused variable in boost_process, can be removed after upgrading to 1.72
define $(package)_preprocess_cmds
+ sed -i.old "s/int ret_sig = 0;//" boost/process/detail/posix/wait_group.hpp && \
echo "using $($(package)_toolset_$(host_os)) : : $($(package)_cxx) : <cxxflags>\"$($(package)_cxxflags) $($(package)_cppflags)\" <linkflags>\"$($(package)_ldflags)\" <archiver>\"$($(package)_archiver_$(host_os))\" <striper>\"$(host_STRIP)\" <ranlib>\"$(host_RANLIB)\" <rc>\"$(host_WINDRES)\" : ;" > user-config.jam
endef
diff --git a/doc/Doxyfile.in b/doc/Doxyfile.in
index 7e307ab7c8..2f79168212 100644
--- a/doc/Doxyfile.in
+++ b/doc/Doxyfile.in
@@ -2073,7 +2073,7 @@ INCLUDE_FILE_PATTERNS =
# recursively expanded use the := operator instead of the = operator.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
-PREDEFINED =
+PREDEFINED = HAVE_BOOST_PROCESS
# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
# tag can be used to specify a list of macro names that should be expanded. The
diff --git a/doc/JSON-RPC-interface.md b/doc/JSON-RPC-interface.md
index a0cfe84a3e..40d8e330e2 100644
--- a/doc/JSON-RPC-interface.md
+++ b/doc/JSON-RPC-interface.md
@@ -60,7 +60,7 @@ RPC interface will be abused.
are sent as clear text that can be read by anyone on your network
path. Additionally, the RPC interface has not been hardened to
withstand arbitrary Internet traffic, so changing the above settings
- to expose it to the Internet (even using something like a Tor hidden
+ to expose it to the Internet (even using something like a Tor onion
service) could expose you to unconsidered vulnerabilities. See
`bitcoind -help` for more information about these settings and other
settings described in this document.
diff --git a/doc/dependencies.md b/doc/dependencies.md
index 0cb5311e8b..92dea65309 100644
--- a/doc/dependencies.md
+++ b/doc/dependencies.md
@@ -6,7 +6,7 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct
| Dependency | Version used | Minimum required | CVEs | Shared | [Bundled Qt library](https://doc.qt.io/qt-5/configure-options.html#third-party-libraries) |
| --- | --- | --- | --- | --- | --- |
| Berkeley DB | [4.8.30](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) | 4.8.x | No | | |
-| Boost | [1.70.0](https://www.boost.org/users/download/) | [1.47.0](https://github.com/bitcoin/bitcoin/pull/8920) | No | | |
+| Boost | [1.70.0](https://www.boost.org/users/download/) | [1.58.0](https://github.com/bitcoin/bitcoin/pull/19667) | No | | |
| Clang | | [3.3+](https://releases.llvm.org/download.html) (C++11 support) | | | |
| Expat | [2.2.7](https://libexpat.github.io/) | | No | Yes | |
| fontconfig | [2.12.1](https://www.freedesktop.org/software/fontconfig/release/) | | No | Yes | |
diff --git a/doc/files.md b/doc/files.md
index 5475826329..52e094a60b 100644
--- a/doc/files.md
+++ b/doc/files.md
@@ -56,7 +56,7 @@ Subdirectory | File(s) | Description
`./` | `fee_estimates.dat` | Stores statistics used to estimate minimum transaction fees and priorities required for confirmation
`./` | `guisettings.ini.bak` | Backup of former [GUI settings](#gui-settings) after `-resetguisettings` option is used
`./` | `mempool.dat` | Dump of the mempool's transactions
-`./` | `onion_private_key` | Cached Tor hidden service private key for `-listenonion` option
+`./` | `onion_private_key` | Cached Tor onion service private key for `-listenonion` option
`./` | `peers.dat` | Peer IP address database (custom format)
`./` | `settings.json` | Read-write settings set through GUI or RPC interfaces, augmenting manual settings from [bitcoin.conf](bitcoin-conf.md). File is created automatically if read-write settings storage is not disabled with `-nosettings` option. Path can be specified with `-settings` option
`./` | `.cookie` | Session RPC authentication cookie; if used, created at start and deleted on shutdown; can be specified by `-rpccookiefile` option
diff --git a/doc/tor.md b/doc/tor.md
index 2c54e32f84..17807856e5 100644
--- a/doc/tor.md
+++ b/doc/tor.md
@@ -1,6 +1,6 @@
# TOR SUPPORT IN BITCOIN
-It is possible to run Bitcoin Core as a Tor hidden service, and connect to such services.
+It is possible to run Bitcoin Core as a Tor onion service, and connect to such services.
The following directions assume you have a Tor proxy running on port 9050. Many distributions default to having a SOCKS proxy listening on port 9050, but others may not. In particular, the Tor Browser Bundle defaults to listening on port 9150. See [Tor Project FAQ:TBBSocksPort](https://www.torproject.org/docs/faq.html.en#TBBSocksPort) for how to properly
configure Tor.
@@ -14,12 +14,12 @@ outgoing connections, but more is possible.
-proxy=ip:port Set the proxy server. If SOCKS5 is selected (default), this proxy
server will be used to try to reach .onion addresses as well.
- -onion=ip:port Set the proxy server to use for Tor hidden services. You do not
+ -onion=ip:port Set the proxy server to use for Tor onion services. You do not
need to set this if it's the same as -proxy. You can use -noonion
- to explicitly disable access to hidden services.
+ to explicitly disable access to onion services.
-listen When using -proxy, listening is disabled by default. If you want
- to run a hidden service (see next section), you'll need to enable
+ to run an onion service (see next section), you'll need to enable
it explicitly.
-connect=X When behind a Tor proxy, you can specify .onion addresses instead
@@ -94,11 +94,11 @@ for normal IPv4/IPv6 communication, use:
## 3. Automatically listen on Tor
Starting with Tor version 0.2.7.1 it is possible, through Tor's control socket
-API, to create and destroy 'ephemeral' hidden services programmatically.
+API, to create and destroy 'ephemeral' onion services programmatically.
Bitcoin Core has been updated to make use of this.
This means that if Tor is running (and proper authentication has been configured),
-Bitcoin Core automatically creates a hidden service to listen on. This will positively
+Bitcoin Core automatically creates an onion service to listen on. This will positively
affect the number of available .onion nodes.
This new feature is enabled by default if Bitcoin Core is listening (`-listen`), and
@@ -110,7 +110,7 @@ Connecting to Tor's control socket API requires one of two authentication method
configured. It also requires the control socket to be enabled, e.g. put `ControlPort 9051`
in `torrc` config file. For cookie authentication the user running bitcoind must have read
access to the `CookieAuthFile` specified in Tor configuration. In some cases this is
-preconfigured and the creation of a hidden service is automatic. If permission problems
+preconfigured and the creation of an onion service is automatic. If permission problems
are seen with `-debug=tor` they can be resolved by adding both the user running Tor and
the user running bitcoind to the same group and setting permissions appropriately. On
Debian-based systems the user running bitcoind can be added to the debian-tor group,
@@ -127,8 +127,8 @@ in the tor configuration file. The hashed password can be obtained with the comm
## 4. Privacy recommendations
-- Do not add anything but Bitcoin Core ports to the hidden service created in section 2.
- If you run a web service too, create a new hidden service for that.
+- Do not add anything but Bitcoin Core ports to the onion service created in section 2.
+ If you run a web service too, create a new onion service for that.
Otherwise it is trivial to link them, which may reduce privacy. Hidden
services created automatically (as in section 3) always have only one port
open.
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index b961f8d5b9..c3e46c0def 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -265,6 +265,7 @@ BITCOIN_TESTS =\
test/skiplist_tests.cpp \
test/streams_tests.cpp \
test/sync_tests.cpp \
+ test/system_tests.cpp \
test/util_threadnames_tests.cpp \
test/timedata_tests.cpp \
test/torcontrol_tests.cpp \
@@ -1207,7 +1208,7 @@ nodist_test_test_bitcoin_SOURCES = $(GENERATED_TEST_FILES)
$(BITCOIN_TESTS): $(GENERATED_TEST_FILES)
-CLEAN_BITCOIN_TEST = test/*.gcda test/*.gcno $(GENERATED_TEST_FILES) $(BITCOIN_TESTS:=.log)
+CLEAN_BITCOIN_TEST = test/*.gcda test/*.gcno test/fuzz/*.gcda test/fuzz/*.gcno $(GENERATED_TEST_FILES) $(BITCOIN_TESTS:=.log)
CLEANFILES += $(CLEAN_BITCOIN_TEST)
diff --git a/src/consensus/validation.h b/src/consensus/validation.h
index 8de7a8f2d8..2a93a090d6 100644
--- a/src/consensus/validation.h
+++ b/src/consensus/validation.h
@@ -26,7 +26,8 @@ enum class TxValidationResult {
* is uninteresting.
*/
TX_RECENT_CONSENSUS_CHANGE,
- TX_NOT_STANDARD, //!< didn't meet our local policy rules
+ TX_INPUTS_NOT_STANDARD, //!< inputs (covered by txid) failed policy rules
+ TX_NOT_STANDARD, //!< otherwise didn't meet our local policy rules
TX_MISSING_INPUTS, //!< transaction was missing some of its inputs
TX_PREMATURE_SPEND, //!< transaction spends a coinbase too early, or violates locktime/sequence locks
/**
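The src/validation.cpp change that actually raises this new result is not shown in this excerpt (only its 5-line diffstat entry is), but given the enum comment it presumably maps the AreInputsStandard() policy failure during mempool acceptance to the new code, roughly along these lines (a hedged sketch, not the verbatim hunk):

    // Sketch only: in MemPoolAccept's policy checks, a non-standard-input failure is
    // reported with the witness-independent result code instead of plain TX_NOT_STANDARD.
    if (fRequireStandard && !AreInputsStandard(tx, m_view)) {
        return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs");
    }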
diff --git a/src/init.cpp b/src/init.cpp
index 6cca21f375..08944b79a5 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -442,13 +442,13 @@ void SetupServerArgs(NodeContext& node)
argsman.AddArg("-externalip=<ip>", "Specify your own public address", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-listen", "Accept connections from outside (default: 1 if no -proxy or -connect)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- argsman.AddArg("-listenonion", strprintf("Automatically create Tor hidden service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-listenonion", strprintf("Automatically create Tor onion service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxconnections=<n>", strprintf("Maintain at most <n> connections to peers (default: %u)", DEFAULT_MAX_PEER_CONNECTIONS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxreceivebuffer=<n>", strprintf("Maximum per-connection receive buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXRECEIVEBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxsendbuffer=<n>", strprintf("Maximum per-connection send buffer, <n>*1000 bytes (default: %u)", DEFAULT_MAXSENDBUFFER), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxtimeadjustment", strprintf("Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by peers forward or backward by this amount. (default: %u seconds)", DEFAULT_MAX_TIME_ADJUSTMENT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-maxuploadtarget=<n>", strprintf("Tries to keep outbound traffic under the given target (in MiB per 24h). Limit does not apply to peers with 'download' permission. 0 = no limit (default: %d)", DEFAULT_MAX_UPLOAD_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
- argsman.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor hidden services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
+ argsman.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor onion services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-onlynet=<net>", "Make outgoing connections only through network <net> (ipv4, ipv6 or onion). Incoming connections are not affected by this option. This option can be specified multiple times to allow multiple networks.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
argsman.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION);
diff --git a/src/interfaces/node.cpp b/src/interfaces/node.cpp
index 33f0dac263..969767b90f 100644
--- a/src/interfaces/node.cpp
+++ b/src/interfaces/node.cpp
@@ -56,6 +56,7 @@ namespace {
class NodeImpl : public Node
{
public:
+ NodeImpl(NodeContext* context) { setContext(context); }
void initError(const bilingual_str& message) override { InitError(message); }
bool parseParameters(int argc, const char* const argv[], std::string& error) override
{
@@ -81,13 +82,13 @@ public:
}
bool appInitMain() override
{
- m_context.chain = MakeChain(m_context);
- return AppInitMain(m_context_ref, m_context);
+ m_context->chain = MakeChain(*m_context);
+ return AppInitMain(m_context_ref, *m_context);
}
void appShutdown() override
{
- Interrupt(m_context);
- Shutdown(m_context);
+ Interrupt(*m_context);
+ Shutdown(*m_context);
}
void startShutdown() override
{
@@ -108,19 +109,19 @@ public:
StopMapPort();
}
}
- void setupServerArgs() override { return SetupServerArgs(m_context); }
+ void setupServerArgs() override { return SetupServerArgs(*m_context); }
bool getProxy(Network net, proxyType& proxy_info) override { return GetProxy(net, proxy_info); }
size_t getNodeCount(CConnman::NumConnections flags) override
{
- return m_context.connman ? m_context.connman->GetNodeCount(flags) : 0;
+ return m_context->connman ? m_context->connman->GetNodeCount(flags) : 0;
}
bool getNodesStats(NodesStats& stats) override
{
stats.clear();
- if (m_context.connman) {
+ if (m_context->connman) {
std::vector<CNodeStats> stats_temp;
- m_context.connman->GetNodeStats(stats_temp);
+ m_context->connman->GetNodeStats(stats_temp);
stats.reserve(stats_temp.size());
for (auto& node_stats_temp : stats_temp) {
@@ -141,46 +142,46 @@ public:
}
bool getBanned(banmap_t& banmap) override
{
- if (m_context.banman) {
- m_context.banman->GetBanned(banmap);
+ if (m_context->banman) {
+ m_context->banman->GetBanned(banmap);
return true;
}
return false;
}
bool ban(const CNetAddr& net_addr, int64_t ban_time_offset) override
{
- if (m_context.banman) {
- m_context.banman->Ban(net_addr, ban_time_offset);
+ if (m_context->banman) {
+ m_context->banman->Ban(net_addr, ban_time_offset);
return true;
}
return false;
}
bool unban(const CSubNet& ip) override
{
- if (m_context.banman) {
- m_context.banman->Unban(ip);
+ if (m_context->banman) {
+ m_context->banman->Unban(ip);
return true;
}
return false;
}
bool disconnectByAddress(const CNetAddr& net_addr) override
{
- if (m_context.connman) {
- return m_context.connman->DisconnectNode(net_addr);
+ if (m_context->connman) {
+ return m_context->connman->DisconnectNode(net_addr);
}
return false;
}
bool disconnectById(NodeId id) override
{
- if (m_context.connman) {
- return m_context.connman->DisconnectNode(id);
+ if (m_context->connman) {
+ return m_context->connman->DisconnectNode(id);
}
return false;
}
- int64_t getTotalBytesRecv() override { return m_context.connman ? m_context.connman->GetTotalBytesRecv() : 0; }
- int64_t getTotalBytesSent() override { return m_context.connman ? m_context.connman->GetTotalBytesSent() : 0; }
- size_t getMempoolSize() override { return m_context.mempool ? m_context.mempool->size() : 0; }
- size_t getMempoolDynamicUsage() override { return m_context.mempool ? m_context.mempool->DynamicMemoryUsage() : 0; }
+ int64_t getTotalBytesRecv() override { return m_context->connman ? m_context->connman->GetTotalBytesRecv() : 0; }
+ int64_t getTotalBytesSent() override { return m_context->connman ? m_context->connman->GetTotalBytesSent() : 0; }
+ size_t getMempoolSize() override { return m_context->mempool ? m_context->mempool->size() : 0; }
+ size_t getMempoolDynamicUsage() override { return m_context->mempool ? m_context->mempool->DynamicMemoryUsage() : 0; }
bool getHeaderTip(int& height, int64_t& block_time) override
{
LOCK(::cs_main);
@@ -223,11 +224,11 @@ public:
bool getImporting() override { return ::fImporting; }
void setNetworkActive(bool active) override
{
- if (m_context.connman) {
- m_context.connman->SetNetworkActive(active);
+ if (m_context->connman) {
+ m_context->connman->SetNetworkActive(active);
}
}
- bool getNetworkActive() override { return m_context.connman && m_context.connman->GetNetworkActive(); }
+ bool getNetworkActive() override { return m_context->connman && m_context->connman->GetNetworkActive(); }
CFeeRate estimateSmartFee(int num_blocks, bool conservative, int* returned_target = nullptr) override
{
FeeCalculation fee_calc;
@@ -269,7 +270,7 @@ public:
std::vector<std::unique_ptr<Wallet>> getWallets() override
{
std::vector<std::unique_ptr<Wallet>> wallets;
- for (auto& client : m_context.chain_clients) {
+ for (auto& client : m_context->chain_clients) {
auto client_wallets = client->getWallets();
std::move(client_wallets.begin(), client_wallets.end(), std::back_inserter(wallets));
}
@@ -277,12 +278,12 @@ public:
}
std::unique_ptr<Wallet> loadWallet(const std::string& name, bilingual_str& error, std::vector<bilingual_str>& warnings) override
{
- return MakeWallet(LoadWallet(*m_context.chain, name, error, warnings));
+ return MakeWallet(LoadWallet(*m_context->chain, name, error, warnings));
}
std::unique_ptr<Wallet> createWallet(const SecureString& passphrase, uint64_t wallet_creation_flags, const std::string& name, bilingual_str& error, std::vector<bilingual_str>& warnings, WalletCreationStatus& status) override
{
std::shared_ptr<CWallet> wallet;
- status = CreateWallet(*m_context.chain, passphrase, wallet_creation_flags, name, error, warnings, wallet);
+ status = CreateWallet(*m_context->chain, passphrase, wallet_creation_flags, name, error, warnings, wallet);
return MakeWallet(wallet);
}
std::unique_ptr<Handler> handleInitMessage(InitMessageFn fn) override
@@ -336,13 +337,22 @@ public:
/* verification progress is unused when a header was received */ 0);
}));
}
- NodeContext* context() override { return &m_context; }
- NodeContext m_context;
- util::Ref m_context_ref{m_context};
+ NodeContext* context() override { return m_context; }
+ void setContext(NodeContext* context) override
+ {
+ m_context = context;
+ if (context) {
+ m_context_ref.Set(*context);
+ } else {
+ m_context_ref.Clear();
+ }
+ }
+ NodeContext* m_context{nullptr};
+ util::Ref m_context_ref;
};
} // namespace
-std::unique_ptr<Node> MakeNode() { return MakeUnique<NodeImpl>(); }
+std::unique_ptr<Node> MakeNode(NodeContext* context) { return MakeUnique<NodeImpl>(context); }
} // namespace interfaces
diff --git a/src/interfaces/node.h b/src/interfaces/node.h
index a9680c42b5..cd3cfe487d 100644
--- a/src/interfaces/node.h
+++ b/src/interfaces/node.h
@@ -268,12 +268,14 @@ public:
std::function<void(SynchronizationState, interfaces::BlockTip tip, double verification_progress)>;
virtual std::unique_ptr<Handler> handleNotifyHeaderTip(NotifyHeaderTipFn fn) = 0;
- //! Return pointer to internal chain interface, useful for testing.
+ //! Get and set internal node context. Useful for testing, but not
+ //! accessible across processes.
virtual NodeContext* context() { return nullptr; }
+ virtual void setContext(NodeContext* context) { }
};
//! Return implementation of Node interface.
-std::unique_ptr<Node> MakeNode();
+std::unique_ptr<Node> MakeNode(NodeContext* context = nullptr);
//! Block tip (could be a header or not, depends on the subscribed signal).
struct BlockTip {
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index c503a97be5..72c8e65c6b 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -187,7 +187,7 @@ namespace {
* million to make it highly unlikely for users to have issues with this
* filter.
*
- * We only need to add wtxids to this filter. For non-segwit
+ * We typically only add wtxids to this filter. For non-segwit
* transactions, the txid == wtxid, so this only prevents us from
* re-downloading non-segwit transactions when communicating with
* non-wtxidrelay peers -- which is important for avoiding malleation
@@ -196,6 +196,12 @@ namespace {
* the reject filter store wtxids is exactly what we want to avoid
* redownload of a rejected transaction.
*
+ * In cases where we can tell that a segwit transaction will fail
+ * validation no matter the witness, we may add the txid of such
+ * transaction to the filter as well. This can be helpful when
+ * communicating with txid-relay peers or if we were to otherwise fetch a
+ * transaction via txid (eg in our orphan handling).
+ *
* Memory used: 1.3 MB
*/
std::unique_ptr<CRollingBloomFilter> recentRejects GUARDED_BY(cs_main);
@@ -1161,6 +1167,7 @@ static bool MaybePunishNodeForTx(NodeId nodeid, const TxValidationState& state,
}
// Conflicting (but not necessarily invalid) data or different policy:
case TxValidationResult::TX_RECENT_CONSENSUS_CHANGE:
+ case TxValidationResult::TX_INPUTS_NOT_STANDARD:
case TxValidationResult::TX_NOT_STANDARD:
case TxValidationResult::TX_MISSING_INPUTS:
case TxValidationResult::TX_PREMATURE_SPEND:
@@ -2052,6 +2059,19 @@ void static ProcessOrphanTx(CConnman& connman, CTxMemPool& mempool, std::set<uin
// if we start doing this too early.
assert(recentRejects);
recentRejects->insert(orphanTx.GetWitnessHash());
+ // If the transaction failed for TX_INPUTS_NOT_STANDARD,
+ // then we know that the witness was irrelevant to the policy
+ // failure, since this check depends only on the txid
+ // (the scriptPubKey being spent is covered by the txid).
+ // Add the txid to the reject filter to prevent repeated
+ // processing of this transaction in the event that child
+ // transactions are later received (resulting in
+ // parent-fetching by txid via the orphan-handling logic).
+ if (orphan_state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && orphanTx.GetWitnessHash() != orphanTx.GetHash()) {
+ // We only add the txid if it differs from the wtxid, to
+ // avoid wasting entries in the rolling bloom filter.
+ recentRejects->insert(orphanTx.GetHash());
+ }
}
EraseOrphanTx(orphanHash);
done = true;
@@ -2943,7 +2963,7 @@ void ProcessMessage(
// We do the AlreadyHave() check using wtxid, rather than txid - in the
// absence of witness malleation, this is strictly better, because the
- // recent rejects filter may contain the wtxid but will never contain
+ // recent rejects filter may contain the wtxid but rarely contains
// the txid of a segwit transaction that has been rejected.
// In the presence of witness malleation, it's possible that by only
// doing the check with wtxid, we could overlook a transaction which
@@ -3035,6 +3055,17 @@ void ProcessMessage(
// if we start doing this too early.
assert(recentRejects);
recentRejects->insert(tx.GetWitnessHash());
+ // If the transaction failed for TX_INPUTS_NOT_STANDARD,
+ // then we know that the witness was irrelevant to the policy
+ // failure, since this check depends only on the txid
+ // (the scriptPubKey being spent is covered by the txid).
+ // Add the txid to the reject filter to prevent repeated
+ // processing of this transaction in the event that child
+ // transactions are later received (resulting in
+ // parent-fetching by txid via the orphan-handling logic).
+ if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD && tx.GetWitnessHash() != tx.GetHash()) {
+ recentRejects->insert(tx.GetHash());
+ }
if (RecursiveDynamicUsage(*ptx) < 100000) {
AddToCompactExtraTransactions(ptx);
}
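Condensing the caching rule that both net_processing hunks above implement, with a plain std::set standing in for the probabilistic CRollingBloomFilter (the stand-in container, string hash type, and function name are illustrative assumptions):

    #include <set>
    #include <string>

    // Toy stand-in for the rolling bloom filter keyed by transaction hashes.
    using ToyRejectFilter = std::set<std::string>;

    // Cache a policy rejection: always remember the wtxid; additionally remember the
    // txid when the failure provably did not depend on the witness
    // (TX_INPUTS_NOT_STANDARD) and the transaction is segwit (txid != wtxid), so later
    // txid-based fetches (e.g. orphan parent requests) are also short-circuited.
    void CacheRejection(ToyRejectFilter& recent_rejects, const std::string& txid,
                        const std::string& wtxid, bool witness_independent_failure)
    {
        recent_rejects.insert(wtxid);
        if (witness_independent_failure && txid != wtxid) {
            recent_rejects.insert(txid);
        }
    }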
diff --git a/src/policy/policy.cpp b/src/policy/policy.cpp
index c56abaf6c9..0e9820da1e 100644
--- a/src/policy/policy.cpp
+++ b/src/policy/policy.cpp
@@ -152,6 +152,8 @@ bool IsStandardTx(const CTransaction& tx, bool permit_bare_multisig, const CFeeR
* script can be anything; an attacker could use a very
* expensive-to-check-upon-redemption script like:
* DUP CHECKSIG DROP ... repeated 100 times... OP_1
+ *
+ * Note that only the non-witness portion of the transaction is checked here.
*/
bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs)
{
@@ -164,7 +166,11 @@ bool AreInputsStandard(const CTransaction& tx, const CCoinsViewCache& mapInputs)
std::vector<std::vector<unsigned char> > vSolutions;
TxoutType whichType = Solver(prev.scriptPubKey, vSolutions);
- if (whichType == TxoutType::NONSTANDARD) {
+ if (whichType == TxoutType::NONSTANDARD || whichType == TxoutType::WITNESS_UNKNOWN) {
+ // WITNESS_UNKNOWN failures are typically also caught with a policy
+ // flag in the script interpreter, but it can be helpful to catch
+ // this type of NONSTANDARD transaction earlier in transaction
+ // validation.
return false;
} else if (whichType == TxoutType::SCRIPTHASH) {
std::vector<std::vector<unsigned char> > stack;
diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp
index 523f5c429b..4f1e0056be 100644
--- a/src/qt/bitcoin.cpp
+++ b/src/qt/bitcoin.cpp
@@ -29,6 +29,7 @@
#include <interfaces/handler.h>
#include <interfaces/node.h>
+#include <node/context.h>
#include <noui.h>
#include <uint256.h>
#include <util/system.h>
@@ -430,7 +431,8 @@ int GuiMain(int argc, char* argv[])
SetupEnvironment();
util::ThreadSetInternalName("main");
- std::unique_ptr<interfaces::Node> node = interfaces::MakeNode();
+ NodeContext node_context;
+ std::unique_ptr<interfaces::Node> node = interfaces::MakeNode(&node_context);
// Subscribe to global signals from core
std::unique_ptr<interfaces::Handler> handler_message_box = node->handleMessageBox(noui_ThreadSafeMessageBox);
diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui
index fea759dee0..0016fb9739 100644
--- a/src/qt/forms/optionsdialog.ui
+++ b/src/qt/forms/optionsdialog.ui
@@ -459,10 +459,10 @@
<item>
<widget class="QCheckBox" name="connectSocksTor">
<property name="toolTip">
- <string>Connect to the Bitcoin network through a separate SOCKS5 proxy for Tor hidden services.</string>
+ <string>Connect to the Bitcoin network through a separate SOCKS5 proxy for Tor onion services.</string>
</property>
<property name="text">
- <string>Use separate SOCKS&amp;5 proxy to reach peers via Tor hidden services:</string>
+ <string>Use separate SOCKS&amp;5 proxy to reach peers via Tor onion services:</string>
</property>
</widget>
</item>
diff --git a/src/qt/test/addressbooktests.cpp b/src/qt/test/addressbooktests.cpp
index 9347ff9e42..035c8196bc 100644
--- a/src/qt/test/addressbooktests.cpp
+++ b/src/qt/test/addressbooktests.cpp
@@ -18,6 +18,7 @@
#include <key.h>
#include <key_io.h>
#include <wallet/wallet.h>
+#include <walletinitinterface.h>
#include <QApplication>
#include <QTimer>
@@ -59,6 +60,7 @@ void EditAddressAndSubmit(
void TestAddAddressesToSendBook(interfaces::Node& node)
{
TestChain100Setup test;
+ node.setContext(&test.m_node);
std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), WalletLocation(), CreateMockWalletDatabase());
wallet->SetupLegacyScriptPubKeyMan();
bool firstRun;
diff --git a/src/qt/test/test_main.cpp b/src/qt/test/test_main.cpp
index 12efca2503..031913bd02 100644
--- a/src/qt/test/test_main.cpp
+++ b/src/qt/test/test_main.cpp
@@ -52,7 +52,8 @@ int main(int argc, char* argv[])
BasicTestingSetup dummy{CBaseChainParams::REGTEST};
}
- std::unique_ptr<interfaces::Node> node = interfaces::MakeNode();
+ NodeContext node_context;
+ std::unique_ptr<interfaces::Node> node = interfaces::MakeNode(&node_context);
bool fInvalid = false;
diff --git a/src/qt/test/wallettests.cpp b/src/qt/test/wallettests.cpp
index 6648029bae..475fd589af 100644
--- a/src/qt/test/wallettests.cpp
+++ b/src/qt/test/wallettests.cpp
@@ -138,8 +138,7 @@ void TestGUI(interfaces::Node& node)
for (int i = 0; i < 5; ++i) {
test.CreateAndProcessBlock({}, GetScriptForRawPubKey(test.coinbaseKey.GetPubKey()));
}
- node.context()->connman = std::move(test.m_node.connman);
- node.context()->mempool = std::move(test.m_node.mempool);
+ node.setContext(&test.m_node);
std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(node.context()->chain.get(), WalletLocation(), CreateMockWalletDatabase());
bool firstRun;
wallet->LoadWallet(firstRun);
diff --git a/src/sync.cpp b/src/sync.cpp
index 10f0483189..4be13a3c48 100644
--- a/src/sync.cpp
+++ b/src/sync.cpp
@@ -149,12 +149,17 @@ static void push_lock(void* c, const CLockLocation& locklocation)
const LockPair p1 = std::make_pair(i.first, c);
if (lockdata.lockorders.count(p1))
continue;
- lockdata.lockorders.emplace(p1, lock_stack);
const LockPair p2 = std::make_pair(c, i.first);
+ if (lockdata.lockorders.count(p2)) {
+ auto lock_stack_copy = lock_stack;
+ lock_stack.pop_back();
+ potential_deadlock_detected(p1, lockdata.lockorders[p2], lock_stack_copy);
+ // potential_deadlock_detected() does not return.
+ }
+
+ lockdata.lockorders.emplace(p1, lock_stack);
lockdata.invlockorders.insert(p2);
- if (lockdata.lockorders.count(p2))
- potential_deadlock_detected(p1, lockdata.lockorders[p2], lockdata.lockorders[p1]);
}
}
@@ -259,6 +264,17 @@ void DeleteLock(void* cs)
}
}
+bool LockStackEmpty()
+{
+ LockData& lockdata = GetLockData();
+ std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
+ const auto it = lockdata.m_lock_stacks.find(std::this_thread::get_id());
+ if (it == lockdata.m_lock_stacks.end()) {
+ return true;
+ }
+ return it->second.empty();
+}
+
bool g_debug_lockorder_abort = true;
#endif /* DEBUG_LOCKORDER */
diff --git a/src/sync.h b/src/sync.h
index 77327d8bfe..05ff2ee8a9 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -56,6 +56,7 @@ template <typename MutexType>
void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs);
void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs);
void DeleteLock(void* cs);
+bool LockStackEmpty();
/**
* Call abort() if a potential lock order deadlock bug is detected, instead of
@@ -64,13 +65,14 @@ void DeleteLock(void* cs);
*/
extern bool g_debug_lockorder_abort;
#else
-void static inline EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
-void static inline LeaveCritical() {}
-void static inline CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {}
+inline void EnterCritical(const char* pszName, const char* pszFile, int nLine, void* cs, bool fTry = false) {}
+inline void LeaveCritical() {}
+inline void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line) {}
template <typename MutexType>
-void static inline AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs) {}
-void static inline AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
-void static inline DeleteLock(void* cs) {}
+inline void AssertLockHeldInternal(const char* pszName, const char* pszFile, int nLine, MutexType* cs) ASSERT_EXCLUSIVE_LOCK(cs) {}
+inline void AssertLockNotHeldInternal(const char* pszName, const char* pszFile, int nLine, void* cs) {}
+inline void DeleteLock(void* cs) {}
+inline bool LockStackEmpty() { return true; }
#endif
#define AssertLockHeld(cs) AssertLockHeldInternal(#cs, __FILE__, __LINE__, &cs)
#define AssertLockNotHeld(cs) AssertLockNotHeldInternal(#cs, __FILE__, __LINE__, &cs)
diff --git a/src/test/sync_tests.cpp b/src/test/sync_tests.cpp
index 3ea8714f3a..19029ebd3c 100644
--- a/src/test/sync_tests.cpp
+++ b/src/test/sync_tests.cpp
@@ -14,6 +14,7 @@ void TestPotentialDeadLockDetected(MutexType& mutex1, MutexType& mutex2)
{
LOCK2(mutex1, mutex2);
}
+ BOOST_CHECK(LockStackEmpty());
bool error_thrown = false;
try {
LOCK2(mutex2, mutex1);
@@ -21,6 +22,7 @@ void TestPotentialDeadLockDetected(MutexType& mutex1, MutexType& mutex2)
BOOST_CHECK_EQUAL(e.what(), "potential deadlock detected: mutex1 -> mutex2 -> mutex1");
error_thrown = true;
}
+ BOOST_CHECK(LockStackEmpty());
#ifdef DEBUG_LOCKORDER
BOOST_CHECK(error_thrown);
#else
@@ -40,9 +42,13 @@ BOOST_AUTO_TEST_CASE(potential_deadlock_detected)
RecursiveMutex rmutex1, rmutex2;
TestPotentialDeadLockDetected(rmutex1, rmutex2);
+ // The second test ensures that lock tracking data have not been broken by exception.
+ TestPotentialDeadLockDetected(rmutex1, rmutex2);
Mutex mutex1, mutex2;
TestPotentialDeadLockDetected(mutex1, mutex2);
+ // The second test ensures that lock tracking data have not been broken by exception.
+ TestPotentialDeadLockDetected(mutex1, mutex2);
#ifdef DEBUG_LOCKORDER
g_debug_lockorder_abort = prev;
diff --git a/src/test/system_tests.cpp b/src/test/system_tests.cpp
new file mode 100644
index 0000000000..a55145c738
--- /dev/null
+++ b/src/test/system_tests.cpp
@@ -0,0 +1,95 @@
+// Copyright (c) 2019 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+//
+#include <test/util/setup_common.h>
+#include <util/system.h>
+#include <univalue.h>
+
+#ifdef HAVE_BOOST_PROCESS
+#include <boost/process.hpp>
+#endif // HAVE_BOOST_PROCESS
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_FIXTURE_TEST_SUITE(system_tests, BasicTestingSetup)
+
+// At least one test is required (in case HAVE_BOOST_PROCESS is not defined).
+// Workaround for https://github.com/bitcoin/bitcoin/issues/19128
+BOOST_AUTO_TEST_CASE(dummy)
+{
+ BOOST_CHECK(true);
+}
+
+#ifdef HAVE_BOOST_PROCESS
+
+bool checkMessage(const std::runtime_error& ex)
+{
+ // On Linux & Mac: "No such file or directory"
+ // On Windows: "The system cannot find the file specified."
+ const std::string what(ex.what());
+ BOOST_CHECK(what.find("file") != std::string::npos);
+ return true;
+}
+
+bool checkMessageFalse(const std::runtime_error& ex)
+{
+ BOOST_CHECK_EQUAL(ex.what(), std::string("RunCommandParseJSON error: process(false) returned 1: \n"));
+ return true;
+}
+
+bool checkMessageStdErr(const std::runtime_error& ex)
+{
+ const std::string what(ex.what());
+ BOOST_CHECK(what.find("RunCommandParseJSON error:") != std::string::npos);
+ return checkMessage(ex);
+}
+
+BOOST_AUTO_TEST_CASE(run_command)
+{
+ {
+ const UniValue result = RunCommandParseJSON("");
+ BOOST_CHECK(result.isNull());
+ }
+ {
+#ifdef WIN32
+ // Windows requires single quotes to prevent escaping double quotes from the JSON...
+ const UniValue result = RunCommandParseJSON("echo '{\"success\": true}'");
+#else
+ // ... but Linux and macOS echo a single quote if it's used
+ const UniValue result = RunCommandParseJSON("echo \"{\"success\": true}\"");
+#endif
+ BOOST_CHECK(result.isObject());
+ const UniValue& success = find_value(result, "success");
+ BOOST_CHECK(!success.isNull());
+ BOOST_CHECK_EQUAL(success.getBool(), true);
+ }
+ {
+ // An invalid command is handled by Boost
+ BOOST_CHECK_EXCEPTION(RunCommandParseJSON("invalid_command"), boost::process::process_error, checkMessage); // Command failed
+ }
+ {
+ // Return non-zero exit code, no output to stderr
+ BOOST_CHECK_EXCEPTION(RunCommandParseJSON("false"), std::runtime_error, checkMessageFalse);
+ }
+ {
+ // Return non-zero exit code, with error message for stderr
+ BOOST_CHECK_EXCEPTION(RunCommandParseJSON("ls nosuchfile"), std::runtime_error, checkMessageStdErr);
+ }
+ {
+ BOOST_REQUIRE_THROW(RunCommandParseJSON("echo \"{\""), std::runtime_error); // Unable to parse JSON
+ }
+ // Test std::in, except for Windows
+#ifndef WIN32
+ {
+ const UniValue result = RunCommandParseJSON("cat", "{\"success\": true}");
+ BOOST_CHECK(result.isObject());
+ const UniValue& success = find_value(result, "success");
+ BOOST_CHECK(!success.isNull());
+ BOOST_CHECK_EQUAL(success.getBool(), true);
+ }
+#endif
+}
+#endif // HAVE_BOOST_PROCESS
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 14f65dcb7c..b2ae1cb845 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -11,6 +11,7 @@
#include <consensus/validation.h>
#include <crypto/sha256.h>
#include <init.h>
+#include <interfaces/chain.h>
#include <miner.h>
#include <net.h>
#include <net_processing.h>
@@ -32,6 +33,7 @@
#include <util/vector.h>
#include <validation.h>
#include <validationinterface.h>
+#include <walletinitinterface.h>
#include <functional>
@@ -104,6 +106,8 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve
SetupNetworking();
InitSignatureCache();
InitScriptExecutionCache();
+ m_node.chain = interfaces::MakeChain(m_node);
+ g_wallet_init_interface.Construct(m_node);
fCheckBlockIndex = true;
static bool noui_connected = false;
if (!noui_connected) {
diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp
index 84118b36ef..5d56d1ff89 100644
--- a/src/torcontrol.cpp
+++ b/src/torcontrol.cpp
@@ -405,7 +405,7 @@ static bool WriteBinaryFile(const fs::path &filename, const std::string &data)
/****** Bitcoin specific TorController implementation ********/
/** Controller that connects to Tor control socket, authenticate, then create
- * and maintain an ephemeral hidden service.
+ * and maintain an ephemeral onion service.
*/
class TorController
{
@@ -534,7 +534,7 @@ void TorController::auth_cb(TorControlConnection& _conn, const TorControlReply&
// Finally - now create the service
if (private_key.empty()) // No private key, generate one
private_key = "NEW:RSA1024"; // Explicitly request RSA1024 - see issue #9214
- // Request hidden service, redirect port.
+ // Request onion service, redirect port.
// Note that the 'virtual' port is always the default port to avoid decloaking nodes using other ports.
_conn.Command(strprintf("ADD_ONION %s Port=%i,127.0.0.1:%i", private_key, Params().GetDefaultPort(), GetListenPort()),
std::bind(&TorController::add_onion_cb, this, std::placeholders::_1, std::placeholders::_2));
diff --git a/src/util/system.cpp b/src/util/system.cpp
index 8164e884b1..7b74789b32 100644
--- a/src/util/system.cpp
+++ b/src/util/system.cpp
@@ -6,6 +6,10 @@
#include <sync.h>
#include <util/system.h>
+#ifdef HAVE_BOOST_PROCESS
+#include <boost/process.hpp>
+#endif // HAVE_BOOST_PROCESS
+
#include <chainparamsbase.h>
#include <util/strencodings.h>
#include <util/string.h>
@@ -1021,7 +1025,7 @@ bool FileCommit(FILE *file)
return false;
}
#else
- #if defined(__linux__) || defined(__NetBSD__)
+ #if defined(HAVE_FDATASYNC)
if (fdatasync(fileno(file)) != 0 && errno != EINVAL) { // Ignore EINVAL for filesystems that don't support sync
LogPrintf("%s: fdatasync failed: %d\n", __func__, errno);
return false;
@@ -1161,6 +1165,43 @@ void runCommand(const std::string& strCommand)
}
#endif
+#ifdef HAVE_BOOST_PROCESS
+UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in)
+{
+ namespace bp = boost::process;
+
+ UniValue result_json;
+ bp::opstream stdin_stream;
+ bp::ipstream stdout_stream;
+ bp::ipstream stderr_stream;
+
+ if (str_command.empty()) return UniValue::VNULL;
+
+ bp::child c(
+ str_command,
+ bp::std_out > stdout_stream,
+ bp::std_err > stderr_stream,
+ bp::std_in < stdin_stream
+ );
+ if (!str_std_in.empty()) {
+ stdin_stream << str_std_in << std::endl;
+ }
+ stdin_stream.pipe().close();
+
+ std::string result;
+ std::string error;
+ std::getline(stdout_stream, result);
+ std::getline(stderr_stream, error);
+
+ c.wait();
+ const int n_error = c.exit_code();
+ if (n_error) throw std::runtime_error(strprintf("RunCommandParseJSON error: process(%s) returned %d: %s\n", str_command, n_error, error));
+ if (!result_json.read(result)) throw std::runtime_error("Unable to parse JSON: " + result);
+
+ return result_json;
+}
+#endif // HAVE_BOOST_PROCESS
+
void SetupEnvironment()
{
#ifdef HAVE_MALLOPT_ARENA_MAX
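
As the unit tests above show, the new helper reports failure through two different exception types: boost::process raises boost::process::process_error when the command cannot be started at all, while a non-zero exit code or unparseable output is converted into std::runtime_error by the code above. A hedged sketch of a caller separating the two follows; the wrapper name and the error prefix are assumptions, not part of the patch.

    #include <univalue.h>
    #include <util/system.h>

    #include <stdexcept>
    #include <string>

    #ifdef HAVE_BOOST_PROCESS
    #include <boost/process.hpp>

    // Sketch: distinguish "could not start the process" from "process ran but failed".
    UniValue TryRunCommandParseJSON(const std::string& command)
    {
        // std::runtime_error (non-zero exit code, invalid JSON) propagates unchanged;
        // only start-up failures are rewrapped here.
        try {
            return RunCommandParseJSON(command);
        } catch (const boost::process::process_error& e) {
            // e.g. "No such file or directory" when the binary does not exist.
            throw std::runtime_error(std::string("could not start command: ") + e.what());
        }
    }
    #endif // HAVE_BOOST_PROCESS
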
diff --git a/src/util/system.h b/src/util/system.h
index 0bd14cc9ea..1df194ca84 100644
--- a/src/util/system.h
+++ b/src/util/system.h
@@ -37,6 +37,8 @@
#include <boost/thread/condition_variable.hpp> // for boost::thread_interrupted
+class UniValue;
+
// Application startup time (used for uptime calculation)
int64_t GetStartupTime();
@@ -96,6 +98,16 @@ std::string ShellEscape(const std::string& arg);
#if HAVE_SYSTEM
void runCommand(const std::string& strCommand);
#endif
+#ifdef HAVE_BOOST_PROCESS
+/**
+ * Execute a command which returns JSON, and parse the result.
+ *
+ * @param str_command The command to execute, including any arguments
+ * @param str_std_in Optional string to pass to the command's stdin
+ * @return parsed JSON
+ */
+UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in="");
+#endif // HAVE_BOOST_PROCESS
/**
* Most paths passed as configuration arguments are treated as relative to
diff --git a/src/validation.cpp b/src/validation.cpp
index 82032611ce..cf2f9dde62 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -688,8 +688,9 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws)
}
// Check for non-standard pay-to-script-hash in inputs
- if (fRequireStandard && !AreInputsStandard(tx, m_view))
- return state.Invalid(TxValidationResult::TX_NOT_STANDARD, "bad-txns-nonstandard-inputs");
+ if (fRequireStandard && !AreInputsStandard(tx, m_view)) {
+ return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs");
+ }
// Check for non-standard witness in P2WSH
if (tx.HasWitness() && fRequireStandard && !IsWitnessStandard(tx, m_view))
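
The new TX_INPUTS_NOT_STANDARD result lets callers single out this rejection: AreInputsStandard depends only on the transaction's inputs (the scriptSigs and the outputs they spend), all of which are committed to by the txid, so the verdict cannot be changed by supplying a different witness. That is why the plain txid, and not only the wtxid, can safely be remembered as rejected, which the p2p_segwit.py change further down verifies. The corresponding net_processing hunk is not part of this excerpt; the following is only a rough sketch of the idea, with a std::set standing in for the node's rolling reject filter and the function name invented for illustration.

    #include <consensus/validation.h>
    #include <primitives/transaction.h>
    #include <uint256.h>

    #include <set>

    // Rough sketch (not the actual net_processing change): remember rejected
    // transactions so repeated announcements are not fetched and validated again.
    void CacheRejectedTx(const CTransaction& tx, const TxValidationState& state,
                         std::set<uint256>& recent_rejects)
    {
        // Remember the rejected transaction by its wtxid.
        recent_rejects.insert(tx.GetWitnessHash());

        // For non-standard inputs the failure does not depend on the witness,
        // so the txid can be remembered as well (when it differs from the wtxid).
        if (state.GetResult() == TxValidationResult::TX_INPUTS_NOT_STANDARD &&
            tx.GetWitnessHash() != tx.GetHash()) {
            recent_rejects.insert(tx.GetHash());
        }
    }
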
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 558e64df12..58eaf54175 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -2338,7 +2338,7 @@ static UniValue getwalletinfo(const JSONRPCRequest& request)
{RPCResult::Type::NUM_TIME, "keypoololdest", "the " + UNIX_EPOCH_TIME + " of the oldest pre-generated key in the key pool. Legacy wallets only."},
{RPCResult::Type::NUM, "keypoolsize", "how many new keys are pre-generated (only counts external keys)"},
{RPCResult::Type::NUM, "keypoolsize_hd_internal", "how many new keys are pre-generated for internal use (used for change outputs, only appears if the wallet is using this feature, otherwise external keys are used)"},
- {RPCResult::Type::NUM_TIME, "unlocked_until", "the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked"},
+ {RPCResult::Type::NUM_TIME, "unlocked_until", /* optional */ true, "the " + UNIX_EPOCH_TIME + " until which the wallet is unlocked for transfers, or 0 if the wallet is locked (only present for passphrase-encrypted wallets)"},
{RPCResult::Type::STR_AMOUNT, "paytxfee", "the transaction fee configuration, set in " + CURRENCY_UNIT + "/kB"},
{RPCResult::Type::STR_HEX, "hdseedid", /* optional */ true, "the Hash160 of the HD seed (only present when HD is enabled)"},
{RPCResult::Type::BOOL, "private_keys_enabled", "false if privatekeys are disabled for this wallet (enforced watch-only wallet)"},
diff --git a/test/functional/p2p_filter.py b/test/functional/p2p_filter.py
index 741da3be31..c8e2616b79 100755
--- a/test/functional/p2p_filter.py
+++ b/test/functional/p2p_filter.py
@@ -218,7 +218,11 @@ class FilterTest(BitcoinTestFramework):
# Add peer but do not send version yet
filter_peer_without_nrelay = self.nodes[0].add_p2p_connection(P2PBloomFilter(), send_version=False, wait_for_verack=False)
# Send version with fRelay=False
- filter_peer_without_nrelay.wait_until(lambda: filter_peer_without_nrelay.is_connected, timeout=10)
+ filter_peer_without_nrelay.wait_until(
+ lambda: filter_peer_without_nrelay.is_connected,
+ timeout=10,
+ check_connected=False,
+ )
version_without_fRelay = msg_version()
version_without_fRelay.nRelay = 0
filter_peer_without_nrelay.send_message(version_without_fRelay)
diff --git a/test/functional/p2p_ping.py b/test/functional/p2p_ping.py
index e00af88cc4..5f5fd3e104 100755
--- a/test/functional/p2p_ping.py
+++ b/test/functional/p2p_ping.py
@@ -7,13 +7,8 @@
import time
-from test_framework.messages import (
- msg_pong,
-)
-from test_framework.mininode import (
- P2PInterface,
- wait_until,
-)
+from test_framework.messages import msg_pong
+from test_framework.mininode import P2PInterface
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
@@ -78,7 +73,7 @@ class PingPongTest(BitcoinTestFramework):
with self.nodes[0].assert_debug_log(['pong peer=0: Nonce mismatch']):
# mock time PING_INTERVAL ahead to trigger node into sending a ping
self.mock_forward(PING_INTERVAL + 1)
- wait_until(lambda: 'ping' in no_pong_node.last_message)
+ no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
self.mock_forward(9)
# Send the wrong pong
no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce - 1))
@@ -93,27 +88,27 @@ class PingPongTest(BitcoinTestFramework):
assert 'ping' not in no_pong_node.last_message
# mock time PING_INTERVAL ahead to trigger node into sending a ping
self.mock_forward(PING_INTERVAL + 1)
- wait_until(lambda: 'ping' in no_pong_node.last_message)
+ no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
ping_delay = 29
self.mock_forward(ping_delay)
- wait_until(lambda: 'ping' in no_pong_node.last_message)
+ no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce))
self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None)
self.log.info('Check that minping is decreased after a fast roundtrip')
# mock time PING_INTERVAL ahead to trigger node into sending a ping
self.mock_forward(PING_INTERVAL + 1)
- wait_until(lambda: 'ping' in no_pong_node.last_message)
+ no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
ping_delay = 9
self.mock_forward(ping_delay)
- wait_until(lambda: 'ping' in no_pong_node.last_message)
+ no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
no_pong_node.send_and_ping(msg_pong(no_pong_node.last_message.pop('ping').nonce))
self.check_peer_info(pingtime=ping_delay, minping=ping_delay, pingwait=None)
self.log.info('Check that peer is disconnected after ping timeout')
assert 'ping' not in no_pong_node.last_message
self.nodes[0].ping()
- wait_until(lambda: 'ping' in no_pong_node.last_message)
+ no_pong_node.wait_until(lambda: 'ping' in no_pong_node.last_message)
with self.nodes[0].assert_debug_log(['ping timeout: 1201.000000s']):
self.mock_forward(20 * 60 + 1)
time.sleep(4) # peertimeout + 1
diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py
index 728212ca23..2233fa5998 100755
--- a/test/functional/p2p_segwit.py
+++ b/test/functional/p2p_segwit.py
@@ -1418,7 +1418,7 @@ class SegWitTest(BitcoinTestFramework):
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
- # Spend everything in temp_utxo back to an OP_TRUE output.
+ # Spend everything in temp_utxo into a segwit v1 output.
tx3 = CTransaction()
total_value = 0
for i in temp_utxo:
@@ -1426,8 +1426,16 @@ class SegWitTest(BitcoinTestFramework):
tx3.wit.vtxinwit.append(CTxInWitness())
total_value += i.nValue
tx3.wit.vtxinwit[-1].scriptWitness.stack = [witness_program]
- tx3.vout.append(CTxOut(total_value - 1000, CScript([OP_TRUE])))
+ tx3.vout.append(CTxOut(total_value - 1000, script_pubkey))
tx3.rehash()
+
+ # First we test this transaction against a fRequireStandard=true node,
+ # making sure the txid is added to the reject filter
+ self.std_node.announce_tx_and_wait_for_getdata(tx3)
+ test_transaction_acceptance(self.nodes[1], self.std_node, tx3, with_witness=True, accepted=False, reason="bad-txns-nonstandard-inputs")
+ # Now the node will no longer ask for getdata of this transaction when it is advertised by the same txid
+ self.std_node.announce_tx_and_wait_for_getdata(tx3, timeout=5, success=False)
+
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
test_transaction_acceptance(self.nodes[0], self.test_node, tx3, with_witness=True, accepted=False, reason="reserved for soft-fork upgrades")
diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py
index 07811667a8..53d6e474d9 100755
--- a/test/functional/test_framework/mininode.py
+++ b/test/functional/test_framework/mininode.py
@@ -388,18 +388,22 @@ class P2PInterface(P2PConnection):
# Connection helper methods
- def wait_until(self, test_function, timeout=60):
+ def wait_until(self, test_function_in, *, timeout=60, check_connected=True):
+ def test_function():
+ if check_connected:
+ assert self.is_connected
+ return test_function_in()
+
wait_until(test_function, timeout=timeout, lock=mininode_lock, timeout_factor=self.timeout_factor)
def wait_for_disconnect(self, timeout=60):
test_function = lambda: not self.is_connected
- self.wait_until(test_function, timeout=timeout)
+ self.wait_until(test_function, timeout=timeout, check_connected=False)
# Message receiving helper methods
def wait_for_tx(self, txid, timeout=60):
def test_function():
- assert self.is_connected
if not self.last_message.get('tx'):
return False
return self.last_message['tx'].tx.rehash() == txid
@@ -408,14 +412,12 @@ class P2PInterface(P2PConnection):
def wait_for_block(self, blockhash, timeout=60):
def test_function():
- assert self.is_connected
return self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
self.wait_until(test_function, timeout=timeout)
def wait_for_header(self, blockhash, timeout=60):
def test_function():
- assert self.is_connected
last_headers = self.last_message.get('headers')
if not last_headers:
return False
@@ -425,7 +427,6 @@ class P2PInterface(P2PConnection):
def wait_for_merkleblock(self, blockhash, timeout=60):
def test_function():
- assert self.is_connected
last_filtered_block = self.last_message.get('merkleblock')
if not last_filtered_block:
return False
@@ -437,9 +438,7 @@ class P2PInterface(P2PConnection):
"""Waits for a getdata message.
The object hashes in the inventory vector must match the provided hash_list."""
-
def test_function():
- assert self.is_connected
last_data = self.last_message.get("getdata")
if not last_data:
return False
@@ -454,9 +453,7 @@ class P2PInterface(P2PConnection):
value must be explicitly cleared before calling this method, or this will return
immediately with success. TODO: change this method to take a hash value and only
return true if the correct block header has been requested."""
-
def test_function():
- assert self.is_connected
return self.last_message.get("getheaders")
self.wait_until(test_function, timeout=timeout)
@@ -467,7 +464,6 @@ class P2PInterface(P2PConnection):
raise NotImplementedError("wait_for_inv() will only verify the first inv object")
def test_function():
- assert self.is_connected
return self.last_message.get("inv") and \
self.last_message["inv"].inv[0].type == expected_inv[0].type and \
self.last_message["inv"].inv[0].hash == expected_inv[0].hash
@@ -478,7 +474,7 @@ class P2PInterface(P2PConnection):
def test_function():
return "verack" in self.last_message
- self.wait_until(test_function, timeout=timeout)
+ self.wait_until(test_function, timeout=timeout, check_connected=False)
# Message sending helper functions
@@ -491,7 +487,6 @@ class P2PInterface(P2PConnection):
self.send_message(msg_ping(nonce=self.ping_counter))
def test_function():
- assert self.is_connected
return self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
self.wait_until(test_function, timeout=timeout)
@@ -609,7 +604,11 @@ class P2PDataStore(P2PInterface):
self.send_message(msg_block(block=b))
else:
self.send_message(msg_headers([CBlockHeader(block) for block in blocks]))
- self.wait_until(lambda: blocks[-1].sha256 in self.getdata_requests, timeout=timeout)
+ self.wait_until(
+ lambda: blocks[-1].sha256 in self.getdata_requests,
+ timeout=timeout,
+ check_connected=success,
+ )
if expect_disconnect:
self.wait_for_disconnect(timeout=timeout)
@@ -677,6 +676,6 @@ class P2PTxInvStore(P2PInterface):
The mempool should mark unbroadcast=False for these transactions.
"""
# Wait until invs have been received (and getdatas sent) for each txid.
- self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout)
+ self.wait_until(lambda: set(self.tx_invs_received.keys()) == set([int(tx, 16) for tx in txns]), timeout=timeout)
# Flush messages and wait for the getdatas to be processed
self.sync_with_ping()
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 66bb2c89b5..a5abe77870 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -637,7 +637,7 @@ class TestNodeCLI():
raise subprocess.CalledProcessError(returncode, self.binary, output=cli_stderr)
try:
return json.loads(cli_stdout, parse_float=decimal.Decimal)
- except json.JSONDecodeError:
+ except (json.JSONDecodeError, decimal.InvalidOperation):
return cli_stdout.rstrip("\n")
class RPCOverloadWrapper():
diff --git a/test/lint/lint-git-commit-check.sh b/test/lint/lint-git-commit-check.sh
index 7cffd267dd..8947f67bf6 100755
--- a/test/lint/lint-git-commit-check.sh
+++ b/test/lint/lint-git-commit-check.sh
@@ -14,21 +14,22 @@ while getopts "?" opt; do
case $opt in
?)
echo "Usage: $0 [N]"
- echo " TRAVIS_COMMIT_RANGE='<commit range>' $0"
+ echo " COMMIT_RANGE='<commit range>' $0"
echo " $0 -?"
echo "Checks unmerged commits, the previous N commits, or a commit range."
- echo "TRAVIS_COMMIT_RANGE='47ba2c3...ee50c9e' $0"
+ echo "COMMIT_RANGE='47ba2c3...ee50c9e' $0"
exit ${EXIT_CODE}
;;
esac
done
-if [ -z "${TRAVIS_COMMIT_RANGE}" ]; then
- if [ -n "$1" ]; then
- TRAVIS_COMMIT_RANGE="HEAD~$1...HEAD"
- else
- TRAVIS_COMMIT_RANGE="origin/master..HEAD"
- fi
+if [ -z "${COMMIT_RANGE}" ]; then
+ if [ -n "$1" ]; then
+ COMMIT_RANGE="HEAD~$1...HEAD"
+ else
+ MERGE_BASE=$(git merge-base HEAD master)
+ COMMIT_RANGE="$MERGE_BASE..HEAD"
+ fi
fi
while IFS= read -r commit_hash || [[ -n "$commit_hash" ]]; do
@@ -41,6 +42,6 @@ while IFS= read -r commit_hash || [[ -n "$commit_hash" ]]; do
EXIT_CODE=1
fi
done < <(git log --format=%B -n 1 "$commit_hash")
-done < <(git log "${TRAVIS_COMMIT_RANGE}" --format=%H)
+done < <(git log "${COMMIT_RANGE}" --format=%H)
exit ${EXIT_CODE}
diff --git a/test/lint/lint-includes.sh b/test/lint/lint-includes.sh
index 611bd4a8c4..fde77aea2d 100755
--- a/test/lint/lint-includes.sh
+++ b/test/lint/lint-includes.sh
@@ -63,6 +63,7 @@ EXPECTED_BOOST_INCLUDES=(
boost/optional.hpp
boost/preprocessor/cat.hpp
boost/preprocessor/stringize.hpp
+ boost/process.hpp
boost/signals2/connection.hpp
boost/signals2/optional_last_value.hpp
boost/signals2/signal.hpp