243 files changed, 4975 insertions, 2682 deletions
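Several of the changes below introduce or rework build options: `--with-boost-process` becomes an explicit opt-in, a new `--enable-fuzz-binary` option (default yes) gains a `--disable-fuzz-binary` form used by the CI, gitian and guix descriptors, and the Boost::Thread dependency is removed. A minimal sketch of a native build that exercises the new flags (assuming the usual build dependencies are installed and that the Boost headers, including `boost/process.hpp`, are available):

```
./autogen.sh
./configure --with-boost-process --disable-fuzz-binary
make -j"$(nproc)"
```

Passing `--enable-fuzz` instead would override both settings, since the updated help text notes that it disables all other targets and forces the fuzz binary on.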
diff --git a/.cirrus.yml b/.cirrus.yml index 801164c368..691582239e 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -82,10 +82,10 @@ task: FILE_ENV: "./ci/test/00_setup_env_arm.sh" task: - name: 'Win64 [unit tests, no gui tests, no boost::process, no functional tests] [bionic]' + name: 'Win64 [unit tests, no gui tests, no boost::process, no functional tests] [focal]' << : *GLOBAL_TASK_TEMPLATE container: - image: ubuntu:bionic + image: ubuntu:focal env: FILE_ENV: "./ci/test/00_setup_env_win64.sh" @@ -160,10 +160,10 @@ task: FILE_ENV: "./ci/test/00_setup_env_native_nowallet.sh" task: - name: 'macOS 10.14 [gui, no tests] [bionic]' + name: 'macOS 10.14 [gui, no tests] [focal]' << : *GLOBAL_TASK_TEMPLATE container: - image: ubuntu:bionic + image: ubuntu:focal env: FILE_ENV: "./ci/test/00_setup_env_mac.sh" diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000..4967e675f6 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,26 @@ +# This is the top-most EditorConfig file. +root = true + +# For all files. +[*] +charset = utf-8 +end_of_line = lf +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +# Source code files +[*.{h,cpp,py,sh}] +indent_size = 4 + +# .cirrus.yml, .appveyor.yml, .fuzzbuzz.yml, etc. +[*.yml] +indent_size = 2 + +# Makefiles +[{*.am,Makefile.*.include}] +indent_style = tab + +# Autoconf scripts +[configure.ac] +indent_size = 2 diff --git a/.fuzzbuzz.yml b/.fuzzbuzz.yml index be9a1cd4e1..e40b4df165 100644 --- a/.fuzzbuzz.yml +++ b/.fuzzbuzz.yml @@ -5,7 +5,7 @@ environment: - CXXFLAGS=-fcoverage-mapping -fno-omit-frame-pointer -fprofile-instr-generate -gline-tables-only -O1 setup: - sudo apt-get update - - sudo apt-get install -y autoconf bsdmainutils clang git libboost-all-dev libc++1 libc++abi1 libc++abi-dev libc++-dev libclang1 libclang-dev libdb5.3++ libevent-dev libllvm-ocaml-dev libomp5 libomp-dev libqt5core5a libqt5dbus5 libqt5gui5 libtool llvm llvm-dev llvm-runtime pkg-config qttools5-dev qttools5-dev-tools software-properties-common + - sudo apt-get install -y autoconf bsdmainutils clang git libboost-system-dev libboost-filesystem-dev libboost-test-dev libc++1 libc++abi1 libc++abi-dev libc++-dev libclang1 libclang-dev libdb5.3++ libevent-dev libllvm-ocaml-dev libomp5 libomp-dev libqt5core5a libqt5dbus5 libqt5gui5 libtool llvm llvm-dev llvm-runtime pkg-config qttools5-dev qttools5-dev-tools software-properties-common - ./autogen.sh - CC=clang CXX=clang++ ./configure --enable-fuzz --with-sanitizers=address,fuzzer,undefined --enable-danger-fuzz-link-all - make diff --git a/Makefile.am b/Makefile.am index f6b824faaa..66be768277 100644 --- a/Makefile.am +++ b/Makefile.am @@ -4,7 +4,7 @@ # Pattern rule to print variables, e.g. make print-top_srcdir print-%: - @echo $* = $($*) + @echo '$*' = '$($*)' ACLOCAL_AMFLAGS = -I build-aux/m4 SUBDIRS = src @@ -3,6 +3,11 @@ Bitcoin Core integration/staging tree https://bitcoincore.org +For an immediately usable, binary version of the Bitcoin Core software, see +https://bitcoincore.org/en/download/. + +Further information about Bitcoin Core is available in the [doc folder](/doc). + What is Bitcoin? ---------------- @@ -12,9 +17,7 @@ with no central authority: managing transactions and issuing money are carried out collectively by the network. Bitcoin Core is the name of open source software which enables the use of this currency. 
-For more information, as well as an immediately usable, binary version of -the Bitcoin Core software, see https://bitcoincore.org/en/download/, or read the -[original whitepaper](https://bitcoincore.org/bitcoin.pdf). +For more information read the original Bitcoin whitepaper. License ------- @@ -53,7 +56,7 @@ submit new unit tests for old code. Unit tests can be compiled and run and extending unit tests can be found in [/src/test/README.md](/src/test/README.md). There are also [regression and integration tests](/test), written -in Python, that are run automatically on the build server. +in Python. These tests can be run (if the [test dependencies](/test) are installed) with: `test/functional/test_runner.py` The CI (Continuous Integration) systems make sure that every pull request is built for Windows, Linux, and macOS, @@ -77,5 +80,3 @@ Translations are periodically pulled from Transifex and merged into the git repo **Important**: We do not accept translation changes as GitHub pull requests because the next pull from Transifex would automatically overwrite them again. - -Translators should also subscribe to the [mailing list](https://groups.google.com/forum/#!forum/bitcoin-translators). diff --git a/build-aux/m4/ax_boost_process.m4 b/build-aux/m4/ax_boost_process.m4 deleted file mode 100644 index 5d20e67464..0000000000 --- a/build-aux/m4/ax_boost_process.m4 +++ /dev/null @@ -1,121 +0,0 @@ -# =========================================================================== -# https://www.gnu.org/software/autoconf-archive/ax_boost_process.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_BOOST_PROCESS -# -# DESCRIPTION -# -# Test for Process library from the Boost C++ libraries. The macro -# requires a preceding call to AX_BOOST_BASE. Further documentation is -# available at <http://randspringer.de/boost/index.html>. -# -# This macro calls: -# -# AC_SUBST(BOOST_PROCESS_LIB) -# -# And sets: -# -# HAVE_BOOST_PROCESS -# -# LICENSE -# -# Copyright (c) 2008 Thomas Porschberg <thomas@randspringer.de> -# Copyright (c) 2008 Michael Tindal -# Copyright (c) 2008 Daniel Casimiro <dan.casimiro@gmail.com> -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 2 - -AC_DEFUN([AX_BOOST_PROCESS], -[ - AC_ARG_WITH([boost-process], - AS_HELP_STRING([--with-boost-process@<:@=special-lib@:>@], - [use the Process library from boost - it is possible to specify a certain library for the linker - e.g. 
--with-boost-process=boost_process-gcc-mt ]), - [ - if test "$withval" = "no"; then - want_boost_process="no" - elif test "$withval" = "yes"; then - want_boost_process="yes" - ax_boost_user_process_lib="" - else - want_boost_process="yes" - ax_boost_user_process_lib="$withval" - fi - ], - [want_boost_process="yes"] - ) - - if test "x$want_boost_process" = "xyes"; then - AC_REQUIRE([AC_PROG_CC]) - AC_REQUIRE([AC_CANONICAL_BUILD]) - CPPFLAGS_SAVED="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" - export CPPFLAGS - - LDFLAGS_SAVED="$LDFLAGS" - LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" - export LDFLAGS - - AC_CACHE_CHECK(whether the Boost::Process library is available, - ax_cv_boost_process, - [AC_LANG_PUSH([C++]) - CXXFLAGS_SAVE=$CXXFLAGS - CXXFLAGS= - - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include <boost/process.hpp>]], - [[boost::process::child* child = new boost::process::child; delete child;]])], - ax_cv_boost_process=yes, ax_cv_boost_process=no) - CXXFLAGS=$CXXFLAGS_SAVE - AC_LANG_POP([C++]) - ]) - if test "x$ax_cv_boost_process" = "xyes"; then - AC_SUBST(BOOST_CPPFLAGS) - - AC_DEFINE(HAVE_BOOST_PROCESS,,[define if the Boost::Process library is available]) - BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` - - LDFLAGS_SAVE=$LDFLAGS - if test "x$ax_boost_user_process_lib" = "x"; then - for libextension in `ls -r $BOOSTLIBDIR/libboost_process* 2>/dev/null | sed 's,.*/lib,,' | sed 's,\..*,,'` ; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break], - [link_process="no"]) - done - if test "x$link_process" != "xyes"; then - for libextension in `ls -r $BOOSTLIBDIR/boost_process* 2>/dev/null | sed 's,.*/,,' | sed -e 's,\..*,,'` ; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break], - [link_process="no"]) - done - fi - - else - for ax_lib in $ax_boost_user_process_lib boost_process-$ax_boost_user_process_lib; do - AC_CHECK_LIB($ax_lib, exit, - [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break], - [link_process="no"]) - done - - fi - if test "x$ax_lib" = "x"; then - AC_MSG_ERROR(Could not find a version of the Boost::Process library!) - fi - if test "x$link_process" = "xno"; then - AC_MSG_ERROR(Could not link against $ax_lib !) - fi - fi - - CPPFLAGS="$CPPFLAGS_SAVED" - LDFLAGS="$LDFLAGS_SAVED" - fi -]) diff --git a/build-aux/m4/ax_boost_thread.m4 b/build-aux/m4/ax_boost_thread.m4 deleted file mode 100644 index 75e80e6e75..0000000000 --- a/build-aux/m4/ax_boost_thread.m4 +++ /dev/null @@ -1,187 +0,0 @@ -# =========================================================================== -# https://www.gnu.org/software/autoconf-archive/ax_boost_thread.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_BOOST_THREAD -# -# DESCRIPTION -# -# Test for Thread library from the Boost C++ libraries. The macro requires -# a preceding call to AX_BOOST_BASE. Further documentation is available at -# <http://randspringer.de/boost/index.html>. 
-# -# This macro calls: -# -# AC_SUBST(BOOST_THREAD_LIB) -# -# And sets: -# -# HAVE_BOOST_THREAD -# -# LICENSE -# -# Copyright (c) 2009 Thomas Porschberg <thomas@randspringer.de> -# Copyright (c) 2009 Michael Tindal -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 33 - -AC_DEFUN([AX_BOOST_THREAD], -[ - AC_ARG_WITH([boost-thread], - AS_HELP_STRING([--with-boost-thread@<:@=special-lib@:>@], - [use the Thread library from boost - - it is possible to specify a certain library for the linker - e.g. --with-boost-thread=boost_thread-gcc-mt ]), - [ - if test "$withval" = "yes"; then - want_boost="yes" - ax_boost_user_thread_lib="" - else - want_boost="yes" - ax_boost_user_thread_lib="$withval" - fi - ], - [want_boost="yes"] - ) - - if test "x$want_boost" = "xyes"; then - AC_REQUIRE([AC_PROG_CC]) - AC_REQUIRE([AC_CANONICAL_BUILD]) - CPPFLAGS_SAVED="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" - export CPPFLAGS - - LDFLAGS_SAVED="$LDFLAGS" - LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" - export LDFLAGS - - AC_CACHE_CHECK(whether the Boost::Thread library is available, - ax_cv_boost_thread, - [AC_LANG_PUSH([C++]) - CXXFLAGS_SAVE=$CXXFLAGS - - case "x$host_os" in - xsolaris ) - CXXFLAGS="-pthreads $CXXFLAGS" - break; - ;; - xmingw32 ) - CXXFLAGS="-mthreads $CXXFLAGS" - break; - ;; - *android* ) - break; - ;; - * ) - CXXFLAGS="-pthread $CXXFLAGS" - break; - ;; - esac - - AC_COMPILE_IFELSE([ - AC_LANG_PROGRAM( - [[@%:@include <boost/thread/thread.hpp>]], - [[boost::thread_group thrds; - return 0;]])], - ax_cv_boost_thread=yes, ax_cv_boost_thread=no) - CXXFLAGS=$CXXFLAGS_SAVE - AC_LANG_POP([C++]) - ]) - if test "x$ax_cv_boost_thread" = "xyes"; then - case "x$host_os" in - xsolaris ) - BOOST_CPPFLAGS="-pthreads $BOOST_CPPFLAGS" - break; - ;; - xmingw32 ) - BOOST_CPPFLAGS="-mthreads $BOOST_CPPFLAGS" - break; - ;; - *android* ) - break; - ;; - * ) - BOOST_CPPFLAGS="-pthread $BOOST_CPPFLAGS" - break; - ;; - esac - - AC_SUBST(BOOST_CPPFLAGS) - - AC_DEFINE(HAVE_BOOST_THREAD,, - [define if the Boost::Thread library is available]) - BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` - - LDFLAGS_SAVE=$LDFLAGS - case "x$host_os" in - *bsd* ) - LDFLAGS="-pthread $LDFLAGS" - break; - ;; - esac - if test "x$ax_boost_user_thread_lib" = "x"; then - for libextension in `ls -r $BOOSTLIBDIR/libboost_thread* 2>/dev/null | sed 's,.*/lib,,' | sed 's,\..*,,'`; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [link_thread="yes"; break], - [link_thread="no"]) - done - if test "x$link_thread" != "xyes"; then - for libextension in `ls -r $BOOSTLIBDIR/boost_thread* 2>/dev/null | sed 's,.*/,,' | sed 's,\..*,,'`; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [link_thread="yes"; break], - [link_thread="no"]) - done - fi - - else - for ax_lib in $ax_boost_user_thread_lib boost_thread-$ax_boost_user_thread_lib; do - AC_CHECK_LIB($ax_lib, exit, - [link_thread="yes"; break], - [link_thread="no"]) - done - - fi - if test "x$ax_lib" = "x"; then - AC_MSG_ERROR(Could not find a version of the Boost::Thread library!) - fi - if test "x$link_thread" = "xno"; then - AC_MSG_ERROR(Could not link against $ax_lib !) 
- else - BOOST_THREAD_LIB="-l$ax_lib" - case "x$host_os" in - *bsd* ) - BOOST_LDFLAGS="-pthread $BOOST_LDFLAGS" - break; - ;; - xsolaris ) - BOOST_THREAD_LIB="$BOOST_THREAD_LIB -lpthread" - break; - ;; - xmingw32 ) - break; - ;; - *android* ) - break; - ;; - * ) - BOOST_THREAD_LIB="$BOOST_THREAD_LIB -lpthread" - break; - ;; - esac - AC_SUBST(BOOST_THREAD_LIB) - fi - fi - - CPPFLAGS="$CPPFLAGS_SAVED" - LDFLAGS="$LDFLAGS_SAVED" - fi -]) diff --git a/build-aux/m4/ax_gcc_func_attribute.m4 b/build-aux/m4/ax_gcc_func_attribute.m4 deleted file mode 100644 index c788ca9bd4..0000000000 --- a/build-aux/m4/ax_gcc_func_attribute.m4 +++ /dev/null @@ -1,223 +0,0 @@ -# =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_gcc_func_attribute.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_GCC_FUNC_ATTRIBUTE(ATTRIBUTE) -# -# DESCRIPTION -# -# This macro checks if the compiler supports one of GCC's function -# attributes; many other compilers also provide function attributes with -# the same syntax. Compiler warnings are used to detect supported -# attributes as unsupported ones are ignored by default so quieting -# warnings when using this macro will yield false positives. -# -# The ATTRIBUTE parameter holds the name of the attribute to be checked. -# -# If ATTRIBUTE is supported define HAVE_FUNC_ATTRIBUTE_<ATTRIBUTE>. -# -# The macro caches its result in the ax_cv_have_func_attribute_<attribute> -# variable. -# -# The macro currently supports the following function attributes: -# -# alias -# aligned -# alloc_size -# always_inline -# artificial -# cold -# const -# constructor -# constructor_priority for constructor attribute with priority -# deprecated -# destructor -# dllexport -# dllimport -# error -# externally_visible -# flatten -# format -# format_arg -# gnu_inline -# hot -# ifunc -# leaf -# malloc -# noclone -# noinline -# nonnull -# noreturn -# nothrow -# optimize -# pure -# unused -# used -# visibility -# warning -# warn_unused_result -# weak -# weakref -# -# Unsuppored function attributes will be tested with a prototype returning -# an int and not accepting any arguments and the result of the check might -# be wrong or meaningless so use with care. -# -# LICENSE -# -# Copyright (c) 2013 Gabriele Svelto <gabriele.svelto@gmail.com> -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. 
- -#serial 3 - -AC_DEFUN([AX_GCC_FUNC_ATTRIBUTE], [ - AS_VAR_PUSHDEF([ac_var], [ax_cv_have_func_attribute_$1]) - - AC_CACHE_CHECK([for __attribute__(($1))], [ac_var], [ - AC_LINK_IFELSE([AC_LANG_PROGRAM([ - m4_case([$1], - [alias], [ - int foo( void ) { return 0; } - int bar( void ) __attribute__(($1("foo"))); - ], - [aligned], [ - int foo( void ) __attribute__(($1(32))); - ], - [alloc_size], [ - void *foo(int a) __attribute__(($1(1))); - ], - [always_inline], [ - inline __attribute__(($1)) int foo( void ) { return 0; } - ], - [artificial], [ - inline __attribute__(($1)) int foo( void ) { return 0; } - ], - [cold], [ - int foo( void ) __attribute__(($1)); - ], - [const], [ - int foo( void ) __attribute__(($1)); - ], - [constructor_priority], [ - int foo( void ) __attribute__((__constructor__(65535/2))); - ], - [constructor], [ - int foo( void ) __attribute__(($1)); - ], - [deprecated], [ - int foo( void ) __attribute__(($1(""))); - ], - [destructor], [ - int foo( void ) __attribute__(($1)); - ], - [dllexport], [ - __attribute__(($1)) int foo( void ) { return 0; } - ], - [dllimport], [ - int foo( void ) __attribute__(($1)); - ], - [error], [ - int foo( void ) __attribute__(($1(""))); - ], - [externally_visible], [ - int foo( void ) __attribute__(($1)); - ], - [flatten], [ - int foo( void ) __attribute__(($1)); - ], - [format], [ - int foo(const char *p, ...) __attribute__(($1(printf, 1, 2))); - ], - [format_arg], [ - char *foo(const char *p) __attribute__(($1(1))); - ], - [gnu_inline], [ - inline __attribute__(($1)) int foo( void ) { return 0; } - ], - [hot], [ - int foo( void ) __attribute__(($1)); - ], - [ifunc], [ - int my_foo( void ) { return 0; } - static int (*resolve_foo(void))(void) { return my_foo; } - int foo( void ) __attribute__(($1("resolve_foo"))); - ], - [leaf], [ - __attribute__(($1)) int foo( void ) { return 0; } - ], - [malloc], [ - void *foo( void ) __attribute__(($1)); - ], - [noclone], [ - int foo( void ) __attribute__(($1)); - ], - [noinline], [ - __attribute__(($1)) int foo( void ) { return 0; } - ], - [nonnull], [ - int foo(char *p) __attribute__(($1(1))); - ], - [noreturn], [ - void foo( void ) __attribute__(($1)); - ], - [nothrow], [ - int foo( void ) __attribute__(($1)); - ], - [optimize], [ - __attribute__(($1(3))) int foo( void ) { return 0; } - ], - [pure], [ - int foo( void ) __attribute__(($1)); - ], - [unused], [ - int foo( void ) __attribute__(($1)); - ], - [used], [ - int foo( void ) __attribute__(($1)); - ], - [visibility], [ - int foo_def( void ) __attribute__(($1("default"))); - int foo_hid( void ) __attribute__(($1("hidden"))); - int foo_int( void ) __attribute__(($1("internal"))); - int foo_pro( void ) __attribute__(($1("protected"))); - ], - [warning], [ - int foo( void ) __attribute__(($1(""))); - ], - [warn_unused_result], [ - int foo( void ) __attribute__(($1)); - ], - [weak], [ - int foo( void ) __attribute__(($1)); - ], - [weakref], [ - static int foo( void ) { return 0; } - static int bar( void ) __attribute__(($1("foo"))); - ], - [ - m4_warn([syntax], [Unsupported attribute $1, the test may fail]) - int foo( void ) __attribute__(($1)); - ] - )], []) - ], - dnl GCC doesn't exit with an error if an unknown attribute is - dnl provided but only outputs a warning, so accept the attribute - dnl only if no warning were issued. 
- [AS_IF([test -s conftest.err], - [AS_VAR_SET([ac_var], [no])], - [AS_VAR_SET([ac_var], [yes])])], - [AS_VAR_SET([ac_var], [no])]) - ]) - - AS_IF([test yes = AS_VAR_GET([ac_var])], - [AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_FUNC_ATTRIBUTE_$1), 1, - [Define to 1 if the system has the `$1' function attribute])], []) - - AS_VAR_POPDEF([ac_var]) -]) diff --git a/build_msvc/bitcoin_config.h b/build_msvc/bitcoin_config.h index 53aead38b5..23c554e396 100644 --- a/build_msvc/bitcoin_config.h +++ b/build_msvc/bitcoin_config.h @@ -56,9 +56,6 @@ /* define if the Boost::System library is available */ #define HAVE_BOOST_SYSTEM /**/ -/* define if the Boost::Thread library is available */ -#define HAVE_BOOST_THREAD /**/ - /* define if the Boost::Unit_Test_Framework library is available */ #define HAVE_BOOST_UNIT_TEST_FRAMEWORK /**/ diff --git a/build_msvc/vcpkg.json b/build_msvc/vcpkg.json index dfd3929c44..42b9a5d16f 100644 --- a/build_msvc/vcpkg.json +++ b/build_msvc/vcpkg.json @@ -8,7 +8,6 @@ "boost-process", "boost-signals2", "boost-test", - "boost-thread", "sqlite3", "double-conversion", { diff --git a/ci/lint/04_install.sh b/ci/lint/04_install.sh index a0b579de1e..343b82a1ad 100755 --- a/ci/lint/04_install.sh +++ b/ci/lint/04_install.sh @@ -15,6 +15,7 @@ ${CI_RETRY_EXE} pip3 install codespell==2.0.0 ${CI_RETRY_EXE} pip3 install flake8==3.8.3 ${CI_RETRY_EXE} pip3 install yq ${CI_RETRY_EXE} pip3 install mypy==0.781 +${CI_RETRY_EXE} pip3 install vulture==2.3 SHELLCHECK_VERSION=v0.7.1 curl -sL "https://github.com/koalaman/shellcheck/releases/download/${SHELLCHECK_VERSION}/shellcheck-${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" | tar --xz -xf - --directory /tmp/ diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh index b0de2ec0bb..6da011d19b 100644 --- a/ci/test/00_setup_env_mac.sh +++ b/ci/test/00_setup_env_mac.sh @@ -7,9 +7,9 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_macos_cross -export DOCKER_NAME_TAG=ubuntu:18.04 # Check that bionic can cross-compile to macos (bionic is used in the gitian build as well) +export DOCKER_NAME_TAG=ubuntu:20.04 # Check that Focal can cross-compile to macos (Focal is used in the gitian build as well) export HOST=x86_64-apple-darwin18 -export PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python3-dev python3-setuptools xorriso" +export PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools libtinfo5 python3-dev python3-setuptools xorriso" export XCODE_VERSION=11.3.1 export XCODE_BUILD_ID=11C505 export RUN_UNIT_TESTS=false diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh index f682486088..e47119e6fa 100644 --- a/ci/test/00_setup_env_native_asan.sh +++ b/ci/test/00_setup_env_native_asan.sh @@ -7,7 +7,7 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_asan -export PACKAGES="clang llvm python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libqrencode-dev libsqlite3-dev" +export PACKAGES="clang llvm python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libqrencode-dev libsqlite3-dev" export DOCKER_NAME_TAG=ubuntu:20.04 export NO_DEPENDS=1 export GOAL="install" diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh 
index b7157c608d..ebb5a1cabe 100644 --- a/ci/test/00_setup_env_native_fuzz.sh +++ b/ci/test/00_setup_env_native_fuzz.sh @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 export DOCKER_NAME_TAG="ubuntu:20.04" export CONTAINER_NAME=ci_native_fuzz -export PACKAGES="clang llvm python3 libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev" +export PACKAGES="clang llvm python3 libevent-dev bsdmainutils libboost-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev" export NO_DEPENDS=1 export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false diff --git a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh index e06a40eb23..2cf672b91e 100644 --- a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh +++ b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 export DOCKER_NAME_TAG="ubuntu:20.04" export CONTAINER_NAME=ci_native_fuzz_valgrind -export PACKAGES="clang llvm python3 libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev valgrind" +export PACKAGES="clang llvm python3 libevent-dev bsdmainutils libboost-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev valgrind" export NO_DEPENDS=1 export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh index 567145fe47..bbc43cfabd 100644 --- a/ci/test/00_setup_env_native_qt5.sh +++ b/ci/test/00_setup_env_native_qt5.sh @@ -16,4 +16,5 @@ export RUN_UNIT_TESTS_SEQUENTIAL="true" export RUN_UNIT_TESTS="false" export GOAL="install" export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.2 v0.16.3 v0.17.2 v0.18.1 v0.19.1" -export BITCOIN_CONFIG="--enable-zmq --with-libs=no --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\" --with-boost-process" +export BITCOIN_CONFIG="--enable-zmq --with-libs=no --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports +--enable-debug --disable-fuzz-binary CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\" --with-boost-process" diff --git a/ci/test/00_setup_env_native_valgrind.sh b/ci/test/00_setup_env_native_valgrind.sh index f0c153158b..e079a7057c 100644 --- a/ci/test/00_setup_env_native_valgrind.sh +++ b/ci/test/00_setup_env_native_valgrind.sh @@ -7,7 +7,7 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_valgrind -export PACKAGES="valgrind clang llvm python3-zmq libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libsqlite3-dev" +export PACKAGES="valgrind clang llvm python3-zmq libevent-dev bsdmainutils libboost-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libsqlite3-dev" export USE_VALGRIND=1 export NO_DEPENDS=1 export TEST_RUNNER_EXTRA="--exclude rpc_bind" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh index affaaaa1aa..1e68d2a61a 100644 --- a/ci/test/00_setup_env_win64.sh +++ b/ci/test/00_setup_env_win64.sh @@ -7,11 +7,11 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_win64 -export DOCKER_NAME_TAG=ubuntu:18.04 # Check that bionic can cross-compile to win64 (bionic is used in the gitian build 
as well) +export DOCKER_NAME_TAG=ubuntu:20.04 # Check that Focal can cross-compile to win64 (Focal is used in the gitian build as well) export HOST=x86_64-w64-mingw32 -export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64 file" +export DPKG_ADD_ARCH="i386" +export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64 wine32 file" export RUN_FUNCTIONAL_TESTS=false -export RUN_SECURITY_TESTS="true" export GOAL="deploy" export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests --without-boost-process" diff --git a/ci/test/05_before_script.sh b/ci/test/05_before_script.sh index 4644f28a4e..f69afd8a26 100755 --- a/ci/test/05_before_script.sh +++ b/ci/test/05_before_script.sh @@ -19,7 +19,7 @@ OSX_SDK_BASENAME="Xcode-${XCODE_VERSION}-${XCODE_BUILD_ID}-extracted-SDK-with-li OSX_SDK_PATH="${DEPENDS_DIR}/sdk-sources/${OSX_SDK_BASENAME}" if [ -n "$XCODE_VERSION" ] && [ ! -f "$OSX_SDK_PATH" ]; then - curl --location --fail "${SDK_URL}/${OSX_SDK_BASENAME}" -o "$OSX_SDK_PATH" + DOCKER_EXEC curl --location --fail "${SDK_URL}/${OSX_SDK_BASENAME}" -o "$OSX_SDK_PATH" fi if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then diff --git a/ci/test/wrap-wine.sh b/ci/test/wrap-wine.sh index 58a8983e6e..82964897e1 100755 --- a/ci/test/wrap-wine.sh +++ b/ci/test/wrap-wine.sh @@ -13,7 +13,7 @@ for b_name in {"${BASE_OUTDIR}/bin"/*,src/secp256k1/*tests,src/univalue/{no_nul, echo "Wrap $b ..." mv "$b" "${b}_orig" echo '#!/usr/bin/env bash' > "$b" - echo "wine64 \"${b}_orig\" \"\$@\"" >> "$b" + echo "( wine \"${b}_orig\" \"\$@\" ) || ( sleep 1 && wine \"${b}_orig\" \"\$@\" )" >> "$b" chmod +x "$b" fi done diff --git a/configure.ac b/configure.ac index 23a33b8c4f..c0e3b32e3b 100644 --- a/configure.ac +++ b/configure.ac @@ -184,10 +184,16 @@ AC_ARG_ENABLE([extended-functional-tests], AC_ARG_ENABLE([fuzz], AS_HELP_STRING([--enable-fuzz], - [enable building of fuzz targets (default no). enabling this will disable all other targets]), + [build for fuzzing (default no). enabling this will disable all other targets and override --{enable,disable}-fuzz-binary]), [enable_fuzz=$enableval], [enable_fuzz=no]) +AC_ARG_ENABLE([fuzz-binary], + AS_HELP_STRING([--enable-fuzz-binary], + [enable building of fuzz binary (default yes).]), + [enable_fuzz_binary=$enableval], + [enable_fuzz_binary=yes]) + AC_ARG_ENABLE([danger_fuzz_link_all], AS_HELP_STRING([--enable-danger-fuzz-link-all], [Danger! Modifies source code. Needs git and gnu sed installed. Link each fuzz target (default no).]), @@ -332,6 +338,11 @@ AC_ARG_ENABLE([werror], [enable_werror=$enableval], [enable_werror=no]) +AC_ARG_WITH([boost-process], + [AS_HELP_STRING([--with-boost-process],[Opt in to using Boost Process (default is no)])], + [boost_process=$withval], + [boost_process=no]) + AC_LANG_PUSH([C++]) dnl Check for a flag to turn compiler warnings into errors. 
This is helpful for checks which may @@ -427,6 +438,7 @@ if test "x$enable_werror" = "xyes"; then AX_CHECK_COMPILE_FLAG([-Werror=suggest-override],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=suggest-override"],,[[$CXXFLAG_WERROR]], [AC_LANG_SOURCE([[struct A { virtual void f(); }; struct B : A { void f() final; };]])]) AX_CHECK_COMPILE_FLAG([-Werror=unreachable-code-loop-increment],[ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=unreachable-code-loop-increment"],,[[$CXXFLAG_WERROR]]) + AX_CHECK_COMPILE_FLAG([-Werror=mismatched-tags], [ERROR_CXXFLAGS="$ERROR_CXXFLAGS -Werror=mismatched-tags"], [], [$CXXFLAG_WERROR]) fi if test "x$CXXFLAGS_overridden" = "xno"; then @@ -650,7 +662,7 @@ case $host in AC_MSG_ERROR("windres not found") fi - CPPFLAGS="$CPPFLAGS -D_MT -DWIN32 -D_WINDOWS -DBOOST_THREAD_USE_LIB -D_WIN32_WINNT=0x0601 -D_WIN32_IE=0x0501 -DWIN32_LEAN_AND_MEAN" + CPPFLAGS="$CPPFLAGS -D_MT -DWIN32 -D_WINDOWS -D_WIN32_WINNT=0x0601 -D_WIN32_IE=0x0501 -DWIN32_LEAN_AND_MEAN" dnl libtool insists upon adding -nostdlib and a list of objects/libs to link against. dnl That breaks our ability to build dll's with static libgcc/libstdc++/libssp. Override @@ -806,10 +818,6 @@ if test x$ac_cv_sys_large_files != x && CPPFLAGS="$CPPFLAGS -D_LARGE_FILES=$ac_cv_sys_large_files" fi -AX_GCC_FUNC_ATTRIBUTE([visibility]) -AX_GCC_FUNC_ATTRIBUTE([dllexport]) -AX_GCC_FUNC_ATTRIBUTE([dllimport]) - if test x$use_glibc_compat != xno; then AX_CHECK_LINK_FLAG([[-Wl,--wrap=__divmoddi4]], [COMPAT_LDFLAGS="$COMPAT_LDFLAGS -Wl,--wrap=__divmoddi4"]) AX_CHECK_LINK_FLAG([[-Wl,--wrap=log2f]], [COMPAT_LDFLAGS="$COMPAT_LDFLAGS -Wl,--wrap=log2f"]) @@ -853,7 +861,10 @@ if test x$use_hardening != xno; then AX_CHECK_COMPILE_FLAG([-Wstack-protector],[HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -Wstack-protector"]) AX_CHECK_COMPILE_FLAG([-fstack-protector-all],[HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fstack-protector-all"]) - AX_CHECK_COMPILE_FLAG([-fcf-protection=full],[HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fcf-protection=full"]) + dnl -fcf-protection used with Clang 7 causes ld to emit warnings: + dnl ld: error: ... <corrupt x86 feature size: 0x8> + dnl Use CHECK_LINK_FLAG & --fatal-warnings to ensure we wont use the flag in this case. + AX_CHECK_LINK_FLAG([-fcf-protection=full],[HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fcf-protection=full"],, [[$LDFLAG_WERROR]]) dnl stack-clash-protection does not work properly when building for Windows. dnl We use the test case from https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90458 @@ -974,13 +985,13 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ [ AC_MSG_RESULT(no)] ) -AC_MSG_CHECKING([for visibility attribute]) -AC_LINK_IFELSE([AC_LANG_SOURCE([ - int foo_def( void ) __attribute__((visibility("default"))); +AC_MSG_CHECKING([for default visibility attribute]) +AC_COMPILE_IFELSE([AC_LANG_SOURCE([ + int foo(void) __attribute__((visibility("default"))); int main(){} ])], [ - AC_DEFINE(HAVE_VISIBILITY_ATTRIBUTE,1,[Define if the visibility attribute is supported.]) + AC_DEFINE(HAVE_DEFAULT_VISIBILITY_ATTRIBUTE,1,[Define if the visibility attribute is supported.]) AC_MSG_RESULT(yes) ], [ @@ -991,6 +1002,18 @@ AC_LINK_IFELSE([AC_LANG_SOURCE([ ] ) +AC_MSG_CHECKING([for dllexport attribute]) +AC_COMPILE_IFELSE([AC_LANG_SOURCE([ + __declspec(dllexport) int foo(void); + int main(){} + ])], + [ + AC_DEFINE(HAVE_DLLEXPORT_ATTRIBUTE,1,[Define if the dllexport attribute is supported.]) + AC_MSG_RESULT(yes) + ], + [AC_MSG_RESULT(no)] +) + dnl thread_local is currently disabled when building with glibc back compat. 
dnl Our minimum supported glibc is 2.17, however support for thread_local dnl did not arrive in glibc until 2.18. @@ -1173,12 +1196,6 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ [ AC_MSG_RESULT(no); HAVE_WEAK_GETAUXVAL=0 ] ) -dnl Check for reduced exports -if test x$use_reduce_exports = xyes; then - AX_CHECK_COMPILE_FLAG([-fvisibility=hidden],[RE_CXXFLAGS="-fvisibility=hidden"], - [AC_MSG_ERROR([Cannot set default symbol visibility. Use --disable-reduce-exports.])]) -fi - AC_MSG_CHECKING([for std::system]) AC_LINK_IFELSE( [ AC_LANG_PROGRAM( @@ -1223,7 +1240,7 @@ AC_DEFUN([SUPPRESS_WARNINGS], dnl enable-fuzz should disable all other targets if test "x$enable_fuzz" = "xyes"; then - AC_MSG_WARN(enable-fuzz will disable all other targets) + AC_MSG_WARN(enable-fuzz will disable all other targets and force --enable-fuzz-binary=yes) build_bitcoin_utils=no build_bitcoin_cli=no build_bitcoin_tx=no @@ -1239,10 +1256,11 @@ if test "x$enable_fuzz" = "xyes"; then use_upnp=no use_natpmp=no use_zmq=no + enable_fuzz_binary=yes AX_CHECK_PREPROC_FLAG([-DABORT_ON_FAILED_ASSUME],[[DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -DABORT_ON_FAILED_ASSUME"]],,[[$CXXFLAG_WERROR]]) - AC_MSG_CHECKING([whether main function is needed]) + AC_MSG_CHECKING([whether main function is needed for fuzz binary]) AX_CHECK_LINK_FLAG( [[-fsanitize=$use_sanitizers]], [AC_MSG_RESULT([no])], @@ -1253,8 +1271,10 @@ if test "x$enable_fuzz" = "xyes"; then #include <cstdint> #include <cstddef> extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { return 0; } - /* unterminated comment to remove the main function ... - ]],[[]])]) + /* comment to remove the main function ... + ]],[[ + */ int not_main() { + ]])]) else BITCOIN_QT_INIT @@ -1268,6 +1288,8 @@ else QT_DBUS_INCLUDES=SUPPRESS_WARNINGS($QT_DBUS_INCLUDES) QT_TEST_INCLUDES=SUPPRESS_WARNINGS($QT_TEST_INCLUDES) fi + + CPPFLAGS="$CPPFLAGS -DPROVIDE_MAIN_FUNCTION" fi if test x$enable_wallet != xno; then @@ -1360,20 +1382,23 @@ fi if test x$use_boost = xyes; then -dnl Minimum required Boost version -define(MINIMUM_REQUIRED_BOOST, 1.58.0) - -dnl Check for Boost libs -AX_BOOST_BASE([MINIMUM_REQUIRED_BOOST]) +dnl Check for Boost headers +AX_BOOST_BASE([1.58.0],[],[AC_MSG_ERROR([Boost is not available!])]) if test x$want_boost = xno; then AC_MSG_ERROR([[only libbitcoinconsensus can be built without boost]]) fi AX_BOOST_SYSTEM AX_BOOST_FILESYSTEM -AX_BOOST_THREAD -dnl Opt-in to boost-process -AS_IF([ test x$with_boost_process != x ], [ AX_BOOST_PROCESS ], [ ax_cv_boost_process=no ] ) +dnl Opt-in to Boost Process +if test "x$boost_process" != xno; then +AC_MSG_CHECKING(for Boost Process) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <boost/process.hpp>]], + [[ boost::process::child* child = new boost::process::child; delete child; ]])], + [ AC_MSG_RESULT(yes); AC_DEFINE([HAVE_BOOST_PROCESS],,[define if Boost::Process is available])], + [ AC_MSG_ERROR([Boost::Process is not available!])] +) +fi if test x$suppress_external_warnings != xno; then BOOST_CPPFLAGS=SUPPRESS_WARNINGS($BOOST_CPPFLAGS) @@ -1384,12 +1409,14 @@ dnl counter implementations. 
In 1.63 and later the std::atomic approach is defau m4_pattern_allow(DBOOST_AC_USE_STD_ATOMIC) dnl otherwise it's treated like a macro BOOST_CPPFLAGS="-DBOOST_SP_USE_STD_ATOMIC -DBOOST_AC_USE_STD_ATOMIC $BOOST_CPPFLAGS" -BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB" +BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB" fi +dnl Check for reduced exports if test x$use_reduce_exports = xyes; then - CXXFLAGS="$CXXFLAGS $RE_CXXFLAGS" - AX_CHECK_LINK_FLAG([[-Wl,--exclude-libs,ALL]], [RELDFLAGS="-Wl,--exclude-libs,ALL"],, [[$LDFLAG_WERROR]]) + AX_CHECK_COMPILE_FLAG([-fvisibility=hidden],[CXXFLAGS="$CXXFLAGS -fvisibility=hidden"], + [AC_MSG_ERROR([Cannot set hidden symbol visibility. Use --disable-reduce-exports.])],[[$CXXFLAG_WERROR]]) + AX_CHECK_LINK_FLAG([[-Wl,--exclude-libs,ALL]],[RELDFLAGS="-Wl,--exclude-libs,ALL"],,[[$LDFLAG_WERROR]]) fi if test x$use_tests = xyes; then @@ -1713,6 +1740,7 @@ AM_CONDITIONAL([USE_BDB], [test "x$use_bdb" = "xyes"]) AM_CONDITIONAL([ENABLE_TRACING],[test x$have_sdt = xyes]) AM_CONDITIONAL([ENABLE_TESTS],[test x$BUILD_TEST = xyes]) AM_CONDITIONAL([ENABLE_FUZZ],[test x$enable_fuzz = xyes]) +AM_CONDITIONAL([ENABLE_FUZZ_BINARY],[test x$enable_fuzz_binary = xyes]) AM_CONDITIONAL([ENABLE_FUZZ_LINK_ALL],[test x$enable_danger_fuzz_link_all = xyes]) AM_CONDITIONAL([ENABLE_QT],[test x$bitcoin_enable_qt = xyes]) AM_CONDITIONAL([ENABLE_QT_TESTS],[test x$BUILD_TEST_QT = xyes]) @@ -1729,6 +1757,8 @@ AM_CONDITIONAL([ENABLE_SHANI],[test x$enable_shani = xyes]) AM_CONDITIONAL([ENABLE_ARM_CRC],[test x$enable_arm_crc = xyes]) AM_CONDITIONAL([USE_ASM],[test x$use_asm = xyes]) AM_CONDITIONAL([WORDS_BIGENDIAN],[test x$ac_cv_c_bigendian = xyes]) +AM_CONDITIONAL([USE_NATPMP],[test x$use_natpmp = xyes]) +AM_CONDITIONAL([USE_UPNP],[test x$use_upnp = xyes]) AC_DEFINE(CLIENT_VERSION_MAJOR, _CLIENT_VERSION_MAJOR, [Major version]) AC_DEFINE(CLIENT_VERSION_MINOR, _CLIENT_VERSION_MINOR, [Minor version]) @@ -1855,8 +1885,9 @@ esac echo echo "Options used to compile and link:" -echo " boost process = $ax_cv_boost_process" +echo " boost process = $with_boost_process" echo " multiprocess = $build_multiprocess" +echo " with libs = $build_bitcoin_libs" echo " with wallet = $enable_wallet" if test "x$enable_wallet" != "xno"; then echo " with sqlite = $use_sqlite" diff --git a/contrib/gitian-build.py b/contrib/gitian-build.py index 06b15574a7..60acb0d593 100755 --- a/contrib/gitian-build.py +++ b/contrib/gitian-build.py @@ -35,14 +35,14 @@ def setup(): if not os.path.isdir('bitcoin'): subprocess.check_call(['git', 'clone', 'https://github.com/bitcoin/bitcoin.git']) os.chdir('gitian-builder') - make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64'] + make_image_prog = ['bin/make-base-vm', '--suite', 'focal', '--arch', 'amd64'] if args.docker: make_image_prog += ['--docker'] elif not args.kvm: - make_image_prog += ['--lxc'] + make_image_prog += ['--lxc', '--disksize', '13000'] subprocess.check_call(make_image_prog) os.chdir(workdir) - if args.is_bionic and not args.kvm and not args.docker: + if args.is_focal and not args.kvm and not args.docker: subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net']) print('Reboot is required') sys.exit(0) @@ -176,7 +176,7 @@ def main(): args = parser.parse_args() workdir = os.getcwd() - args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs']) + args.is_focal = b'focal' in subprocess.check_output(['lsb_release', '-cs']) if args.kvm and 
args.docker: raise Exception('Error: cannot have both kvm and docker') diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml index a0ff87b531..b06fc782a3 100644 --- a/contrib/gitian-descriptors/gitian-linux.yml +++ b/contrib/gitian-descriptors/gitian-linux.yml @@ -3,7 +3,7 @@ name: "bitcoin-core-linux-22" enable_cache: true distro: "ubuntu" suites: -- "bionic" +- "focal" architectures: - "amd64" packages: @@ -15,6 +15,8 @@ packages: - "ca-certificates" - "curl" - "faketime" +- "g++-8" +- "gcc-8" - "git" - "libtool" - "patch" @@ -45,7 +47,7 @@ script: | WRAP_DIR=$HOME/wrapped HOSTS="x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu powerpc64-linux-gnu powerpc64le-linux-gnu riscv64-linux-gnu" - CONFIGFLAGS="--enable-glibc-back-compat --enable-reduce-exports --disable-bench --disable-gui-tests" + CONFIGFLAGS="--enable-glibc-back-compat --enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary" FAKETIME_HOST_PROGS="gcc g++" FAKETIME_PROGS="date ar ranlib nm" HOST_CFLAGS="-O2 -g" @@ -107,7 +109,7 @@ script: | BASEPREFIX="${PWD}/depends" # Build dependencies for each host for i in $HOSTS; do - make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" + make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" CC=${i}-gcc-8 CXX=${i}-g++-8 done # Faketime for binaries @@ -130,7 +132,7 @@ script: | # Extract the git archive into a dir for each host and build for i in ${HOSTS}; do export PATH=${BASEPREFIX}/${i}/native/bin:${ORIGPATH} - if [ "${i}" = "riscv64-linux-gnu" ] || [ "${i}" = "powerpc64-linux-gnu" ] || [ "${i}" = "powerpc64le-linux-gnu" ]; then + if [ "${i}" = "powerpc64-linux-gnu" ]; then # Workaround for https://bugs.launchpad.net/ubuntu/+source/gcc-8-cross-ports/+bug/1853740 # TODO: remove this when no longer needed HOST_LDFLAGS="${HOST_LDFLAGS_BASE} -Wl,-z,noexecstack" @@ -144,7 +146,7 @@ script: | tar --strip-components=1 -xf "${GIT_ARCHIVE}" ./autogen.sh - CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}" LDFLAGS="${HOST_LDFLAGS}" + CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}" LDFLAGS="${HOST_LDFLAGS}" CC=${i}-gcc-8 CXX=${i}-g++-8 make ${MAKEOPTS} make ${MAKEOPTS} -C src check-security make ${MAKEOPTS} -C src check-symbols diff --git a/contrib/gitian-descriptors/gitian-osx-signer.yml b/contrib/gitian-descriptors/gitian-osx-signer.yml index 6fcb21f729..3f0c0c3332 100644 --- a/contrib/gitian-descriptors/gitian-osx-signer.yml +++ b/contrib/gitian-descriptors/gitian-osx-signer.yml @@ -2,7 +2,7 @@ name: "bitcoin-dmg-signer" distro: "ubuntu" suites: -- "bionic" +- "focal" architectures: - "amd64" packages: diff --git a/contrib/gitian-descriptors/gitian-osx.yml b/contrib/gitian-descriptors/gitian-osx.yml index 2a47e90e6e..0dc531df0e 100644 --- a/contrib/gitian-descriptors/gitian-osx.yml +++ b/contrib/gitian-descriptors/gitian-osx.yml @@ -3,7 +3,7 @@ name: "bitcoin-core-osx-22" enable_cache: true distro: "ubuntu" suites: -- "bionic" +- "focal" architectures: - "amd64" packages: @@ -29,6 +29,7 @@ packages: - "python3-setuptools" - "fonts-tuffy" - "xorriso" +- "libtinfo5" remotes: - "url": "https://github.com/bitcoin/bitcoin.git" "dir": "bitcoin" @@ -39,7 +40,7 @@ script: | WRAP_DIR=$HOME/wrapped 
HOSTS="x86_64-apple-darwin18" - CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests XORRISOFS=${WRAP_DIR}/xorrisofs DMG=${WRAP_DIR}/dmg" + CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary XORRISOFS=${WRAP_DIR}/xorrisofs DMG=${WRAP_DIR}/dmg" FAKETIME_HOST_PROGS="" FAKETIME_PROGS="ar ranlib date dmg xorrisofs" diff --git a/contrib/gitian-descriptors/gitian-win-signer.yml b/contrib/gitian-descriptors/gitian-win-signer.yml index 6bcd126662..c13c24c3cc 100644 --- a/contrib/gitian-descriptors/gitian-win-signer.yml +++ b/contrib/gitian-descriptors/gitian-win-signer.yml @@ -2,7 +2,7 @@ name: "bitcoin-win-signer" distro: "ubuntu" suites: -- "bionic" +- "focal" architectures: - "amd64" packages: diff --git a/contrib/gitian-descriptors/gitian-win.yml b/contrib/gitian-descriptors/gitian-win.yml index 1edd8b2e81..95cf0185e2 100644 --- a/contrib/gitian-descriptors/gitian-win.yml +++ b/contrib/gitian-descriptors/gitian-win.yml @@ -3,7 +3,7 @@ name: "bitcoin-core-win-22" enable_cache: true distro: "ubuntu" suites: -- "bionic" +- "focal" architectures: - "amd64" packages: @@ -31,7 +31,7 @@ script: | WRAP_DIR=$HOME/wrapped HOSTS="x86_64-w64-mingw32" - CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests" + CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary" FAKETIME_HOST_PROGS="ar ranlib nm windres strip objcopy" FAKETIME_PROGS="date makensis zip" HOST_CFLAGS="-O2 -g -fno-ident" diff --git a/contrib/guix/README.md b/contrib/guix/README.md index 5870deee39..e83f53dac3 100644 --- a/contrib/guix/README.md +++ b/contrib/guix/README.md @@ -114,6 +114,12 @@ find output/ -type f -print0 | sort -z | xargs -r0 sha256sum depends tree. Setting this to the same directory across multiple builds of the depends tree can eliminate unnecessary redownloading of package sources. +* _**BASE_CACHE**_ + + Set the depends tree cache for built packages. This is passed through to the + depends tree. Setting this to the same directory across multiple builds of the + depends tree can eliminate unnecessary building of packages. + * _**MAX_JOBS**_ Override the maximum number of jobs to run simultaneously, you might want to @@ -217,11 +223,11 @@ As mentioned at the bottom of [this manual page][guix/bin-install]: > > make guix-binary.x86_64-linux.tar.xz -### When will Guix be packaged in debian? +### Is Guix packaged in my operating system? -Thanks to Vagrant Cascadian's diligent work, Guix is now [in debian -experimental][debian/guix-experimental]! Hopefully it will make its way into a -release soon. +Guix is shipped starting with [Debian Bullseye][debian/guix-bullseye] and +[Ubuntu 21.04 "Hirsute Hippo"][ubuntu/guix-hirsute]. Other operating systems +are working on packaging Guix as well. [b17e]: http://bootstrappable.org/ [r12e/source-date-epoch]: https://reproducible-builds.org/docs/source-date-epoch/ @@ -233,5 +239,6 @@ release soon. 
[guix/substitute-server-auth]: https://www.gnu.org/software/guix/manual/en/html_node/Substitute-Server-Authorization.html [guix/time-machine]: https://guix.gnu.org/manual/en/html_node/Invoking-guix-time_002dmachine.html -[debian/guix-experimental]: https://packages.debian.org/experimental/guix +[debian/guix-bullseye]: https://packages.debian.org/bullseye/guix +[ubuntu/guix-hirsute]: https://packages.ubuntu.com/hirsute/guix [fanquake/guix-docker]: https://github.com/fanquake/core-review/tree/master/guix diff --git a/contrib/guix/guix-build.sh b/contrib/guix/guix-build.sh index da6bd13f6a..f29ea9b910 100755 --- a/contrib/guix/guix-build.sh +++ b/contrib/guix/guix-build.sh @@ -148,8 +148,9 @@ SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git log --format=%at -1)}" time-machine() { # shellcheck disable=SC2086 guix time-machine --url=https://github.com/dongcarl/guix.git \ - --commit=b066c25026f21fb57677aa34692a5034338e7ee3 \ + --commit=7d6bd44da57926e0d4af25eba723a61c82beef98 \ --max-jobs="$MAX_JOBS" \ + --keep-failed \ ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \ ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_TIMEMACHINE_FLAGS} \ -- "$@" @@ -180,6 +181,11 @@ and untracked files and directories will be wiped, allowing you to start anew. EOF } +# Create SOURCES_PATH and BASE_CACHE if they are non-empty so that we can map +# them into the container +[ -z "$SOURCES_PATH" ] || mkdir -p "$SOURCES_PATH" +[ -z "$BASE_CACHE" ] || mkdir -p "$BASE_CACHE" + # Deterministically build Bitcoin Core # shellcheck disable=SC2153 for host in $HOSTS; do @@ -254,6 +260,12 @@ EOF # make the downloaded depends sources available to it. The sources # should have been downloaded prior to this invocation. # + # --keep-failed keep build tree of failed builds + # + # When builds of the Guix environment itself (not Bitcoin Core) + # fail, it is useful for the build tree to be kept for debugging + # purposes. 
+ # # ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} # # fetch substitute from SUBSTITUTE_URLS if they are @@ -274,7 +286,9 @@ EOF --share="$OUTDIR"=/outdir \ --expose="$(git rev-parse --git-common-dir)" \ ${SOURCES_PATH:+--share="$SOURCES_PATH"} \ + ${BASE_CACHE:+--share="$BASE_CACHE"} \ --max-jobs="$MAX_JOBS" \ + --keep-failed \ ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \ ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \ -- env HOST="$host" \ @@ -282,6 +296,7 @@ EOF SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \ ${V:+V=1} \ ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \ + ${BASE_CACHE:+BASE_CACHE="$BASE_CACHE"} \ DISTSRC="$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")" \ OUTDIR=/outdir \ bash -c "cd /bitcoin && bash contrib/guix/libexec/build.sh" diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh index 39d3cb9b50..24bc30e330 100644 --- a/contrib/guix/libexec/build.sh +++ b/contrib/guix/libexec/build.sh @@ -44,6 +44,8 @@ store_path() { NATIVE_GCC="$(store_path gcc-toolchain)" export LIBRARY_PATH="${NATIVE_GCC}/lib:${NATIVE_GCC}/lib64" export CPATH="${NATIVE_GCC}/include" +unset C_INCLUDE_PATH +unset CPLUS_INCLUDE_PATH case "$HOST" in *darwin*) # When targeting darwin, some native tools built by depends require @@ -66,7 +68,8 @@ case "$HOST" in # Determine output paths to use in CROSS_* environment variables CROSS_GLIBC="$(store_path "mingw-w64-x86_64-winpthreads")" CROSS_GCC="$(store_path "gcc-cross-${HOST}")" - CROSS_GCC_LIBS=( "${CROSS_GCC}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... + CROSS_GCC_LIB_STORE="$(store_path "gcc-cross-${HOST}" lib)" + CROSS_GCC_LIBS=( "${CROSS_GCC_LIB_STORE}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... CROSS_GCC_LIB="${CROSS_GCC_LIBS[0]}" # ...we just want the first one (there should only be one) # The search path ordering is generally: @@ -75,7 +78,7 @@ case "$HOST" in # 2. kernel-header-related search paths (not applicable to mingw-w64 hosts) export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GCC_LIB}/include-fixed:${CROSS_GLIBC}/include" export CROSS_CPLUS_INCLUDE_PATH="${CROSS_GCC}/include/c++:${CROSS_GCC}/include/c++/${HOST}:${CROSS_GCC}/include/c++/backward:${CROSS_C_INCLUDE_PATH}" - export CROSS_LIBRARY_PATH="${CROSS_GCC}/lib:${CROSS_GCC}/${HOST}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib" + export CROSS_LIBRARY_PATH="${CROSS_GCC_LIB_STORE}/lib:${CROSS_GCC}/${HOST}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib" ;; *darwin*) # The CROSS toolchain for darwin uses the SDK and ignores environment variables. @@ -86,12 +89,13 @@ case "$HOST" in CROSS_GLIBC_STATIC="$(store_path "glibc-cross-${HOST}" static)" CROSS_KERNEL="$(store_path "linux-libre-headers-cross-${HOST}")" CROSS_GCC="$(store_path "gcc-cross-${HOST}")" - CROSS_GCC_LIBS=( "${CROSS_GCC}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... + CROSS_GCC_LIB_STORE="$(store_path "gcc-cross-${HOST}" lib)" + CROSS_GCC_LIBS=( "${CROSS_GCC_LIB_STORE}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... 
CROSS_GCC_LIB="${CROSS_GCC_LIBS[0]}" # ...we just want the first one (there should only be one) export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GCC_LIB}/include-fixed:${CROSS_GLIBC}/include:${CROSS_KERNEL}/include" export CROSS_CPLUS_INCLUDE_PATH="${CROSS_GCC}/include/c++:${CROSS_GCC}/include/c++/${HOST}:${CROSS_GCC}/include/c++/backward:${CROSS_C_INCLUDE_PATH}" - export CROSS_LIBRARY_PATH="${CROSS_GCC}/lib:${CROSS_GCC}/${HOST}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib:${CROSS_GLIBC_STATIC}/lib" + export CROSS_LIBRARY_PATH="${CROSS_GCC_LIB_STORE}/lib:${CROSS_GCC}/${HOST}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib:${CROSS_GLIBC_STATIC}/lib" ;; *) exit 1 ;; @@ -167,6 +171,7 @@ esac make -C depends --jobs="$MAX_JOBS" HOST="$HOST" \ ${V:+V=1} \ ${SOURCES_PATH+SOURCES_PATH="$SOURCES_PATH"} \ + ${BASE_CACHE+BASE_CACHE="$BASE_CACHE"} \ i686_linux_CC=i686-linux-gnu-gcc \ i686_linux_CXX=i686-linux-gnu-g++ \ i686_linux_AR=i686-linux-gnu-ar \ @@ -204,7 +209,7 @@ fi ########################### # CONFIGFLAGS -CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests" +CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary" case "$HOST" in *linux*) CONFIGFLAGS+=" --enable-glibc-back-compat" ;; esac diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index 3b89659263..bf02fbfd1d 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -115,7 +115,8 @@ http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html" `(("binutils" ,xbinutils) ("libc" ,xlibc) ("libc:static" ,xlibc "static") - ("gcc" ,xgcc))) + ("gcc" ,xgcc) + ("gcc-lib" ,xgcc "lib"))) (synopsis (string-append "Complete GCC tool chain for " target)) (description (string-append "This package provides a complete GCC tool chain for " target " development.")) @@ -159,7 +160,8 @@ desirable for building Bitcoin Core release binaries." (propagated-inputs `(("binutils" ,xbinutils) ("libc" ,pthreads-xlibc) - ("gcc" ,pthreads-xgcc))) + ("gcc" ,pthreads-xgcc) + ("gcc-lib" ,pthreads-xgcc "lib"))) (synopsis (string-append "Complete GCC tool chain for " target)) (description (string-append "This package provides a complete GCC tool chain for " target " development.")) @@ -219,7 +221,7 @@ chain for " target " development.")) pkg-config ;; Scripting perl - python-3.7 + python-3 ;; Git git ;; Native gcc 7 toolchain @@ -236,5 +238,5 @@ chain for " target " development.")) ((string-contains target "-linux-") (list (make-bitcoin-cross-toolchain target))) ((string-contains target "darwin") - (list clang-8 libcap binutils imagemagick libtiff librsvg font-tuffy cmake-3.15.5 xorriso)) + (list clang-8 libcap binutils imagemagick libtiff librsvg font-tuffy cmake xorriso)) (else '()))))) diff --git a/contrib/message-capture/message-capture-docs.md b/contrib/message-capture/message-capture-docs.md new file mode 100644 index 0000000000..7301968461 --- /dev/null +++ b/contrib/message-capture/message-capture-docs.md @@ -0,0 +1,25 @@ +# Per-Peer Message Capture + +## Purpose + +This feature allows for message capture on a per-peer basis. It answers the simple question: "Can I see what messages my node is sending and receiving?" + +## Usage and Functionality + +* Run `bitcoind` with the `-capturemessages` option. +* Look in the `message_capture` folder in your datadir. + * Typically this will be `~/.bitcoin/message_capture`. + * See that there are many folders inside, one for each peer names with its IP address and port. 
+ * Inside each peer's folder there are two `.dat` files: one is for received messages (`msgs_recv.dat`) and the other is for sent messages (`msgs_sent.dat`). +* Run `contrib/message-capture/message-capture-parser.py` with the proper arguments. + * See the `-h` option for help. + * To see all messages, both sent and received, for all peers use: + ``` + ./contrib/message-capture/message-capture-parser.py -o out.json \ + ~/.bitcoin/message_capture/**/*.dat + ``` + * Note: The messages in the given `.dat` files will be interleaved in chronological order. So, giving both received and sent `.dat` files (as above with `*.dat`) will result in all messages being interleaved in chronological order. + * If an output file is not provided (i.e. the `-o` option is not used), then the output prints to `stdout`. +* View the resulting output. + * The output file is `JSON` formatted. + * Suggestion: use `jq` to view the output, with `jq . out.json` diff --git a/contrib/message-capture/message-capture-parser.py b/contrib/message-capture/message-capture-parser.py new file mode 100755 index 0000000000..9988478f1b --- /dev/null +++ b/contrib/message-capture/message-capture-parser.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Parse message capture binary files. To be used in conjunction with -capturemessages.""" + +import argparse +import os +import shutil +import sys +from io import BytesIO +import json +from pathlib import Path +from typing import Any, List, Optional + +sys.path.append(os.path.join(os.path.dirname(__file__), '../../test/functional')) + +from test_framework.messages import ser_uint256 # noqa: E402 +from test_framework.p2p import MESSAGEMAP # noqa: E402 + +TIME_SIZE = 8 +LENGTH_SIZE = 4 +MSGTYPE_SIZE = 12 + +# The test framework classes stores hashes as large ints in many cases. +# These are variables of type uint256 in core. +# There isn't a way to distinguish between a large int and a large int that is actually a blob of bytes. +# As such, they are itemized here. +# Any variables with these names that are of type int are actually uint256 variables. 
+# (These can be easily found by looking for calls to deser_uint256, deser_uint256_vector, and uint256_from_str in messages.py) +HASH_INTS = [ + "blockhash", + "block_hash", + "hash", + "hashMerkleRoot", + "hashPrevBlock", + "hashstop", + "prev_header", + "sha256", + "stop_hash", +] + +HASH_INT_VECTORS = [ + "hashes", + "headers", + "vHave", + "vHash", +] + + +class ProgressBar: + def __init__(self, total: float): + self.total = total + self.running = 0 + + def set_progress(self, progress: float): + cols = shutil.get_terminal_size()[0] + if cols <= 12: + return + max_blocks = cols - 9 + num_blocks = int(max_blocks * progress) + print('\r[ {}{} ] {:3.0f}%' + .format('#' * num_blocks, + ' ' * (max_blocks - num_blocks), + progress * 100), + end ='') + + def update(self, more: float): + self.running += more + self.set_progress(self.running / self.total) + + +def to_jsonable(obj: Any) -> Any: + if hasattr(obj, "__dict__"): + return obj.__dict__ + elif hasattr(obj, "__slots__"): + ret = {} # type: Any + for slot in obj.__slots__: + val = getattr(obj, slot, None) + if slot in HASH_INTS and isinstance(val, int): + ret[slot] = ser_uint256(val).hex() + elif slot in HASH_INT_VECTORS and isinstance(val[0], int): + ret[slot] = [ser_uint256(a).hex() for a in val] + else: + ret[slot] = to_jsonable(val) + return ret + elif isinstance(obj, list): + return [to_jsonable(a) for a in obj] + elif isinstance(obj, bytes): + return obj.hex() + else: + return obj + + +def process_file(path: str, messages: List[Any], recv: bool, progress_bar: Optional[ProgressBar]) -> None: + with open(path, 'rb') as f_in: + if progress_bar: + bytes_read = 0 + + while True: + if progress_bar: + # Update progress bar + diff = f_in.tell() - bytes_read - 1 + progress_bar.update(diff) + bytes_read = f_in.tell() - 1 + + # Read the Header + tmp_header_raw = f_in.read(TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE) + if not tmp_header_raw: + break + tmp_header = BytesIO(tmp_header_raw) + time = int.from_bytes(tmp_header.read(TIME_SIZE), "little") # type: int + msgtype = tmp_header.read(MSGTYPE_SIZE).split(b'\x00', 1)[0] # type: bytes + length = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little") # type: int + + # Start converting the message to a dictionary + msg_dict = {} + msg_dict["direction"] = "recv" if recv else "sent" + msg_dict["time"] = time + msg_dict["size"] = length # "size" is less readable here, but more readable in the output + + msg_ser = BytesIO(f_in.read(length)) + + # Determine message type + if msgtype not in MESSAGEMAP: + # Unrecognized message type + try: + msgtype_tmp = msgtype.decode() + if not msgtype_tmp.isprintable(): + raise UnicodeDecodeError + msg_dict["msgtype"] = msgtype_tmp + except UnicodeDecodeError: + msg_dict["msgtype"] = "UNREADABLE" + msg_dict["body"] = msg_ser.read().hex() + msg_dict["error"] = "Unrecognized message type." + messages.append(msg_dict) + print(f"WARNING - Unrecognized message type {msgtype} in {path}", file=sys.stderr) + continue + + # Deserialize the message + msg = MESSAGEMAP[msgtype]() + msg_dict["msgtype"] = msgtype.decode() + + try: + msg.deserialize(msg_ser) + except KeyboardInterrupt: + raise + except Exception: + # Unable to deserialize message body + msg_ser.seek(0, os.SEEK_SET) + msg_dict["body"] = msg_ser.read().hex() + msg_dict["error"] = "Unable to deserialize message." 
+ messages.append(msg_dict) + print(f"WARNING - Unable to deserialize message in {path}", file=sys.stderr) + continue + + # Convert body of message into a jsonable object + if length: + msg_dict["body"] = to_jsonable(msg) + messages.append(msg_dict) + + if progress_bar: + # Update the progress bar to the end of the current file + # in case we exited the loop early + f_in.seek(0, os.SEEK_END) # Go to end of file + diff = f_in.tell() - bytes_read - 1 + progress_bar.update(diff) + + +def main(): + parser = argparse.ArgumentParser( + description=__doc__, + epilog="EXAMPLE \n\t{0} -o out.json <data-dir>/message_capture/**/*.dat".format(sys.argv[0]), + formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument( + "capturepaths", + nargs='+', + help="binary message capture files to parse.") + parser.add_argument( + "-o", "--output", + help="output file. If unset print to stdout") + parser.add_argument( + "-n", "--no-progress-bar", + action='store_true', + help="disable the progress bar. Automatically set if the output is not a terminal") + args = parser.parse_args() + capturepaths = [Path.cwd() / Path(capturepath) for capturepath in args.capturepaths] + output = Path.cwd() / Path(args.output) if args.output else False + use_progress_bar = (not args.no_progress_bar) and sys.stdout.isatty() + + messages = [] # type: List[Any] + if use_progress_bar: + total_size = sum(capture.stat().st_size for capture in capturepaths) + progress_bar = ProgressBar(total_size) + else: + progress_bar = None + + for capture in capturepaths: + process_file(str(capture), messages, "recv" in capture.stem, progress_bar) + + messages.sort(key=lambda msg: msg['time']) + + if use_progress_bar: + progress_bar.set_progress(1) + + jsonrep = json.dumps(messages) + if output: + with open(str(output), 'w+', encoding="utf8") as f_out: + f_out.write(jsonrep) + else: + print(jsonrep) + +if __name__ == "__main__": + main() diff --git a/depends/Makefile b/depends/Makefile index 596a46d4a2..4cd4d72fc2 100644 --- a/depends/Makefile +++ b/depends/Makefile @@ -2,7 +2,7 @@ # Pattern rule to print variables, e.g. make print-top_srcdir print-%: - @echo $* = $($*) + @echo '$*' = '$($*)' # When invoking a sub-make, keep only the command line variable definitions # matching the pattern in the filter function. @@ -112,19 +112,27 @@ include builders/$(build_os).mk include builders/default.mk include packages/packages.mk +full_env=$(shell printenv) + build_id_string:=$(BUILD_ID_SALT) -build_id_string+=$(shell $(build_CC) --version 2>/dev/null) -build_id_string+=$(shell $(build_AR) --version 2>/dev/null) -build_id_string+=$(shell $(build_CXX) --version 2>/dev/null) -build_id_string+=$(shell $(build_RANLIB) --version 2>/dev/null) -build_id_string+=$(shell $(build_STRIP) --version 2>/dev/null) + +# GCC only prints COLLECT_LTO_WRAPPER when invoked with just "-v", but we want +# the information from "-v -E -" as well, so just include both. 
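The redirection trick used on the lines that follow (`3>&1 1>&2 2>&3 > /dev/null`) swaps stdout and stderr and then discards what is now stdout, so that `$(shell ...)` captures only the compiler's stderr, which is where gcc and clang print their `-v` banner. A quick way to confirm which stream carries that output, sketched in Python for illustration (not part of the patch; assumes a `cc` driver on `PATH`):

```python
import subprocess

# gcc and clang write their "-v" version/configuration banner to stderr,
# which is why the Makefile below has to swap the streams before $(shell)
# can capture it.
proc = subprocess.run(["cc", "-v"], stdin=subprocess.DEVNULL,
                      capture_output=True, text=True)
print("stdout bytes:", len(proc.stdout))   # typically empty
print("stderr banner:\n" + proc.stderr)
```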
+# +# '3>&1 1>&2 2>&3 > /dev/null' is supposed to swap stdin and stdout and silence +# stdin, since we only want the stderr output +build_id_string+=$(shell $(build_CC) -v < /dev/null 3>&1 1>&2 2>&3 > /dev/null) $(shell $(build_CC) -v -E - < /dev/null 3>&1 1>&2 2>&3 > /dev/null) +build_id_string+=$(shell $(build_AR) --version 2>/dev/null) $(filter AR_%,$(full_env)) ZERO_AR_DATE=$(ZERO_AR_DATE) +build_id_string+=$(shell $(build_CXX) -v < /dev/null 3>&1 1>&2 2>&3 > /dev/null) $(shell $(build_CXX) -v -E - < /dev/null 3>&1 1>&2 2>&3 > /dev/null) +build_id_string+=$(shell $(build_RANLIB) --version 2>/dev/null) $(filter RANLIB_%,$(full_env)) +build_id_string+=$(shell $(build_STRIP) --version 2>/dev/null) $(filter STRIP_%,$(full_env)) $(host_arch)_$(host_os)_id_string:=$(HOST_ID_SALT) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_CC) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_AR) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_CXX) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_RANLIB) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_STRIP) --version 2>/dev/null) +$(host_arch)_$(host_os)_id_string+=$(shell $(host_CC) -v < /dev/null 3>&1 1>&2 2>&3 > /dev/null) $(shell $(host_CC) -v -E - < /dev/null 3>&1 1>&2 2>&3 > /dev/null) +$(host_arch)_$(host_os)_id_string+=$(shell $(host_AR) --version 2>/dev/null) $(filter AR_%,$(full_env)) ZERO_AR_DATE=$(ZERO_AR_DATE) +$(host_arch)_$(host_os)_id_string+=$(shell $(host_CXX) -v < /dev/null 3>&1 1>&2 2>&3 > /dev/null) $(shell $(host_CXX) -v -E - < /dev/null 3>&1 1>&2 2>&3 > /dev/null) +$(host_arch)_$(host_os)_id_string+=$(shell $(host_RANLIB) --version 2>/dev/null) $(filter RANLIB_%,$(full_env)) +$(host_arch)_$(host_os)_id_string+=$(shell $(host_STRIP) --version 2>/dev/null) $(filter STRIP_%,$(full_env)) ifneq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) # Make sure that cache is invalidated when switching between system and @@ -133,6 +141,9 @@ build_id_string+=system_clang $(host_arch)_$(host_os)_id_string+=system_clang endif +build_id_string+=GUIX_ENVIRONMENT=$(GUIX_ENVIRONMENT) +$(host_arch)_$(host_os)_id_string+=GUIX_ENVIRONMENT=$(GUIX_ENVIRONMENT) + qrencode_packages_$(NO_QR) = $(qrencode_packages) qt_packages_$(NO_QT) = $(qt_packages) $(qt_$(host_os)_packages) $(qt_$(host_arch)_$(host_os)_packages) $(qrencode_packages_) @@ -265,7 +276,7 @@ install: check-packages $(host_prefix)/share/config.site download-one: check-sources $(all_sources) download-osx: - @$(MAKE) -s HOST=x86_64-apple-darwin14 download-one + @$(MAKE) -s HOST=x86_64-apple-darwin download-one download-linux: @$(MAKE) -s HOST=x86_64-unknown-linux-gnu download-one download-win: diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk index ff8a252db9..29a3efdfe6 100644 --- a/depends/packages/boost.mk +++ b/depends/packages/boost.mk @@ -22,7 +22,7 @@ $(package)_toolset_$(host_os)=clang else $(package)_toolset_$(host_os)=gcc endif -$(package)_config_libraries=filesystem,system,thread,test +$(package)_config_libraries=filesystem,system,test $(package)_cxxflags=-std=c++17 -fvisibility=hidden $(package)_cxxflags_linux=-fPIC $(package)_cxxflags_android=-fPIC diff --git a/doc/REST-interface.md b/doc/REST-interface.md index 6237734390..ea06952af4 100644 --- a/doc/REST-interface.md +++ b/doc/REST-interface.md @@ -111,12 +111,7 @@ $ curl localhost:18332/rest/getutxos/checkmempool/b2cdfd7b89def827ff8af7cd9bff76 Returns various information about the TX mempool. 
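For illustration (not part of the patch), the mempool REST endpoints documented here can be queried from a script in the same way as the `curl` examples above. A minimal sketch, assuming a local node started with `-rest` on testnet (port 18332, as in the example above) and the current `/rest/mempool/info.json` and `/rest/mempool/contents.json` paths:

```python
import json
import urllib.request

BASE = "http://localhost:18332"  # testnet RPC/REST port, as in the curl example


def rest_json(path: str):
    with urllib.request.urlopen(BASE + path) as resp:
        return json.load(resp)


# Summary fields; see the getmempoolinfo RPC for their documentation.
print(json.dumps(rest_json("/rest/mempool/info.json"), indent=2))

# Full mempool contents, returned as a JSON object keyed by txid.
print("transactions in mempool:", len(rest_json("/rest/mempool/contents.json")))
```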
Only supports JSON as output format. -* loaded : (boolean) if the mempool is fully loaded -* size : (numeric) the number of transactions in the TX mempool -* bytes : (numeric) size of the TX mempool in bytes -* usage : (numeric) total TX mempool memory usage -* maxmempool : (numeric) maximum memory usage for the mempool in bytes -* mempoolminfee : (numeric) minimum feerate (BTC per KB) for tx to be accepted +Refer to the `getmempoolinfo` RPC for documentation of the fields. `GET /rest/mempool/contents.json` diff --git a/doc/bips.md b/doc/bips.md index 8c20533c9b..a5e9a6c020 100644 --- a/doc/bips.md +++ b/doc/bips.md @@ -15,6 +15,9 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.21.0**): * [`BIP 35`](https://github.com/bitcoin/bips/blob/master/bip-0035.mediawiki): The 'mempool' protocol message (and the protocol version bump to 60002) has been implemented since **v0.7.0** ([PR #1641](https://github.com/bitcoin/bitcoin/pull/1641)). As of **v0.13.0**, this is only available for `NODE_BLOOM` (BIP 111) peers. * [`BIP 37`](https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki): The bloom filtering for transaction relaying, partial Merkle trees for blocks, and the protocol version bump to 70001 (enabling low-bandwidth SPV clients) has been implemented since **v0.8.0** ([PR #1795](https://github.com/bitcoin/bitcoin/pull/1795)). Disabled by default since **v0.19.0**, can be enabled by the `-peerbloomfilters` option. * [`BIP 42`](https://github.com/bitcoin/bips/blob/master/bip-0042.mediawiki): The bug that would have caused the subsidy schedule to resume after block 13440000 was fixed in **v0.9.2** ([PR #3842](https://github.com/bitcoin/bitcoin/pull/3842)). +* [`BIP 43`](https://github.com/bitcoin/bips/blob/master/bip-0043.mediawiki): The experimental descriptor wallets introduced in **v0.21.0** by default use the Hierarchical Deterministic Wallet derivation proposed by BIP 43. ([PR #16528](https://github.com/bitcoin/bitcoin/pull/16528)) +* [`BIP 44`](https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki): The experimental descriptor wallets introduced in **v0.21.0** by default use the Hierarchical Deterministic Wallet derivation proposed by BIP 44. ([PR #16528](https://github.com/bitcoin/bitcoin/pull/16528)) +* [`BIP 49`](https://github.com/bitcoin/bips/blob/master/bip-0049.mediawiki): The experimental descriptor wallets introduced in **v0.21.0** by default use the Hierarchical Deterministic Wallet derivation proposed by BIP 49. ([PR #16528](https://github.com/bitcoin/bitcoin/pull/16528)) * [`BIP 61`](https://github.com/bitcoin/bips/blob/master/bip-0061.mediawiki): The 'reject' protocol message (and the protocol version bump to 70002) was added in **v0.9.0** ([PR #3185](https://github.com/bitcoin/bitcoin/pull/3185)). Starting **v0.17.0**, whether to send reject messages can be configured with the `-enablebip61` option, and support is deprecated (disabled by default) as of **v0.18.0**. Support was removed in **v0.20.0** ([PR #15437](https://github.com/bitcoin/bitcoin/pull/15437)). * [`BIP 65`](https://github.com/bitcoin/bips/blob/master/bip-0065.mediawiki): The CHECKLOCKTIMEVERIFY softfork was merged in **v0.12.0** ([PR #6351](https://github.com/bitcoin/bitcoin/pull/6351)), and backported to **v0.11.2** and **v0.10.4**. Mempool-only CLTV was added in [PR #6124](https://github.com/bitcoin/bitcoin/pull/6124). 
* [`BIP 66`](https://github.com/bitcoin/bips/blob/master/bip-0066.mediawiki): The strict DER rules and associated version 3 blocks have been implemented since **v0.10.0** ([PR #5713](https://github.com/bitcoin/bitcoin/pull/5713)). @@ -24,6 +27,7 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.21.0**): Support can be optionally disabled at build time since **v0.18.0** ([PR 14451](https://github.com/bitcoin/bitcoin/pull/14451)), and it is disabled by default at build time since **v0.19.0** ([PR #15584](https://github.com/bitcoin/bitcoin/pull/15584)). It has been removed as of **v0.20.0** ([PR 17165](https://github.com/bitcoin/bitcoin/pull/17165)). +* [`BIP 84`](https://github.com/bitcoin/bips/blob/master/bip-0084.mediawiki): The experimental descriptor wallets introduced in **v0.21.0** by default use the Hierarchical Deterministic Wallet derivation proposed by BIP 84. ([PR #16528](https://github.com/bitcoin/bitcoin/pull/16528)) * [`BIP 90`](https://github.com/bitcoin/bips/blob/master/bip-0090.mediawiki): Trigger mechanism for activation of BIPs 34, 65, and 66 has been simplified to block height checks since **v0.14.0** ([PR #8391](https://github.com/bitcoin/bitcoin/pull/8391)). * [`BIP 111`](https://github.com/bitcoin/bips/blob/master/bip-0111.mediawiki): `NODE_BLOOM` service bit added, and enforced for all peer versions as of **v0.13.0** ([PR #6579](https://github.com/bitcoin/bitcoin/pull/6579) and [PR #6641](https://github.com/bitcoin/bitcoin/pull/6641)). * [`BIP 112`](https://github.com/bitcoin/bips/blob/master/bip-0112.mediawiki): The CHECKSEQUENCEVERIFY opcode has been implemented since **v0.12.1** ([PR #7524](https://github.com/bitcoin/bitcoin/pull/7524)), and has been *buried* since **v0.19.0** ([PR #16060](https://github.com/bitcoin/bitcoin/pull/16060)). diff --git a/doc/build-unix.md b/doc/build-unix.md index 5c24886dbf..07fb9c453e 100644 --- a/doc/build-unix.md +++ b/doc/build-unix.md @@ -9,7 +9,7 @@ Note Always use absolute paths to configure and compile Bitcoin Core and the dependencies. For example, when specifying the path of the dependency: - ../dist/configure --enable-cxx --disable-shared --with-pic --prefix=$BDB_PREFIX + ../dist/configure --enable-cxx --disable-shared --with-pic --prefix=$BDB_PREFIX Here BDB_PREFIX must be an absolute path - it is defined using $(pwd) which ensures the usage of the absolute path. @@ -82,7 +82,7 @@ Build requirements: Now, you can either build from self-compiled [depends](/depends/README.md) or install the required dependencies: - sudo apt-get install libevent-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev + sudo apt-get install libevent-dev libboost-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev BerkeleyDB is required for the wallet. @@ -166,9 +166,9 @@ miniupnpc https://miniupnp.tuxfamily.org/files/). UPnP support is compiled in and turned off by default. See the configure options for UPnP behavior desired: - --without-miniupnpc No UPnP support, miniupnp not required - --disable-upnp-default (the default) UPnP support turned off by default at runtime - --enable-upnp-default UPnP support turned on by default at runtime + --without-miniupnpc No UPnP support, miniupnp not required + --disable-upnp-default (the default) UPnP support turned off by default at runtime + --enable-upnp-default UPnP support turned on by default at runtime libnatpmp --------- @@ -177,9 +177,9 @@ libnatpmp from [here](https://miniupnp.tuxfamily.org/files/). 
NAT-PMP support is compiled in and turned off by default. See the configure options for NAT-PMP behavior desired: - --without-natpmp No NAT-PMP support, libnatpmp not required - --disable-natpmp-default (the default) NAT-PMP support turned off by default at runtime - --enable-natpmp-default NAT-PMP support turned on by default at runtime + --without-natpmp No NAT-PMP support, libnatpmp not required + --disable-natpmp-default (the default) NAT-PMP support turned off by default at runtime + --enable-natpmp-default NAT-PMP support turned on by default at runtime Berkeley DB ----------- @@ -199,9 +199,9 @@ Boost ----- If you need to build Boost yourself: - sudo su - ./bootstrap.sh - ./bjam install + sudo su + ./bootstrap.sh + ./bjam install Security @@ -212,8 +212,8 @@ This can be disabled with: Hardening Flags: - ./configure --enable-hardening - ./configure --disable-hardening + ./configure --enable-hardening + ./configure --disable-hardening Hardening enables the following features: @@ -228,7 +228,7 @@ Hardening enables the following features: To test that you have built PIE executable, install scanelf, part of paxutils, and use: - scanelf -e ./bitcoin + scanelf -e ./bitcoin The output should contain: @@ -245,8 +245,8 @@ Hardening enables the following features: `scanelf -e ./bitcoin` The output should contain: - STK/REL/PTL - RW- R-- RW- + STK/REL/PTL + RW- R-- RW- The STK RW- means that the stack is readable and writeable but not executable. diff --git a/doc/descriptors.md b/doc/descriptors.md index 63acb9167f..c4fc2a66bf 100644 --- a/doc/descriptors.md +++ b/doc/descriptors.md @@ -191,7 +191,7 @@ steps, or for dumping wallet descriptors including private key material. In order to easily represent the sets of scripts currently supported by existing Bitcoin Core wallets, a convenience function `combo` is provided, which takes as input a public key, and describes a set of P2PK, -P2PKH, P2WPKH, and P2SH-P2WPH scripts for that key. In case the key is +P2PKH, P2WPKH, and P2SH-P2WPKH scripts for that key. In case the key is uncompressed, the set only includes P2PK and P2PKH scripts. ### Checksums diff --git a/doc/developer-notes.md b/doc/developer-notes.md index 596f65cf10..011c38321c 100644 --- a/doc/developer-notes.md +++ b/doc/developer-notes.md @@ -75,6 +75,11 @@ tool to clean up patches automatically before submission. on the same line as the `if`, without braces. In every other case, braces are required, and the `then` and `else` clauses must appear correctly indented on a new line. + - There's no hard limit on line width, but prefer to keep lines to <100 + characters if doing so does not decrease readability. Break up long + function declarations over multiple lines using the Clang Format + [AlignAfterOpenBracket](https://clang.llvm.org/docs/ClangFormatStyleOptions.html) + style option. - **Symbol naming conventions**. These are preferred in new code, but are not required when doing so would need changes to significant pieces of existing diff --git a/doc/fuzzing.md b/doc/fuzzing.md index 80ce821091..87df2bbbb9 100644 --- a/doc/fuzzing.md +++ b/doc/fuzzing.md @@ -157,3 +157,77 @@ $ FUZZ=process_message honggfuzz/honggfuzz -i inputs/ -- src/test/fuzz/fuzz ``` Read the [Honggfuzz documentation](https://github.com/google/honggfuzz/blob/master/docs/USAGE.md) for more information. + +## Fuzzing the Bitcoin Core P2P layer using Honggfuzz NetDriver + +Honggfuzz NetDriver allows for very easy fuzzing of TCP servers such as Bitcoin +Core without having to write any custom fuzzing harness. 
The `bitcoind` server +process is largely fuzzed without modification. + +This makes the fuzzing highly realistic: a bug reachable by the fuzzer is likely +also remotely triggerable by an untrusted peer. + +To quickly get started fuzzing the P2P layer using Honggfuzz NetDriver: + +```sh +$ mkdir bitcoin-honggfuzz-p2p/ +$ cd bitcoin-honggfuzz-p2p/ +$ git clone https://github.com/bitcoin/bitcoin +$ cd bitcoin/ +$ ./autogen.sh +$ git clone https://github.com/google/honggfuzz +$ cd honggfuzz/ +$ make +$ cd .. +$ CC=$(pwd)/honggfuzz/hfuzz_cc/hfuzz-clang \ + CXX=$(pwd)/honggfuzz/hfuzz_cc/hfuzz-clang++ \ + ./configure --disable-wallet --with-gui=no \ + --with-sanitizers=address,undefined +$ git apply << "EOF" +diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp +index 455a82e39..2faa3f80f 100644 +--- a/src/bitcoind.cpp ++++ b/src/bitcoind.cpp +@@ -158,7 +158,11 @@ static bool AppInit(int argc, char* argv[]) + return fRet; + } + ++#ifdef HFND_FUZZING_ENTRY_FUNCTION_CXX ++HFND_FUZZING_ENTRY_FUNCTION_CXX(int argc, char* argv[]) ++#else + int main(int argc, char* argv[]) ++#endif + { + #ifdef WIN32 + util::WinCmdLineArgs winArgs; +diff --git a/src/net.cpp b/src/net.cpp +index cf987b699..636a4176a 100644 +--- a/src/net.cpp ++++ b/src/net.cpp +@@ -709,7 +709,7 @@ int V1TransportDeserializer::readHeader(const char *pch, unsigned int nBytes) + } + + // Check start string, network magic +- if (memcmp(hdr.pchMessageStart, m_chain_params.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) { ++ if (false && memcmp(hdr.pchMessageStart, m_chain_params.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) { // skip network magic checking + LogPrint(BCLog::NET, "HEADER ERROR - MESSAGESTART (%s, %u bytes), received %s, peer=%d\n", hdr.GetCommand(), hdr.nMessageSize, HexStr(hdr.pchMessageStart), m_node_id); + return -1; + } +@@ -768,7 +768,7 @@ Optional<CNetMessage> V1TransportDeserializer::GetMessage(const std::chrono::mic + RandAddEvent(ReadLE32(hash.begin())); + + // Check checksum and header command string +- if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) { ++ if (false && memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) { // skip checksum checking + LogPrint(BCLog::NET, "CHECKSUM ERROR (%s, %u bytes), expected %s was %s, peer=%d\n", + SanitizeString(msg->m_command), msg->m_message_size, + HexStr(Span<uint8_t>(hash.begin(), hash.begin() + CMessageHeader::CHECKSUM_SIZE)), +EOF +$ make -C src/ bitcoind +$ mkdir -p inputs/ +$ honggfuzz/honggfuzz --exit_upon_crash --quiet --timeout 4 -n 1 -Q \ + -E HFND_TCP_PORT=18444 -f inputs/ -- \ + src/bitcoind -regtest -discover=0 -dns=0 -dnsseed=0 -listenonion=0 \ + -nodebuglogfile -bind=127.0.0.1:18444 -logthreadnames \ + -debug +``` diff --git a/doc/init.md b/doc/init.md index 99aa0a0def..399b819bf4 100644 --- a/doc/init.md +++ b/doc/init.md @@ -53,11 +53,12 @@ Paths All three configurations assume several paths that might need to be adjusted. 
-Binary: `/usr/bin/bitcoind` -Configuration file: `/etc/bitcoin/bitcoin.conf` -Data directory: `/var/lib/bitcoind` -PID file: `/var/run/bitcoind/bitcoind.pid` (OpenRC and Upstart) or `/run/bitcoind/bitcoind.pid` (systemd) -Lock file: `/var/lock/subsys/bitcoind` (CentOS) + Binary: /usr/bin/bitcoind + Configuration file: /etc/bitcoin/bitcoin.conf + Data directory: /var/lib/bitcoind + PID file: /var/run/bitcoind/bitcoind.pid (OpenRC and Upstart) or + /run/bitcoind/bitcoind.pid (systemd) + Lock file: /var/lock/subsys/bitcoind (CentOS) The PID directory (if applicable) and data directory should both be owned by the bitcoin user and group. It is advised for security reasons to make the @@ -83,10 +84,10 @@ OpenRC). ### macOS -Binary: `/usr/local/bin/bitcoind` -Configuration file: `~/Library/Application Support/Bitcoin/bitcoin.conf` -Data directory: `~/Library/Application Support/Bitcoin` -Lock file: `~/Library/Application Support/Bitcoin/.lock` + Binary: /usr/local/bin/bitcoind + Configuration file: ~/Library/Application Support/Bitcoin/bitcoin.conf + Data directory: ~/Library/Application Support/Bitcoin + Lock file: ~/Library/Application Support/Bitcoin/.lock Installing Service Configuration ----------------------------------- diff --git a/doc/release-notes.md b/doc/release-notes.md index 8f1e03e16b..0f248494c7 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -93,6 +93,10 @@ Tools and Utilities Wallet ------ +- A new `listdescriptors` RPC is available to inspect the contents of descriptor-enabled wallets. + The RPC returns public versions of all imported descriptors, including their timestamp and flags. + For ranged descriptors, it also returns the range boundaries and the next index to generate addresses from. (#20226) + GUI changes ----------- diff --git a/doc/release-process.md b/doc/release-process.md index 9fdb19edf1..84b208a0d8 100644 --- a/doc/release-process.md +++ b/doc/release-process.md @@ -5,7 +5,7 @@ Release Process ### Before every release candidate -* Update translations (ping wumpus on IRC) see [translation_process.md](https://github.com/bitcoin/bitcoin/blob/master/doc/translation_process.md#synchronising-translations). +* Update translations see [translation_process.md](https://github.com/bitcoin/bitcoin/blob/master/doc/translation_process.md#synchronising-translations). * Update manpages, see [gen-manpages.sh](https://github.com/bitcoin/bitcoin/blob/master/contrib/devtools/README.md#gen-manpagessh). * Update release candidate version in `configure.ac` (`CLIENT_VERSION_RC`). @@ -52,6 +52,13 @@ Release Process - Merge the release notes from the wiki into the branch. - Ensure the "Needs release note" label is removed from all relevant pull requests and issues. +#### Tagging a release (candidate) + +To tag the version (or release candidate) in git, use the `make-tag.py` script from [bitcoin-maintainer-tools](https://github.com/bitcoin-core/bitcoin-maintainer-tools). From the root of the repository run: + + ../bitcoin-maintainer-tools/make-tag.py v(new version, e.g. 0.20.0) + +This will perform a few last-minute consistency checks in the build system files, and if they pass, create a signed tag. ## Building @@ -73,21 +80,12 @@ Open a draft of the release notes for collaborative editing at https://github.co For the period during which the notes are being edited on the wiki, the version on the branch should be wiped and replaced with a link to the wiki which should be used for all announcements until `-final`. -Write the release notes. 
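Returning to the `listdescriptors` entry in the release notes above: for illustration (not part of the patch), the new RPC can be exercised against a loaded descriptor wallet via `bitcoin-cli`. A sketch, where `mywallet` is a placeholder wallet name and the result is simply pretty-printed rather than assuming a particular output shape:

```python
import json
import subprocess

# "mywallet" is a placeholder; any loaded descriptor-enabled wallet will do.
raw = subprocess.run(
    ["bitcoin-cli", "-rpcwallet=mywallet", "listdescriptors"],
    capture_output=True, text=True, check=True,
).stdout

# Pretty-print the result: public descriptors with their timestamps and flags,
# plus range boundaries and the next index for ranged descriptors.
print(json.dumps(json.loads(raw), indent=2))
```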
`git shortlog` helps a lot, for example: - - git shortlog --no-merges v(current version, e.g. 0.19.2)..v(new version, e.g. 0.20.0) - -(or ping @wumpus on IRC, he has specific tooling to generate the list of merged pulls -and sort them into categories based on labels). +Generate the change log. As this is a huge amount of work to do manually, there is the `list-pulls` script to do a pre-sorting step based on github PR metadata. See the [documentation in the README.md](https://github.com/bitcoin-core/bitcoin-maintainer-tools/blob/master/README.md#list-pulls). Generate list of authors: git log --format='- %aN' v(current version, e.g. 0.20.0)..v(new version, e.g. 0.20.1) | sort -fiu -Tag the version (or release candidate) in git: - - git tag -s v(new version, e.g. 0.20.0) - ### Setup and perform Gitian builds If you're using the automated script (found in [contrib/gitian-build.py](/contrib/gitian-build.py)), then at this point you should run it with the "--build" command. Otherwise ignore this. diff --git a/doc/tor.md b/doc/tor.md index 8a2aef2d07..e38ada5bd6 100644 --- a/doc/tor.md +++ b/doc/tor.md @@ -21,39 +21,39 @@ information in the debug log about your Tor configuration. The first step is running Bitcoin Core behind a Tor proxy. This will already anonymize all outgoing connections, but more is possible. - -proxy=ip:port Set the proxy server. If SOCKS5 is selected (default), this proxy - server will be used to try to reach .onion addresses as well. - You need to use -noonion or -onion=0 to explicitly disable - outbound access to onion services. - - -onion=ip:port Set the proxy server to use for Tor onion services. You do not - need to set this if it's the same as -proxy. You can use -onion=0 - to explicitly disable access to onion services. - Note: Only the -proxy option sets the proxy for DNS requests; - with -onion they will not route over Tor, so use -proxy if you - have privacy concerns. - - -listen When using -proxy, listening is disabled by default. If you want - to manually configure an onion service (see section 3), you'll - need to enable it explicitly. - - -connect=X When behind a Tor proxy, you can specify .onion addresses instead - -addnode=X of IP addresses or hostnames in these parameters. It requires - -seednode=X SOCKS5. In Tor mode, such addresses can also be exchanged with - other P2P nodes. - - -onlynet=onion Make outgoing connections only to .onion addresses. Incoming - connections are not affected by this option. This option can be - specified multiple times to allow multiple network types, e.g. - ipv4, ipv6 or onion. If you use this option with values other - than onion you *cannot* disable onion connections; outgoing onion - connections will be enabled when you use -proxy or -onion. Use - -noonion or -onion=0 if you want to be sure there are no outbound - onion connections over the default proxy or your defined -proxy. + -proxy=ip:port Set the proxy server. If SOCKS5 is selected (default), this proxy + server will be used to try to reach .onion addresses as well. + You need to use -noonion or -onion=0 to explicitly disable + outbound access to onion services. + + -onion=ip:port Set the proxy server to use for Tor onion services. You do not + need to set this if it's the same as -proxy. You can use -onion=0 + to explicitly disable access to onion services. + Note: Only the -proxy option sets the proxy for DNS requests; + with -onion they will not route over Tor, so use -proxy if you + have privacy concerns. 
+ + -listen When using -proxy, listening is disabled by default. If you want + to manually configure an onion service (see section 3), you'll + need to enable it explicitly. + + -connect=X When behind a Tor proxy, you can specify .onion addresses instead + -addnode=X of IP addresses or hostnames in these parameters. It requires + -seednode=X SOCKS5. In Tor mode, such addresses can also be exchanged with + other P2P nodes. + + -onlynet=onion Make outgoing connections only to .onion addresses. Incoming + connections are not affected by this option. This option can be + specified multiple times to allow multiple network types, e.g. + ipv4, ipv6 or onion. If you use this option with values other + than onion you *cannot* disable onion connections; outgoing onion + connections will be enabled when you use -proxy or -onion. Use + -noonion or -onion=0 if you want to be sure there are no outbound + onion connections over the default proxy or your defined -proxy. In a typical situation, this suffices to run behind a Tor proxy: - ./bitcoind -proxy=127.0.0.1:9050 + ./bitcoind -proxy=127.0.0.1:9050 ## 2. Automatically create a Bitcoin Core onion service @@ -152,57 +152,57 @@ details). You can also manually configure your node to be reachable from the Tor network. Add these lines to your `/etc/tor/torrc` (or equivalent config file): - HiddenServiceDir /var/lib/tor/bitcoin-service/ - HiddenServicePort 8333 127.0.0.1:8334 + HiddenServiceDir /var/lib/tor/bitcoin-service/ + HiddenServicePort 8333 127.0.0.1:8334 The directory can be different of course, but virtual port numbers should be equal to your bitcoind's P2P listen port (8333 by default), and target addresses and ports should be equal to binding address and port for inbound Tor connections (127.0.0.1:8334 by default). - -externalip=X You can tell bitcoin about its publicly reachable addresses using - this option, and this can be an onion address. Given the above - configuration, you can find your onion address in - /var/lib/tor/bitcoin-service/hostname. For connections - coming from unroutable addresses (such as 127.0.0.1, where the - Tor proxy typically runs), onion addresses are given - preference for your node to advertise itself with. - - You can set multiple local addresses with -externalip. The - one that will be rumoured to a particular peer is the most - compatible one and also using heuristics, e.g. the address - with the most incoming connections, etc. - - -listen You'll need to enable listening for incoming connections, as this - is off by default behind a proxy. - - -discover When -externalip is specified, no attempt is made to discover local - IPv4 or IPv6 addresses. If you want to run a dual stack, reachable - from both Tor and IPv4 (or IPv6), you'll need to either pass your - other addresses using -externalip, or explicitly enable -discover. - Note that both addresses of a dual-stack system may be easily - linkable using traffic analysis. + -externalip=X You can tell bitcoin about its publicly reachable addresses using + this option, and this can be an onion address. Given the above + configuration, you can find your onion address in + /var/lib/tor/bitcoin-service/hostname. For connections + coming from unroutable addresses (such as 127.0.0.1, where the + Tor proxy typically runs), onion addresses are given + preference for your node to advertise itself with. + + You can set multiple local addresses with -externalip. The + one that will be rumoured to a particular peer is the most + compatible one and also using heuristics, e.g. 
the address + with the most incoming connections, etc. + + -listen You'll need to enable listening for incoming connections, as this + is off by default behind a proxy. + + -discover When -externalip is specified, no attempt is made to discover local + IPv4 or IPv6 addresses. If you want to run a dual stack, reachable + from both Tor and IPv4 (or IPv6), you'll need to either pass your + other addresses using -externalip, or explicitly enable -discover. + Note that both addresses of a dual-stack system may be easily + linkable using traffic analysis. In a typical situation, where you're only reachable via Tor, this should suffice: - ./bitcoind -proxy=127.0.0.1:9050 -externalip=7zvj7a2imdgkdbg4f2dryd5rgtrn7upivr5eeij4cicjh65pooxeshid.onion -listen + ./bitcoind -proxy=127.0.0.1:9050 -externalip=7zvj7a2imdgkdbg4f2dryd5rgtrn7upivr5eeij4cicjh65pooxeshid.onion -listen (obviously, replace the .onion address with your own). It should be noted that you still listen on all devices and another node could establish a clearnet connection, when knowing your address. To mitigate this, additionally bind the address of your Tor proxy: - ./bitcoind ... -bind=127.0.0.1 + ./bitcoind ... -bind=127.0.0.1 If you don't care too much about hiding your node, and want to be reachable on IPv4 as well, use `discover` instead: - ./bitcoind ... -discover + ./bitcoind ... -discover and open port 8333 on your firewall (or use port mapping, i.e., `-upnp` or `-natpmp`). If you only want to use Tor to reach .onion addresses, but not use it as a proxy for normal IPv4/IPv6 communication, use: - ./bitcoind -onion=127.0.0.1:9050 -externalip=7zvj7a2imdgkdbg4f2dryd5rgtrn7upivr5eeij4cicjh65pooxeshid.onion -discover + ./bitcoind -onion=127.0.0.1:9050 -externalip=7zvj7a2imdgkdbg4f2dryd5rgtrn7upivr5eeij4cicjh65pooxeshid.onion -discover ## 4. Privacy recommendations diff --git a/src/.clang-format b/src/.clang-format index ef7a0ef5c7..a69c57f3e0 100644 --- a/src/.clang-format +++ b/src/.clang-format @@ -11,7 +11,8 @@ AllowShortIfStatementsOnASingleLine: true AllowShortLoopsOnASingleLine: false AlwaysBreakBeforeMultilineStrings: false AlwaysBreakTemplateDeclarations: true -BinPackParameters: false +BinPackArguments: true +BinPackParameters: true BreakBeforeBinaryOperators: false BreakBeforeBraces: Custom BraceWrapping: diff --git a/src/Makefile.am b/src/Makefile.am index 2616eb8638..67efbbeae4 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -4,7 +4,7 @@ # Pattern rule to print variables, e.g. 
make print-top_srcdir print-%: - @echo $* = $($*) + @echo '$*' = '$($*)' DIST_SUBDIRS = secp256k1 univalue @@ -233,6 +233,7 @@ BITCOIN_CORE_H = \ util/check.h \ util/error.h \ util/fees.h \ + util/getuniquepath.h \ util/golombrice.h \ util/hasher.h \ util/macros.h \ @@ -242,6 +243,7 @@ BITCOIN_CORE_H = \ util/rbf.h \ util/ref.h \ util/settings.h \ + util/sock.h \ util/spanparsing.h \ util/string.h \ util/system.h \ @@ -556,7 +558,9 @@ libbitcoin_util_a_SOURCES = \ util/bytevectorhash.cpp \ util/error.cpp \ util/fees.cpp \ + util/getuniquepath.cpp \ util/hasher.cpp \ + util/sock.cpp \ util/system.cpp \ util/message.cpp \ util/moneystr.cpp \ @@ -683,7 +687,7 @@ endif bitcoin_util_SOURCES = bitcoin-util.cpp bitcoin_util_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) bitcoin_util_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) -bitcoin_util_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) +bitcoin_util_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) $(PTHREAD_FLAGS) if TARGET_WINDOWS bitcoin_util_SOURCES += bitcoin-util-res.rc diff --git a/src/Makefile.test.include b/src/Makefile.test.include index e9f9b73abe..e817bb2ee2 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -2,9 +2,11 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -if ENABLE_FUZZ +if ENABLE_FUZZ_BINARY noinst_PROGRAMS += test/fuzz/fuzz -else +endif + +if !ENABLE_FUZZ bin_PROGRAMS += test/test_bitcoin endif @@ -50,6 +52,14 @@ FUZZ_SUITE_LD_COMMON = \ $(EVENT_LIBS) \ $(EVENT_PTHREADS_LIBS) +if USE_UPNP +FUZZ_SUITE_LD_COMMON += $(MINIUPNPC_LIBS) +endif + +if USE_NATPMP +FUZZ_SUITE_LD_COMMON += $(NATPMP_LIBS) +endif + # test_bitcoin binary # BITCOIN_TESTS =\ test/arith_uint256_tests.cpp \ @@ -114,6 +124,7 @@ BITCOIN_TESTS =\ test/sighash_tests.cpp \ test/sigopcount_tests.cpp \ test/skiplist_tests.cpp \ + test/sock_tests.cpp \ test/streams_tests.cpp \ test/sync_tests.cpp \ test/system_tests.cpp \ @@ -145,10 +156,16 @@ BITCOIN_TESTS += \ wallet/test/ismine_tests.cpp \ wallet/test/scriptpubkeyman_tests.cpp +FUZZ_SUITE_LD_COMMON +=\ + $(LIBBITCOIN_WALLET) \ + $(SQLITE_LIBS) \ + $(BDB_LIBS) + if USE_BDB BITCOIN_TESTS += wallet/test/db_tests.cpp endif + BITCOIN_TEST_SUITE += \ wallet/test/wallet_test_fixture.cpp \ wallet/test/wallet_test_fixture.h \ @@ -172,12 +189,12 @@ test_test_bitcoin_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) $( if ENABLE_ZMQ test_test_bitcoin_LDADD += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS) +FUZZ_SUITE_LD_COMMON += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS) endif -if ENABLE_FUZZ - FUZZ_SUITE_LDFLAGS_COMMON = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) $(PTHREAD_FLAGS) +if ENABLE_FUZZ_BINARY test_fuzz_fuzz_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) test_fuzz_fuzz_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_fuzz_LDADD = $(FUZZ_SUITE_LD_COMMON) @@ -278,8 +295,7 @@ test_fuzz_fuzz_SOURCES = \ test/fuzz/tx_in.cpp \ test/fuzz/tx_out.cpp \ test/fuzz/txrequest.cpp - -endif # ENABLE_FUZZ +endif # ENABLE_FUZZ_BINARY nodist_test_test_bitcoin_SOURCES = $(GENERATED_TEST_FILES) diff --git a/src/Makefile.test_util.include b/src/Makefile.test_util.include index 0621da8ddf..1abfb667a0 100644 --- a/src/Makefile.test_util.include +++ b/src/Makefile.test_util.include @@ -12,6 +12,7 @@ TEST_UTIL_H = \ test/util/logging.h \ test/util/mining.h \ test/util/net.h \ + test/util/script.h \ test/util/setup_common.h \ test/util/str.h \ test/util/transaction_utils.h \ diff --git a/src/addrman.cpp 
b/src/addrman.cpp index ed7fccc0ff..f91121f156 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -9,6 +9,8 @@ #include <logging.h> #include <serialize.h> +#include <cmath> + int CAddrInfo::GetTriedBucket(const uint256& nKey, const std::vector<bool> &asmap) const { uint64_t hash1 = (CHashWriter(SER_GETHASH, 0) << nKey << GetKey()).GetCheapHash(); diff --git a/src/addrman.h b/src/addrman.h index 9ac67b7af6..92a5570953 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -335,22 +335,20 @@ public: * * nNew * * nTried * * number of "new" buckets XOR 2**30 - * * all nNew addrinfos in vvNew - * * all nTried addrinfos in vvTried - * * for each bucket: + * * all new addresses (total count: nNew) + * * all tried addresses (total count: nTried) + * * for each new bucket: * * number of elements - * * for each element: index + * * for each element: index in the serialized "all new addresses" + * * asmap checksum * * 2**30 is xorred with the number of buckets to make addrman deserializer v0 detect it * as incompatible. This is necessary because it did not check the version number on * deserialization. * - * Notice that vvTried, mapAddr and vVector are never encoded explicitly; + * vvNew, vvTried, mapInfo, mapAddr and vRandom are never encoded explicitly; * they are instead reconstructed from the other information. * - * vvNew is serialized, but only used if ADDRMAN_UNKNOWN_BUCKET_COUNT didn't change, - * otherwise it is reconstructed as well. - * * This format is more complex, but significantly smaller (at most 1.5 MiB), and supports * changes to the ADDRMAN_ parameters without breaking the on-disk structure. * @@ -413,13 +411,13 @@ public: } } } - // Store asmap version after bucket entries so that it + // Store asmap checksum after bucket entries so that it // can be ignored by older clients for backward compatibility. - uint256 asmap_version; + uint256 asmap_checksum; if (m_asmap.size() != 0) { - asmap_version = SerializeHash(m_asmap); + asmap_checksum = SerializeHash(m_asmap); } - s << asmap_version; + s << asmap_checksum; } template <typename Stream> @@ -500,47 +498,63 @@ public: nTried -= nLost; // Store positions in the new table buckets to apply later (if possible). - std::map<int, int> entryToBucket; // Represents which entry belonged to which bucket when serializing - - for (int bucket = 0; bucket < nUBuckets; bucket++) { - int nSize = 0; - s >> nSize; - for (int n = 0; n < nSize; n++) { - int nIndex = 0; - s >> nIndex; - if (nIndex >= 0 && nIndex < nNew) { - entryToBucket[nIndex] = bucket; + // An entry may appear in up to ADDRMAN_NEW_BUCKETS_PER_ADDRESS buckets, + // so we store all bucket-entry_index pairs to iterate through later. + std::vector<std::pair<int, int>> bucket_entries; + + for (int bucket = 0; bucket < nUBuckets; ++bucket) { + int num_entries{0}; + s >> num_entries; + for (int n = 0; n < num_entries; ++n) { + int entry_index{0}; + s >> entry_index; + if (entry_index >= 0 && entry_index < nNew) { + bucket_entries.emplace_back(bucket, entry_index); } } } - uint256 supplied_asmap_version; + // If the bucket count and asmap checksum haven't changed, then attempt + // to restore the entries to the buckets/positions they were in before + // serialization. 
+ uint256 supplied_asmap_checksum; if (m_asmap.size() != 0) { - supplied_asmap_version = SerializeHash(m_asmap); + supplied_asmap_checksum = SerializeHash(m_asmap); } - uint256 serialized_asmap_version; + uint256 serialized_asmap_checksum; if (format >= Format::V2_ASMAP) { - s >> serialized_asmap_version; + s >> serialized_asmap_checksum; } + const bool restore_bucketing{nUBuckets == ADDRMAN_NEW_BUCKET_COUNT && + serialized_asmap_checksum == supplied_asmap_checksum}; - for (int n = 0; n < nNew; n++) { - CAddrInfo &info = mapInfo[n]; - int bucket = entryToBucket[n]; - int nUBucketPos = info.GetBucketPosition(nKey, true, bucket); - if (format >= Format::V2_ASMAP && nUBuckets == ADDRMAN_NEW_BUCKET_COUNT && vvNew[bucket][nUBucketPos] == -1 && - info.nRefCount < ADDRMAN_NEW_BUCKETS_PER_ADDRESS && serialized_asmap_version == supplied_asmap_version) { + if (!restore_bucketing) { + LogPrint(BCLog::ADDRMAN, "Bucketing method was updated, re-bucketing addrman entries from disk\n"); + } + + for (auto bucket_entry : bucket_entries) { + int bucket{bucket_entry.first}; + const int entry_index{bucket_entry.second}; + CAddrInfo& info = mapInfo[entry_index]; + + // The entry shouldn't appear in more than + // ADDRMAN_NEW_BUCKETS_PER_ADDRESS. If it has already, just skip + // this bucket_entry. + if (info.nRefCount >= ADDRMAN_NEW_BUCKETS_PER_ADDRESS) continue; + + int bucket_position = info.GetBucketPosition(nKey, true, bucket); + if (restore_bucketing && vvNew[bucket][bucket_position] == -1) { // Bucketing has not changed, using existing bucket positions for the new table - vvNew[bucket][nUBucketPos] = n; - info.nRefCount++; + vvNew[bucket][bucket_position] = entry_index; + ++info.nRefCount; } else { - // In case the new table data cannot be used (format unknown, bucket count wrong or new asmap), + // In case the new table data cannot be used (bucket count wrong or new asmap), // try to give them a reference based on their primary source address. - LogPrint(BCLog::ADDRMAN, "Bucketing method was updated, re-bucketing addrman entries from disk\n"); bucket = info.GetNewBucket(nKey, m_asmap); - nUBucketPos = info.GetBucketPosition(nKey, true, bucket); - if (vvNew[bucket][nUBucketPos] == -1) { - vvNew[bucket][nUBucketPos] = n; - info.nRefCount++; + bucket_position = info.GetBucketPosition(nKey, true, bucket); + if (vvNew[bucket][bucket_position] == -1) { + vvNew[bucket][bucket_position] = entry_index; + ++info.nRefCount; } } } diff --git a/src/base58.cpp b/src/base58.cpp index 65e373283c..fb04673c5c 100644 --- a/src/base58.cpp +++ b/src/base58.cpp @@ -52,7 +52,7 @@ static const int8_t mapBase58[256] = { int size = strlen(psz) * 733 /1000 + 1; // log(58) / log(256), rounded up. std::vector<unsigned char> b256(size); // Process the characters. 
- static_assert(sizeof(mapBase58)/sizeof(mapBase58[0]) == 256, "mapBase58.size() should be 256"); // guarantee not out of range + static_assert(std::size(mapBase58) == 256, "mapBase58.size() should be 256"); // guarantee not out of range while (*psz && !IsSpace(*psz)) { // Decode base58 character int carry = mapBase58[(uint8_t)*psz]; diff --git a/src/bench/bench.h b/src/bench/bench.h index bafc7f8716..22f06d8cb8 100644 --- a/src/bench/bench.h +++ b/src/bench/bench.h @@ -5,6 +5,8 @@ #ifndef BITCOIN_BENCH_BENCH_H #define BITCOIN_BENCH_BENCH_H +#include <util/macros.h> + #include <chrono> #include <functional> #include <map> @@ -12,8 +14,6 @@ #include <vector> #include <bench/nanobench.h> -#include <boost/preprocessor/cat.hpp> -#include <boost/preprocessor/stringize.hpp> /* * Usage: @@ -56,8 +56,8 @@ public: static void RunAll(const Args& args); }; } -// BENCHMARK(foo) expands to: benchmark::BenchRunner bench_11foo("foo"); +// BENCHMARK(foo) expands to: benchmark::BenchRunner bench_11foo("foo", foo); #define BENCHMARK(n) \ - benchmark::BenchRunner BOOST_PP_CAT(bench_, BOOST_PP_CAT(__LINE__, n))(BOOST_PP_STRINGIZE(n), n); + benchmark::BenchRunner PASTE2(bench_, PASTE2(__LINE__, n))(STRINGIZE(n), n); #endif // BITCOIN_BENCH_BENCH_H diff --git a/src/bench/block_assemble.cpp b/src/bench/block_assemble.cpp index af5a82f69f..8f656c44d9 100644 --- a/src/bench/block_assemble.cpp +++ b/src/bench/block_assemble.cpp @@ -48,9 +48,8 @@ static void AssembleBlock(benchmark::Bench& bench) LOCK(::cs_main); // Required for ::AcceptToMemoryPool. for (const auto& txr : txs) { - TxValidationState state; - bool ret{::AcceptToMemoryPool(*test_setup.m_node.mempool, state, txr, nullptr /* plTxnReplaced */, false /* bypass_limits */)}; - assert(ret); + const MempoolAcceptResult res = ::AcceptToMemoryPool(::ChainstateActive(), *test_setup.m_node.mempool, txr, false /* bypass_limits */); + assert(res.m_result_type == MempoolAcceptResult::ResultType::VALID); } } diff --git a/src/bench/coin_selection.cpp b/src/bench/coin_selection.cpp index e50c4476bb..3abfbfd784 100644 --- a/src/bench/coin_selection.cpp +++ b/src/bench/coin_selection.cpp @@ -42,20 +42,19 @@ static void CoinSelection(benchmark::Bench& bench) } addCoin(3 * COIN, wallet, wtxs); - // Create groups - std::vector<OutputGroup> groups; + // Create coins + std::vector<COutput> coins; for (const auto& wtx : wtxs) { - COutput output(wtx.get(), 0 /* iIn */, 6 * 24 /* nDepthIn */, true /* spendable */, true /* solvable */, true /* safe */); - groups.emplace_back(output.GetInputCoin(), 6, false, 0, 0); + coins.emplace_back(wtx.get(), 0 /* iIn */, 6 * 24 /* nDepthIn */, true /* spendable */, true /* solvable */, true /* safe */); } const CoinEligibilityFilter filter_standard(1, 6, 0); - const CoinSelectionParams coin_selection_params(true, 34, 148, CFeeRate(0), 0); + const CoinSelectionParams coin_selection_params(true, 34, 148, CFeeRate(0), 0, false); bench.run([&] { std::set<CInputCoin> setCoinsRet; CAmount nValueRet; bool bnb_used; - bool success = wallet.SelectCoinsMinConf(1003 * COIN, filter_standard, groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used); + bool success = wallet.SelectCoinsMinConf(1003 * COIN, filter_standard, coins, setCoinsRet, nValueRet, coin_selection_params, bnb_used); assert(success); assert(nValueRet == 1003 * COIN); assert(setCoinsRet.size() == 2); @@ -75,7 +74,8 @@ static void add_coin(const CAmount& nValue, int nInput, std::vector<OutputGroup> tx.vout.resize(nInput + 1); tx.vout[nInput].nValue = nValue; 
std::unique_ptr<CWalletTx> wtx = MakeUnique<CWalletTx>(&testWallet, MakeTransactionRef(std::move(tx))); - set.emplace_back(COutput(wtx.get(), nInput, 0, true, true, true).GetInputCoin(), 0, true, 0, 0); + set.emplace_back(); + set.back().Insert(COutput(wtx.get(), nInput, 0, true, true, true).GetInputCoin(), 0, true, 0, 0, false); wtxn.emplace_back(std::move(wtx)); } // Copied from src/wallet/test/coinselector_tests.cpp diff --git a/src/bench/data.cpp b/src/bench/data.cpp index 0ae4c7cad4..481e372105 100644 --- a/src/bench/data.cpp +++ b/src/bench/data.cpp @@ -8,7 +8,7 @@ namespace benchmark { namespace data { #include <bench/data/block413567.raw.h> -const std::vector<uint8_t> block413567{block413567_raw, block413567_raw + sizeof(block413567_raw) / sizeof(block413567_raw[0])}; +const std::vector<uint8_t> block413567{std::begin(block413567_raw), std::end(block413567_raw)}; } // namespace data } // namespace benchmark diff --git a/src/bench/prevector.cpp b/src/bench/prevector.cpp index dcd0e10285..f4fabedab6 100644 --- a/src/bench/prevector.cpp +++ b/src/bench/prevector.cpp @@ -76,7 +76,7 @@ static void PrevectorDeserialize(benchmark::Bench& bench) for (auto x = 0; x < 1000; ++x) { s0 >> t1; } - s0.Init(SER_NETWORK, 0); + s0.Rewind(); }); } diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index 6a0d44a183..0830cb54cb 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -21,6 +21,7 @@ #include <util/url.h> #include <algorithm> +#include <cmath> #include <functional> #include <memory> #include <stdio.h> @@ -299,9 +300,13 @@ class NetinfoRequestHandler : public BaseRequestHandler { private: static constexpr int8_t UNKNOWN_NETWORK{-1}; - static constexpr uint8_t m_networks_size{3}; - const std::array<std::string, m_networks_size> m_networks{{"ipv4", "ipv6", "onion"}}; - std::array<std::array<uint16_t, m_networks_size + 2>, 3> m_counts{{{}}}; //!< Peer counts by (in/out/total, networks/total/block-relay) + static constexpr int8_t NET_I2P{3}; // pos of "i2p" in m_networks + static constexpr uint8_t m_networks_size{4}; + static constexpr uint8_t MAX_DETAIL_LEVEL{4}; + const std::array<std::string, m_networks_size> m_networks{{"ipv4", "ipv6", "onion", "i2p"}}; + std::array<std::array<uint16_t, m_networks_size + 1>, 3> m_counts{{{}}}; //!< Peer counts by (in/out/total, networks/total) + uint8_t m_block_relay_peers_count{0}; + uint8_t m_manual_peers_count{0}; int8_t NetworkStringToId(const std::string& str) const { for (uint8_t i = 0; i < m_networks_size; ++i) { @@ -315,12 +320,14 @@ private: bool IsAddressSelected() const { return m_details_level == 2 || m_details_level == 4; } bool IsVersionSelected() const { return m_details_level == 3 || m_details_level == 4; } bool m_is_asmap_on{false}; + bool m_is_i2p_on{false}; size_t m_max_addr_length{0}; - size_t m_max_age_length{4}; + size_t m_max_age_length{3}; size_t m_max_id_length{2}; struct Peer { std::string addr; std::string sub_version; + std::string conn_type; std::string network; std::string age; double min_ping; @@ -332,6 +339,8 @@ private: int id; int mapped_as; int version; + bool is_bip152_hb_from; + bool is_bip152_hb_to; bool is_block_relay; bool is_outbound; bool operator<(const Peer& rhs) const { return std::tie(is_outbound, min_ping) < std::tie(rhs.is_outbound, rhs.min_ping); } @@ -350,6 +359,14 @@ private: const double milliseconds{round(1000 * seconds)}; return milliseconds > 999999 ? 
"-" : ToString(milliseconds); } + std::string ConnectionTypeForNetinfo(const std::string& conn_type) const + { + if (conn_type == "outbound-full-relay") return "full"; + if (conn_type == "block-relay-only") return "block"; + if (conn_type == "manual" || conn_type == "feeler") return conn_type; + if (conn_type == "addr-fetch") return "addr"; + return ""; + } const UniValue NetinfoHelp() { return std::string{ @@ -378,6 +395,9 @@ private: " type Type of peer connection\n" " \"full\" - full relay, the default\n" " \"block\" - block relay; like full relay but does not relay transactions or addresses\n" + " \"manual\" - peer we manually added using RPC addnode or the -addnode/-connect config options\n" + " \"feeler\" - short-lived connection for testing addresses\n" + " \"addr\" - address fetch; short-lived connection for requesting addresses\n" " net Network the peer connected through (\"ipv4\", \"ipv6\", \"onion\", \"i2p\", or \"cjdns\")\n" " mping Minimum observed ping time, in milliseconds (ms)\n" " ping Last observed ping time, in milliseconds (ms)\n" @@ -385,6 +405,9 @@ private: " recv Time since last message received from the peer, in seconds\n" " txn Time since last novel transaction received from the peer and accepted into our mempool, in minutes\n" " blk Time since last novel block passing initial validity checks received from the peer, in minutes\n" + " hb High-bandwidth BIP152 compact block relay\n" + " \".\" (to) - we selected the peer as a high-bandwidth peer\n" + " \"*\" (from) - the peer selected us as a high-bandwidth peer\n" " age Duration of connection to the peer, in minutes\n" " asmap Mapped AS (Autonomous System) number in the BGP route to the peer, used for diversifying\n" " peer selection (only displayed if the -asmap config option is set)\n" @@ -392,7 +415,7 @@ private: " address IP address and port of the peer\n" " version Peer version and subversion concatenated, e.g. 
\"70016/Satoshi:21.0.0/\"\n\n" "* The connection counts table displays the number of peers by direction, network, and the totals\n" - " for each, as well as a column for block relay peers.\n\n" + " for each, as well as two special outbound columns for block relay peers and manual peers.\n\n" "* The local addresses table lists each local address broadcast by the node, the port, and the score.\n\n" "Examples:\n\n" "Connection counts and local addresses only\n" @@ -417,7 +440,7 @@ public: if (!args.empty()) { uint8_t n{0}; if (ParseUInt8(args.at(0), &n)) { - m_details_level = n; + m_details_level = std::min(n, MAX_DETAIL_LEVEL); } else if (args.at(0) == "help") { m_is_help_requested = true; } else { @@ -449,16 +472,16 @@ public: const std::string network{peer["network"].get_str()}; const int8_t network_id{NetworkStringToId(network)}; if (network_id == UNKNOWN_NETWORK) continue; + m_is_i2p_on |= (network_id == NET_I2P); const bool is_outbound{!peer["inbound"].get_bool()}; const bool is_block_relay{!peer["relaytxes"].get_bool()}; + const std::string conn_type{peer["connection_type"].get_str()}; ++m_counts.at(is_outbound).at(network_id); // in/out by network ++m_counts.at(is_outbound).at(m_networks_size); // in/out overall ++m_counts.at(2).at(network_id); // total by network ++m_counts.at(2).at(m_networks_size); // total overall - if (is_block_relay) { - ++m_counts.at(is_outbound).at(m_networks_size + 1); // in/out block-relay - ++m_counts.at(2).at(m_networks_size + 1); // total block-relay - } + if (conn_type == "block-relay-only") ++m_block_relay_peers_count; + if (conn_type == "manual") ++m_manual_peers_count; if (DetailsRequested()) { // Push data for this peer to the peers vector. const int peer_id{peer["id"].get_int()}; @@ -474,7 +497,9 @@ public: const std::string addr{peer["addr"].get_str()}; const std::string age{conn_time == 0 ? "" : ToString((m_time_now - conn_time) / 60)}; const std::string sub_version{peer["subver"].get_str()}; - m_peers.push_back({addr, sub_version, network, age, min_ping, ping, last_blck, last_recv, last_send, last_trxn, peer_id, mapped_as, version, is_block_relay, is_outbound}); + const bool is_bip152_hb_from{peer["bip152_hb_from"].get_bool()}; + const bool is_bip152_hb_to{peer["bip152_hb_to"].get_bool()}; + m_peers.push_back({addr, sub_version, conn_type, network, age, min_ping, ping, last_blck, last_recv, last_send, last_trxn, peer_id, mapped_as, version, is_bip152_hb_from, is_bip152_hb_to, is_block_relay, is_outbound}); m_max_addr_length = std::max(addr.length() + 1, m_max_addr_length); m_max_age_length = std::max(age.length(), m_max_age_length); m_max_id_length = std::max(ToString(peer_id).length(), m_max_id_length); @@ -488,15 +513,15 @@ public: // Report detailed peer connections list sorted by direction and minimum ping time. if (DetailsRequested() && !m_peers.empty()) { std::sort(m_peers.begin(), m_peers.end()); - result += strprintf("<-> relay net mping ping send recv txn blk %*s ", m_max_age_length, "age"); + result += strprintf("<-> type net mping ping send recv txn blk hb %*s ", m_max_age_length, "age"); if (m_is_asmap_on) result += " asmap "; result += strprintf("%*s %-*s%s\n", m_max_id_length, "id", IsAddressSelected() ? m_max_addr_length : 0, IsAddressSelected() ? "address" : "", IsVersionSelected() ? 
"version" : ""); for (const Peer& peer : m_peers) { std::string version{ToString(peer.version) + peer.sub_version}; result += strprintf( - "%3s %5s %5s%7s%7s%5s%5s%5s%5s %*s%*i %*s %-*s%s\n", + "%3s %6s %5s%7s%7s%5s%5s%5s%5s %2s %*s%*i %*s %-*s%s\n", peer.is_outbound ? "out" : "in", - peer.is_block_relay ? "block" : "full", + ConnectionTypeForNetinfo(peer.conn_type), peer.network, PingTimeToString(peer.min_ping), PingTimeToString(peer.ping), @@ -504,6 +529,7 @@ public: peer.last_recv == 0 ? "" : ToString(m_time_now - peer.last_recv), peer.last_trxn == 0 ? "" : ToString((m_time_now - peer.last_trxn) / 60), peer.last_blck == 0 ? "" : ToString((m_time_now - peer.last_blck) / 60), + strprintf("%s%s", peer.is_bip152_hb_to ? "." : " ", peer.is_bip152_hb_from ? "*" : " "), m_max_age_length, // variable spacing peer.age, m_is_asmap_on ? 7 : 0, // variable spacing @@ -514,18 +540,27 @@ public: IsAddressSelected() ? peer.addr : "", IsVersionSelected() && version != "0" ? version : ""); } - result += strprintf(" ms ms sec sec min min %*s\n\n", m_max_age_length, "min"); + result += strprintf(" ms ms sec sec min min %*s\n\n", m_max_age_length, "min"); } // Report peer connection totals by type. - result += " ipv4 ipv6 onion total block-relay\n"; + result += " ipv4 ipv6 onion"; + if (m_is_i2p_on) result += " i2p"; + result += " total block"; + if (m_manual_peers_count) result += " manual"; const std::array<std::string, 3> rows{{"in", "out", "total"}}; - for (uint8_t i = 0; i < m_networks_size; ++i) { - result += strprintf("%-5s %5i %5i %5i %5i %5i\n", rows.at(i), m_counts.at(i).at(0), m_counts.at(i).at(1), m_counts.at(i).at(2), m_counts.at(i).at(m_networks_size), m_counts.at(i).at(m_networks_size + 1)); + for (uint8_t i = 0; i < 3; ++i) { + result += strprintf("\n%-5s %5i %5i %5i", rows.at(i), m_counts.at(i).at(0), m_counts.at(i).at(1), m_counts.at(i).at(2)); // ipv4/ipv6/onion peers counts + if (m_is_i2p_on) result += strprintf(" %5i", m_counts.at(i).at(3)); // i2p peers count + result += strprintf(" %5i", m_counts.at(i).at(m_networks_size)); // total peers count + if (i == 1) { // the outbound row has two extra columns for block relay and manual peer counts + result += strprintf(" %5i", m_block_relay_peers_count); + if (m_manual_peers_count) result += strprintf(" %5i", m_manual_peers_count); + } } // Report local addresses, ports, and scores. - result += "\nLocal addresses"; + result += "\n\nLocal addresses"; const std::vector<UniValue>& local_addrs{networkinfo["localaddresses"].getValues()}; if (local_addrs.empty()) { result += ": n/a\n"; diff --git a/src/bitcoin-wallet.cpp b/src/bitcoin-wallet.cpp index 3e8e5fc7bc..b84d909b07 100644 --- a/src/bitcoin-wallet.cpp +++ b/src/bitcoin-wallet.cpp @@ -33,51 +33,52 @@ static void SetupWalletToolArgs(ArgsManager& argsman) argsman.AddArg("-format=<format>", "The format of the wallet file to create. Either \"bdb\" or \"sqlite\". Only used with 'createfromdump'", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -debug is true, 0 otherwise).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); - argsman.AddArg("info", "Get wallet info", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - argsman.AddArg("create", "Create new wallet file", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - argsman.AddArg("salvage", "Attempt to recover private keys from a corrupt wallet. 
Warning: 'salvage' is experimental.", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - argsman.AddArg("dump", "Print out all of the wallet key-value records", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - argsman.AddArg("createfromdump", "Create new wallet file from dumped records", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); + argsman.AddCommand("info", "Get wallet info", OptionsCategory::COMMANDS); + argsman.AddCommand("create", "Create new wallet file", OptionsCategory::COMMANDS); + argsman.AddCommand("salvage", "Attempt to recover private keys from a corrupt wallet. Warning: 'salvage' is experimental.", OptionsCategory::COMMANDS); + argsman.AddCommand("dump", "Print out all of the wallet key-value records", OptionsCategory::COMMANDS); + argsman.AddCommand("createfromdump", "Create new wallet file from dumped records", OptionsCategory::COMMANDS); } -static bool WalletAppInit(int argc, char* argv[]) +static bool WalletAppInit(ArgsManager& args, int argc, char* argv[]) { - SetupWalletToolArgs(gArgs); + SetupWalletToolArgs(args); std::string error_message; - if (!gArgs.ParseParameters(argc, argv, error_message)) { + if (!args.ParseParameters(argc, argv, error_message)) { tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error_message); return false; } - if (argc < 2 || HelpRequested(gArgs) || gArgs.IsArgSet("-version")) { + if (argc < 2 || HelpRequested(args) || args.IsArgSet("-version")) { std::string strUsage = strprintf("%s bitcoin-wallet version", PACKAGE_NAME) + " " + FormatFullVersion() + "\n"; - if (!gArgs.IsArgSet("-version")) { - strUsage += "\n" - "bitcoin-wallet is an offline tool for creating and interacting with " PACKAGE_NAME " wallet files.\n" - "By default bitcoin-wallet will act on wallets in the default mainnet wallet directory in the datadir.\n" - "To change the target wallet, use the -datadir, -wallet and -testnet/-regtest arguments.\n\n" - "Usage:\n" - " bitcoin-wallet [options] <command>\n"; - strUsage += "\n" + gArgs.GetHelpMessage(); - } + if (!args.IsArgSet("-version")) { + strUsage += "\n" + "bitcoin-wallet is an offline tool for creating and interacting with " PACKAGE_NAME " wallet files.\n" + "By default bitcoin-wallet will act on wallets in the default mainnet wallet directory in the datadir.\n" + "To change the target wallet, use the -datadir, -wallet and -testnet/-regtest arguments.\n\n" + "Usage:\n" + " bitcoin-wallet [options] <command>\n"; + strUsage += "\n" + args.GetHelpMessage(); + } tfm::format(std::cout, "%s", strUsage); return false; } // check for printtoconsole, allow -debug - LogInstance().m_print_to_console = gArgs.GetBoolArg("-printtoconsole", gArgs.GetBoolArg("-debug", false)); + LogInstance().m_print_to_console = args.GetBoolArg("-printtoconsole", args.GetBoolArg("-debug", false)); if (!CheckDataDirOption()) { - tfm::format(std::cerr, "Error: Specified data directory \"%s\" does not exist.\n", gArgs.GetArg("-datadir", "")); + tfm::format(std::cerr, "Error: Specified data directory \"%s\" does not exist.\n", args.GetArg("-datadir", "")); return false; } // Check for chain settings (Params() calls are only valid after this clause) - SelectParams(gArgs.GetChainName()); + SelectParams(args.GetChainName()); return true; } int main(int argc, char* argv[]) { + ArgsManager& args = gArgs; #ifdef WIN32 util::WinCmdLineArgs winArgs; std::tie(argc, argv) = winArgs.get(); @@ -85,7 +86,7 @@ int main(int argc, char* argv[]) SetupEnvironment(); RandomInit(); try { - if (!WalletAppInit(argc, argv)) return EXIT_FAILURE; + 
if (!WalletAppInit(args, argc, argv)) return EXIT_FAILURE; } catch (const std::exception& e) { PrintExceptionContinue(&e, "WalletAppInit()"); return EXIT_FAILURE; @@ -94,33 +95,19 @@ int main(int argc, char* argv[]) return EXIT_FAILURE; } - std::string method {}; - for(int i = 1; i < argc; ++i) { - if (!IsSwitchChar(argv[i][0])) { - if (!method.empty()) { - tfm::format(std::cerr, "Error: two methods provided (%s and %s). Only one method should be provided.\n", method, argv[i]); - return EXIT_FAILURE; - } - method = argv[i]; - } - } - - if (method.empty()) { + const auto command = args.GetCommand(); + if (!command) { tfm::format(std::cerr, "No method provided. Run `bitcoin-wallet -help` for valid methods.\n"); return EXIT_FAILURE; } - - // A name must be provided when creating a file - if (method == "create" && !gArgs.IsArgSet("-wallet")) { - tfm::format(std::cerr, "Wallet name must be provided when creating a new wallet.\n"); + if (command->args.size() != 0) { + tfm::format(std::cerr, "Error: Additional arguments provided (%s). Methods do not take arguments. Please refer to `-help`.\n", Join(command->args, ", ")); return EXIT_FAILURE; } - std::string name = gArgs.GetArg("-wallet", ""); - ECCVerifyHandle globalVerifyHandle; ECC_Start(); - if (!WalletTool::ExecuteWalletToolFunc(gArgs, method, name)) { + if (!WalletTool::ExecuteWalletToolFunc(args, command->command)) { return EXIT_FAILURE; } ECC_Stop(); diff --git a/src/chain.h b/src/chain.h index 43e8a39f36..04a5db5a17 100644 --- a/src/chain.h +++ b/src/chain.h @@ -163,14 +163,27 @@ public: //! Number of transactions in this block. //! Note: in a potential headers-first mode, this number cannot be relied upon + //! Note: this value is faked during UTXO snapshot load to ensure that + //! LoadBlockIndex() will load index entries for blocks that we lack data for. + //! @sa ActivateSnapshot unsigned int nTx{0}; //! (memory only) Number of transactions in the chain up to and including this block. //! This value will be non-zero only if and only if transactions for this block and all its parents are available. //! Change to 64-bit type when necessary; won't happen before 2030 + //! + //! Note: this value is faked during use of a UTXO snapshot because we don't + //! have the underlying block data available during snapshot load. + //! @sa AssumeutxoData + //! @sa ActivateSnapshot unsigned int nChainTx{0}; //! Verification status of this block. See enum BlockStatus + //! + //! Note: this value is modified to show BLOCK_OPT_WITNESS during UTXO snapshot + //! load to avoid the block index being spuriously rewound. + //! @sa RewindBlockIndex + //! @sa ActivateSnapshot uint32_t nStatus{0}; //! block header diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 88cf5ef0a8..16efffa6f0 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -8,9 +8,7 @@ #include <chainparamsseeds.h> #include <consensus/merkle.h> #include <hash.h> // for signet block challenge hash -#include <tinyformat.h> #include <util/system.h> -#include <util/strencodings.h> #include <versionbitsinfo.h> #include <assert.h> @@ -136,7 +134,7 @@ public: bech32_hrp = "bc"; - vFixedSeeds = std::vector<SeedSpec6>(pnSeed6_main, pnSeed6_main + ARRAYLEN(pnSeed6_main)); + vFixedSeeds = std::vector<SeedSpec6>(std::begin(pnSeed6_main), std::end(pnSeed6_main)); fDefaultConsistencyChecks = false; fRequireStandard = true; @@ -161,6 +159,10 @@ public: } }; + m_assumeutxo_data = MapAssumeutxo{ + // TODO to be specified in a future patch. 
+ }; + chainTxData = ChainTxData{ // Data from RPC: getchaintxstats 4096 0000000000000000000b9d2ec5a352ecba0592946514a92f14319dc2b367fc72 /* nTime */ 1603995752, @@ -237,7 +239,7 @@ public: bech32_hrp = "tb"; - vFixedSeeds = std::vector<SeedSpec6>(pnSeed6_test, pnSeed6_test + ARRAYLEN(pnSeed6_test)); + vFixedSeeds = std::vector<SeedSpec6>(std::begin(pnSeed6_test), std::end(pnSeed6_test)); fDefaultConsistencyChecks = false; fRequireStandard = false; @@ -250,6 +252,10 @@ public: } }; + m_assumeutxo_data = MapAssumeutxo{ + // TODO to be specified in a future patch. + }; + chainTxData = ChainTxData{ // Data from RPC: getchaintxstats 4096 000000000000006433d1efec504c53ca332b64963c425395515b01977bd7b3b0 /* nTime */ 1603359686, @@ -406,7 +412,7 @@ public: pchMessageStart[2] = 0xb5; pchMessageStart[3] = 0xda; nDefaultPort = 18444; - nPruneAfterHeight = 1000; + nPruneAfterHeight = gArgs.GetBoolArg("-fastprune", false) ? 100 : 1000; m_assumed_blockchain_size = 0; m_assumed_chain_state_size = 0; @@ -431,6 +437,17 @@ public: } }; + m_assumeutxo_data = MapAssumeutxo{ + { + 110, + {uint256S("0x76fd7334ac7c1baf57ddc0c626f073a655a35d98a4258cd1382c8cc2b8392e10"), 110}, + }, + { + 210, + {uint256S("0x9c5ed99ef98544b34f8920b6d1802f72ac28ae6e2bd2bd4c316ff10c230df3f2"), 210}, + }, + }; + chainTxData = ChainTxData{ 0, 0, @@ -526,3 +543,9 @@ void SelectParams(const std::string& network) SelectBaseParams(network); globalChainParams = CreateChainParams(gArgs, network); } + +std::ostream& operator<<(std::ostream& o, const AssumeutxoData& aud) +{ + o << strprintf("AssumeutxoData(%s, %s)", aud.hash_serialized.ToString(), aud.nChainTx); + return o; +} diff --git a/src/chainparams.h b/src/chainparams.h index d8b25c7220..4d24dcdb7c 100644 --- a/src/chainparams.h +++ b/src/chainparams.h @@ -31,6 +31,26 @@ struct CCheckpointData { }; /** + * Holds configuration for use during UTXO snapshot load and validation. The contents + * here are security critical, since they dictate which UTXO snapshots are recognized + * as valid. + */ +struct AssumeutxoData { + //! The expected hash of the deserialized UTXO set. + const uint256 hash_serialized; + + //! Used to populate the nChainTx value, which is used during BlockManager::LoadBlockIndex(). + //! + //! We need to hardcode the value here because this is computed cumulatively using block data, + //! which we do not necessarily have at the time of snapshot load. + const unsigned int nChainTx; +}; + +std::ostream& operator<<(std::ostream& o, const AssumeutxoData& aud); + +using MapAssumeutxo = std::map<int, const AssumeutxoData>; + +/** * Holds various statistics on transactions within a chain. Used to estimate * verification progress during chain sync. * @@ -90,6 +110,11 @@ public: const std::string& Bech32HRP() const { return bech32_hrp; } const std::vector<SeedSpec6>& FixedSeeds() const { return vFixedSeeds; } const CCheckpointData& Checkpoints() const { return checkpointData; } + + //! Get allowed assumeutxo configuration. + //! 
@see ChainstateManager + const MapAssumeutxo& Assumeutxo() const { return m_assumeutxo_data; } + const ChainTxData& TxData() const { return chainTxData; } protected: CChainParams() {} @@ -111,6 +136,7 @@ protected: bool m_is_test_chain; bool m_is_mockable_chain; CCheckpointData checkpointData; + MapAssumeutxo m_assumeutxo_data; ChainTxData chainTxData; }; diff --git a/src/clientversion.h b/src/clientversion.h index 2da909f829..0ed3f68094 100644 --- a/src/clientversion.h +++ b/src/clientversion.h @@ -5,6 +5,8 @@ #ifndef BITCOIN_CLIENTVERSION_H #define BITCOIN_CLIENTVERSION_H +#include <util/macros.h> + #if defined(HAVE_CONFIG_H) #include <config/bitcoin-config.h> #endif //HAVE_CONFIG_H @@ -14,13 +16,6 @@ #error Client version information missing: version is not defined by bitcoin-config.h or in any other way #endif -/** - * Converts the parameter X to a string after macro replacement on X has been performed. - * Don't merge these into one macro! - */ -#define STRINGIZE(X) DO_STRINGIZE(X) -#define DO_STRINGIZE(X) #X - //! Copyright string used in Windows .rc files #define COPYRIGHT_STR "2009-" STRINGIZE(COPYRIGHT_YEAR) " " COPYRIGHT_HOLDERS_FINAL diff --git a/src/coins.cpp b/src/coins.cpp index dd84e720e7..d52851cadd 100644 --- a/src/coins.cpp +++ b/src/coins.cpp @@ -97,6 +97,14 @@ void CCoinsViewCache::AddCoin(const COutPoint &outpoint, Coin&& coin, bool possi cachedCoinsUsage += it->second.coin.DynamicMemoryUsage(); } +void CCoinsViewCache::EmplaceCoinInternalDANGER(COutPoint&& outpoint, Coin&& coin) { + cachedCoinsUsage += coin.DynamicMemoryUsage(); + cacheCoins.emplace( + std::piecewise_construct, + std::forward_as_tuple(std::move(outpoint)), + std::forward_as_tuple(std::move(coin), CCoinsCacheEntry::DIRTY)); +} + void AddCoins(CCoinsViewCache& cache, const CTransaction &tx, int nHeight, bool check_for_overwrite) { bool fCoinbase = tx.IsCoinBase(); const uint256& txid = tx.GetHash(); diff --git a/src/coins.h b/src/coins.h index d2eb42d8cf..feb441fd6a 100644 --- a/src/coins.h +++ b/src/coins.h @@ -20,6 +20,8 @@ #include <functional> #include <unordered_map> +class ChainstateManager; + /** * A UTXO entry. * @@ -125,6 +127,7 @@ struct CCoinsCacheEntry CCoinsCacheEntry() : flags(0) {} explicit CCoinsCacheEntry(Coin&& coin_) : coin(std::move(coin_)), flags(0) {} + CCoinsCacheEntry(Coin&& coin_, unsigned char flag) : coin(std::move(coin_)), flags(flag) {} }; typedef std::unordered_map<COutPoint, CCoinsCacheEntry, SaltedOutpointHasher> CCoinsMap; @@ -263,6 +266,15 @@ public: void AddCoin(const COutPoint& outpoint, Coin&& coin, bool possible_overwrite); /** + * Emplace a coin into cacheCoins without performing any checks, marking + * the emplaced coin as dirty. + * + * NOT FOR GENERAL USE. Used only when loading coins from a UTXO snapshot. + * @sa ChainstateManager::PopulateAndValidateSnapshot() + */ + void EmplaceCoinInternalDANGER(COutPoint&& outpoint, Coin&& coin); + + /** * Spend a coin. Pass moveto in order to get the deleted data. * If no unspent output exists for the passed outpoint, this call * has no effect. diff --git a/src/crypto/muhash.cpp b/src/crypto/muhash.cpp index fbd14f9325..e5a0d4cb9c 100644 --- a/src/crypto/muhash.cpp +++ b/src/crypto/muhash.cpp @@ -17,7 +17,6 @@ namespace { using limb_t = Num3072::limb_t; using double_limb_t = Num3072::double_limb_t; constexpr int LIMB_SIZE = Num3072::LIMB_SIZE; -constexpr int LIMBS = Num3072::LIMBS; /** 2^3072 - 1103717, the largest 3072-bit safe prime number, is used as the modulus. 
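The AssumeutxoData struct, the MapAssumeutxo map, and the CChainParams::Assumeutxo() accessor introduced above give each chain a per-height table of acceptable UTXO snapshots. A minimal sketch of how a caller might look up the expected parameters for a snapshot whose base block sits at a given height; the helper name is hypothetical and not part of this patch:

#include <chainparams.h>

// Return the hard-coded expectation (serialized UTXO hash and nChainTx) for a
// snapshot based at `height`, or nullptr if no snapshot is permitted there.
const AssumeutxoData* FindAssumeutxo(int height, const CChainParams& params)
{
    const MapAssumeutxo& valid_snapshots = params.Assumeutxo();
    const auto it = valid_snapshots.find(height);
    return it == valid_snapshots.end() ? nullptr : &it->second;
}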
*/ constexpr limb_t MAX_PRIME_DIFF = 1103717; @@ -123,7 +122,7 @@ inline void square_n_mul(Num3072& in_out, const int sq, const Num3072& mul) } // namespace -/** Indicates wether d is larger than the modulus. */ +/** Indicates whether d is larger than the modulus. */ bool Num3072::IsOverflow() const { if (this->limbs[0] <= std::numeric_limits<limb_t>::max() - MAX_PRIME_DIFF) return false; @@ -276,18 +275,33 @@ void Num3072::Divide(const Num3072& a) if (this->IsOverflow()) this->FullReduce(); } -Num3072 MuHash3072::ToNum3072(Span<const unsigned char> in) { - Num3072 out{}; - uint256 hashed_in = (CHashWriter(SER_DISK, 0) << in).GetSHA256(); - unsigned char tmp[BYTE_SIZE]; - ChaCha20(hashed_in.data(), hashed_in.size()).Keystream(tmp, BYTE_SIZE); +Num3072::Num3072(const unsigned char (&data)[BYTE_SIZE]) { + for (int i = 0; i < LIMBS; ++i) { + if (sizeof(limb_t) == 4) { + this->limbs[i] = ReadLE32(data + 4 * i); + } else if (sizeof(limb_t) == 8) { + this->limbs[i] = ReadLE64(data + 8 * i); + } + } +} + +void Num3072::ToBytes(unsigned char (&out)[BYTE_SIZE]) { for (int i = 0; i < LIMBS; ++i) { if (sizeof(limb_t) == 4) { - out.limbs[i] = ReadLE32(tmp + 4 * i); + WriteLE32(out + i * 4, this->limbs[i]); } else if (sizeof(limb_t) == 8) { - out.limbs[i] = ReadLE64(tmp + 8 * i); + WriteLE64(out + i * 8, this->limbs[i]); } } +} + +Num3072 MuHash3072::ToNum3072(Span<const unsigned char> in) { + unsigned char tmp[Num3072::BYTE_SIZE]; + + uint256 hashed_in = (CHashWriter(SER_DISK, 0) << in).GetSHA256(); + ChaCha20(hashed_in.data(), hashed_in.size()).Keystream(tmp, Num3072::BYTE_SIZE); + Num3072 out{tmp}; + return out; } @@ -301,14 +315,8 @@ void MuHash3072::Finalize(uint256& out) noexcept m_numerator.Divide(m_denominator); m_denominator.SetToOne(); // Needed to keep the MuHash object valid - unsigned char data[384]; - for (int i = 0; i < LIMBS; ++i) { - if (sizeof(limb_t) == 4) { - WriteLE32(data + i * 4, m_numerator.limbs[i]); - } else if (sizeof(limb_t) == 8) { - WriteLE64(data + i * 8, m_numerator.limbs[i]); - } - } + unsigned char data[Num3072::BYTE_SIZE]; + m_numerator.ToBytes(data); out = (CHashWriter(SER_DISK, 0) << data).GetSHA256(); } diff --git a/src/crypto/muhash.h b/src/crypto/muhash.h index 0c710007c4..c023a8b9d3 100644 --- a/src/crypto/muhash.h +++ b/src/crypto/muhash.h @@ -22,6 +22,7 @@ private: Num3072 GetInverse() const; public: + static constexpr size_t BYTE_SIZE = 384; #ifdef HAVE___INT128 typedef unsigned __int128 double_limb_t; @@ -48,8 +49,10 @@ public: void Divide(const Num3072& a); void SetToOne(); void Square(); + void ToBytes(unsigned char (&out)[BYTE_SIZE]); Num3072() { this->SetToOne(); }; + Num3072(const unsigned char (&data)[BYTE_SIZE]); SERIALIZE_METHODS(Num3072, obj) { @@ -78,7 +81,7 @@ public: * arbitrary subset of the update operations, allowing them to be * efficiently combined later. * - * Muhash does not support checking if an element is already part of the + * MuHash does not support checking if an element is already part of the * set. That is why this class does not enforce the use of a set as the * data it represents because there is no efficient way to do so. 
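The MuHash3072 changes above make the 384-byte width a property of Num3072 and add explicit byte-level (de)serialization, with Finalize() reusing ToBytes(). A minimal sketch of the incremental set-hashing workflow the class comment describes; Insert() and Remove() do not appear in this hunk and are assumed here:

#include <crypto/muhash.h>
#include <uint256.h>

#include <vector>

uint256 ExampleSetHash()
{
    MuHash3072 acc;                          // empty-set accumulator
    const std::vector<unsigned char> a{1, 2, 3};
    const std::vector<unsigned char> b{4, 5, 6};
    acc.Insert(a);                           // assumed member: add an element
    acc.Insert(b);
    acc.Remove(a);                           // removal cancels the earlier insert
    uint256 out;
    acc.Finalize(out);                       // digest now equals that of {b} alone
    return out;
}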
* It is possible to add elements more than once and also to remove @@ -91,8 +94,6 @@ public: class MuHash3072 { private: - static constexpr size_t BYTE_SIZE = 384; - Num3072 m_numerator; Num3072 m_denominator; diff --git a/src/dbwrapper.h b/src/dbwrapper.h index 33e4b366a1..c119036db2 100644 --- a/src/dbwrapper.h +++ b/src/dbwrapper.h @@ -8,9 +8,10 @@ #include <clientversion.h> #include <fs.h> #include <serialize.h> +#include <span.h> #include <streams.h> -#include <util/system.h> #include <util/strencodings.h> +#include <util/system.h> #include <leveldb/db.h> #include <leveldb/write_batch.h> @@ -73,12 +74,12 @@ public: { ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey << key; - leveldb::Slice slKey(ssKey.data(), ssKey.size()); + leveldb::Slice slKey((const char*)ssKey.data(), ssKey.size()); ssValue.reserve(DBWRAPPER_PREALLOC_VALUE_SIZE); ssValue << value; ssValue.Xor(dbwrapper_private::GetObfuscateKey(parent)); - leveldb::Slice slValue(ssValue.data(), ssValue.size()); + leveldb::Slice slValue((const char*)ssValue.data(), ssValue.size()); batch.Put(slKey, slValue); // LevelDB serializes writes as: @@ -98,7 +99,7 @@ public: { ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey << key; - leveldb::Slice slKey(ssKey.data(), ssKey.size()); + leveldb::Slice slKey((const char*)ssKey.data(), ssKey.size()); batch.Delete(slKey); // LevelDB serializes erases as: @@ -137,7 +138,7 @@ public: CDataStream ssKey(SER_DISK, CLIENT_VERSION); ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey << key; - leveldb::Slice slKey(ssKey.data(), ssKey.size()); + leveldb::Slice slKey((const char*)ssKey.data(), ssKey.size()); piter->Seek(slKey); } @@ -146,7 +147,7 @@ public: template<typename K> bool GetKey(K& key) { leveldb::Slice slKey = piter->key(); try { - CDataStream ssKey(slKey.data(), slKey.data() + slKey.size(), SER_DISK, CLIENT_VERSION); + CDataStream ssKey(MakeUCharSpan(slKey), SER_DISK, CLIENT_VERSION); ssKey >> key; } catch (const std::exception&) { return false; @@ -157,7 +158,7 @@ public: template<typename V> bool GetValue(V& value) { leveldb::Slice slValue = piter->value(); try { - CDataStream ssValue(slValue.data(), slValue.data() + slValue.size(), SER_DISK, CLIENT_VERSION); + CDataStream ssValue(MakeUCharSpan(slValue), SER_DISK, CLIENT_VERSION); ssValue.Xor(dbwrapper_private::GetObfuscateKey(parent)); ssValue >> value; } catch (const std::exception&) { @@ -232,7 +233,7 @@ public: CDataStream ssKey(SER_DISK, CLIENT_VERSION); ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey << key; - leveldb::Slice slKey(ssKey.data(), ssKey.size()); + leveldb::Slice slKey((const char*)ssKey.data(), ssKey.size()); std::string strValue; leveldb::Status status = pdb->Get(readoptions, slKey, &strValue); @@ -243,7 +244,7 @@ public: dbwrapper_private::HandleError(status); } try { - CDataStream ssValue(strValue.data(), strValue.data() + strValue.size(), SER_DISK, CLIENT_VERSION); + CDataStream ssValue(MakeUCharSpan(strValue), SER_DISK, CLIENT_VERSION); ssValue.Xor(obfuscate_key); ssValue >> value; } catch (const std::exception&) { @@ -266,7 +267,7 @@ public: CDataStream ssKey(SER_DISK, CLIENT_VERSION); ssKey.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey << key; - leveldb::Slice slKey(ssKey.data(), ssKey.size()); + leveldb::Slice slKey((const char*)ssKey.data(), ssKey.size()); std::string strValue; leveldb::Status status = pdb->Get(readoptions, slKey, &strValue); @@ -310,8 +311,8 @@ public: ssKey2.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey1 << key_begin; ssKey2 << key_end; - leveldb::Slice slKey1(ssKey1.data(), ssKey1.size()); - 
leveldb::Slice slKey2(ssKey2.data(), ssKey2.size()); + leveldb::Slice slKey1((const char*)ssKey1.data(), ssKey1.size()); + leveldb::Slice slKey2((const char*)ssKey2.data(), ssKey2.size()); uint64_t size = 0; leveldb::Range range(slKey1, slKey2); pdb->GetApproximateSizes(&range, 1, &size); @@ -329,11 +330,10 @@ public: ssKey2.reserve(DBWRAPPER_PREALLOC_KEY_SIZE); ssKey1 << key_begin; ssKey2 << key_end; - leveldb::Slice slKey1(ssKey1.data(), ssKey1.size()); - leveldb::Slice slKey2(ssKey2.data(), ssKey2.size()); + leveldb::Slice slKey1((const char*)ssKey1.data(), ssKey1.size()); + leveldb::Slice slKey2((const char*)ssKey2.data(), ssKey2.size()); pdb->CompactRange(&slKey1, &slKey2); } - }; #endif // BITCOIN_DBWRAPPER_H diff --git a/src/flatfile.cpp b/src/flatfile.cpp index 11cf357f3d..151f1a38f1 100644 --- a/src/flatfile.cpp +++ b/src/flatfile.cpp @@ -66,7 +66,7 @@ size_t FlatFileSeq::Allocate(const FlatFilePos& pos, size_t add_size, bool& out_ if (CheckDiskSpace(m_dir, inc_size)) { FILE *file = Open(pos); if (file) { - LogPrintf("Pre-allocating up to position 0x%x in %s%05u.dat\n", new_size, m_prefix, pos.nFile); + LogPrint(BCLog::VALIDATION, "Pre-allocating up to position 0x%x in %s%05u.dat\n", new_size, m_prefix, pos.nFile); AllocateFileRange(file, pos.nPos, inc_size); fclose(file); return inc_size; diff --git a/src/index/base.cpp b/src/index/base.cpp index e67b813763..25644c3b41 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -62,9 +62,46 @@ bool BaseIndex::Init() if (locator.IsNull()) { m_best_block_index = nullptr; } else { - m_best_block_index = FindForkInGlobalIndex(::ChainActive(), locator); + m_best_block_index = g_chainman.m_blockman.FindForkInGlobalIndex(::ChainActive(), locator); } m_synced = m_best_block_index.load() == ::ChainActive().Tip(); + if (!m_synced) { + bool prune_violation = false; + if (!m_best_block_index) { + // index is not built yet + // make sure we have all block data back to the genesis + const CBlockIndex* block = ::ChainActive().Tip(); + while (block->pprev && (block->pprev->nStatus & BLOCK_HAVE_DATA)) { + block = block->pprev; + } + prune_violation = block != ::ChainActive().Genesis(); + } + // in case the index has a best block set and is not fully synced + // check if we have the required blocks to continue building the index + else { + const CBlockIndex* block_to_test = m_best_block_index.load(); + if (!ChainActive().Contains(block_to_test)) { + // if the bestblock is not part of the mainchain, find the fork + // and make sure we have all data down to the fork + block_to_test = ::ChainActive().FindFork(block_to_test); + } + const CBlockIndex* block = ::ChainActive().Tip(); + prune_violation = true; + // check backwards from the tip if we have all block data until we reach the indexes bestblock + while (block_to_test && block->pprev && (block->pprev->nStatus & BLOCK_HAVE_DATA)) { + if (block_to_test == block) { + prune_violation = false; + break; + } + block = block->pprev; + } + } + if (prune_violation) { + // throw error and graceful shutdown if we can't build the index + FatalError("%s: %s best block of the index goes beyond pruned data. Please disable the index or reindex (which will download the whole blockchain again)", __func__, GetName()); + return false; + } + } return true; } @@ -177,6 +214,10 @@ bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_ti assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip); // In the case of a reorg, ensure persisted block locator is not stale. 
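The new BaseIndex::Init() logic above decides whether an index can still be built by walking the active chain backwards and checking BLOCK_HAVE_DATA on each ancestor. A condensed, hypothetical helper expressing that backwards scan (simplified relative to the hunk, which additionally handles a best block that is no longer on the main chain):

#include <chain.h>

// True if `tip` descends from `target` and block data is available on disk
// for every block from `target` up to and including `tip`.
bool HaveDataBackTo(const CBlockIndex* tip, const CBlockIndex* target)
{
    for (const CBlockIndex* block = tip; block != nullptr; block = block->pprev) {
        if (!(block->nStatus & BLOCK_HAVE_DATA)) return false;
        if (block == target) return true;
    }
    return false; // target is not an ancestor of tip
}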
+ // Pruning has a minimum of 288 blocks-to-keep and getting the index + // out of sync may be possible but a users fault. + // In case we reorg beyond the pruned depth, ReadBlockFromDisk would + // throw and lead to a graceful shutdown m_best_block_index = new_tip; if (!Commit()) { // If commit fails, revert the best block index to avoid corruption. @@ -239,7 +280,7 @@ void BaseIndex::ChainStateFlushed(const CBlockLocator& locator) const CBlockIndex* locator_tip_index; { LOCK(cs_main); - locator_tip_index = LookupBlockIndex(locator_tip_hash); + locator_tip_index = g_chainman.m_blockman.LookupBlockIndex(locator_tip_hash); } if (!locator_tip_index) { @@ -325,6 +366,6 @@ IndexSummary BaseIndex::GetSummary() const IndexSummary summary{}; summary.name = GetName(); summary.synced = m_synced; - summary.best_block_height = m_best_block_index.load()->nHeight; + summary.best_block_height = m_best_block_index ? m_best_block_index.load()->nHeight : 0; return summary; } diff --git a/src/init.cpp b/src/init.cpp index 64bbf8988d..befba2eb2d 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -68,6 +68,8 @@ #include <set> #include <stdint.h> #include <stdio.h> +#include <thread> +#include <vector> #ifndef WIN32 #include <attributes.h> @@ -78,7 +80,6 @@ #include <boost/algorithm/string/replace.hpp> #include <boost/signals2/signal.hpp> -#include <boost/thread/thread.hpp> #if ENABLE_ZMQ #include <zmq/zmqabstractnotifier.h> @@ -155,8 +156,6 @@ static std::unique_ptr<ECCVerifyHandle> globalVerifyHandle; static std::thread g_load_block; -static boost::thread_group threadGroup; - void Interrupt(NodeContext& node) { InterruptHTTPServer(); @@ -218,11 +217,9 @@ void Shutdown(NodeContext& node) StopTorControl(); // After everything has been shut down, but before things get flushed, stop the - // CScheduler/checkqueue, threadGroup and load block thread. + // CScheduler/checkqueue, scheduler and load block thread. if (node.scheduler) node.scheduler->stop(); if (g_load_block.joinable()) g_load_block.join(); - threadGroup.interrupt_all(); - threadGroup.join_all(); StopScriptCheckWorkerThreads(); // After the threads that potentially access these pointers have been stopped, @@ -389,6 +386,7 @@ void SetupServerArgs(NodeContext& node) #endif argsman.AddArg("-assumevalid=<hex>", strprintf("If this block is in the chain assume that it and its ancestors are valid and potentially skip their script verification (0 to verify all, default: %s, testnet: %s, signet: %s)", defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(), testnetChainParams->GetConsensus().defaultAssumeValid.GetHex(), signetChainParams->GetConsensus().defaultAssumeValid.GetHex()), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-blocksdir=<dir>", "Specify directory to hold blocks subdirectory for *.dat files (default: <datadir>)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-fastprune", "Use smaller block files and lower minimum prune height for testing purposes", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); #if HAVE_SYSTEM argsman.AddArg("-blocknotify=<cmd>", "Execute command when the best block changes (%s in cmd is replaced by block hash)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); #endif @@ -437,8 +435,9 @@ void SetupServerArgs(NodeContext& node) argsman.AddArg("-connect=<ip>", "Connect only to the specified node; -noconnect disables automatic connections (the rules for this peer are the same as for -addnode). 
This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); argsman.AddArg("-discover", "Discover own IP addresses (default: 1 when listening and no -externalip or -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-dns", strprintf("Allow DNS lookups for -addnode, -seednode and -connect (default: %u)", DEFAULT_NAME_LOOKUP), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - argsman.AddArg("-dnsseed", "Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect used)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-dnsseed", strprintf("Query for peer addresses via DNS lookup, if low on addresses (default: %u unless -connect used)", DEFAULT_DNSSEED), ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION); argsman.AddArg("-externalip=<ip>", "Specify your own public address", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-fixedseeds", strprintf("Allow fixed seeds if DNS seeds don't provide peers (default: %u)", DEFAULT_FIXEDSEEDS), ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION); argsman.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-listen", "Accept connections from outside (default: 1 if no -proxy or -connect)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-listenonion", strprintf("Automatically create Tor onion service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); @@ -448,7 +447,7 @@ void SetupServerArgs(NodeContext& node) argsman.AddArg("-maxtimeadjustment", strprintf("Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by peers forward or backward by this amount. (default: %u seconds)", DEFAULT_MAX_TIME_ADJUSTMENT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-maxuploadtarget=<n>", strprintf("Tries to keep outbound traffic under the given target (in MiB per 24h). Limit does not apply to peers with 'download' permission. 0 = no limit (default: %d)", DEFAULT_MAX_UPLOAD_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor onion services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - argsman.AddArg("-onlynet=<net>", "Make outgoing connections only through network <net> (ipv4, ipv6 or onion). Incoming connections are not affected by this option. This option can be specified multiple times to allow multiple networks. Warning: if it is used with ipv4 or ipv6 but not onion and the -onion or -proxy option is set, then outbound onion connections will still be made; use -noonion or -onion=0 to disable outbound onion connections in this case.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-onlynet=<net>", "Make outgoing connections only through network <net> (" + Join(GetNetworkNames(), ", ") + "). Incoming connections are not affected by this option. This option can be specified multiple times to allow multiple networks. 
Warning: if it is used with ipv4 or ipv6 but not onion and the -onion or -proxy option is set, then outbound onion connections will still be made; use -noonion or -onion=0 to disable outbound onion connections in this case.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-permitbaremultisig", strprintf("Relay non-P2SH multisig (default: %u)", DEFAULT_PERMIT_BAREMULTISIG), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); @@ -457,8 +456,8 @@ void SetupServerArgs(NodeContext& node) argsman.AddArg("-proxyrandomize", strprintf("Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)", DEFAULT_PROXYRANDOMIZE), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-seednode=<ip>", "Connect to a node to retrieve peer addresses, and disconnect. This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-networkactive", "Enable all P2P network activity (default: 1). Can be changed by the setnetworkactive RPC command", ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION); - argsman.AddArg("-timeout=<n>", strprintf("Specify connection timeout in milliseconds (minimum: 1, default: %d)", DEFAULT_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - argsman.AddArg("-peertimeout=<n>", strprintf("Specify p2p connection timeout in seconds. This option determines the amount of time a peer may be inactive before the connection to it is dropped. (minimum: 1, default: %d)", DEFAULT_PEER_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION); + argsman.AddArg("-timeout=<n>", strprintf("Specify socket connection timeout in milliseconds. If an initial attempt to connect is unsuccessful after this amount of time, drop it (minimum: 1, default: %d)", DEFAULT_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-peertimeout=<n>", strprintf("Specify a p2p connection timeout delay in seconds. 
After connecting to a peer, wait this amount of time before considering disconnection based on inactivity (minimum: 1, default: %d)", DEFAULT_PEER_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION); argsman.AddArg("-torcontrol=<ip>:<port>", strprintf("Tor control port to use if onion listening enabled (default: %s)", DEFAULT_TOR_CONTROL), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-torpassword=<pass>", "Tor control port password (default: empty)", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::CONNECTION); #ifdef USE_UPNP @@ -522,10 +521,11 @@ void SetupServerArgs(NodeContext& node) argsman.AddArg("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-addrmantest", "Allows to test address relay on localhost", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-capturemessages", "Capture all P2P messages to disk", ArgsManager::ALLOW_BOOL | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-debug=<category>", "Output debugging information (default: -nodebug, supplying <category> is optional). " - "If <category> is not supplied or if <category> = 1, output all debugging information. <category> can be: " + LogInstance().LogCategoriesString() + ".", + "If <category> is not supplied or if <category> = 1, output all debugging information. <category> can be: " + LogInstance().LogCategoriesString() + ". This option can be specified multiple times to output multiple categories.", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); - argsman.AddArg("-debugexclude=<category>", strprintf("Exclude debugging information for a category. Can be used in conjunction with -debug=1 to output debug logs for all categories except one or more specified categories."), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-debugexclude=<category>", strprintf("Exclude debugging information for a category. Can be used in conjunction with -debug=1 to output debug logs for all categories except the specified category. 
This option can be specified multiple times to exclude multiple categories."), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-logips", strprintf("Include IP addresses in debug output (default: %u)", DEFAULT_LOGIPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-logtimestamps", strprintf("Prepend debug output with timestamp (default: %u)", DEFAULT_LOGTIMESTAMPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); #ifdef HAVE_THREAD_LOCAL @@ -533,6 +533,7 @@ void SetupServerArgs(NodeContext& node) #else hidden_args.emplace_back("-logthreadnames"); #endif + argsman.AddArg("-logsourcelocations", strprintf("Prepend debug output with name of the originating source location (source file, line number and function name) (default: %u)", DEFAULT_LOGSOURCELOCATIONS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-logtimemicros", strprintf("Add microsecond precision to debug timestamps (default: %u)", DEFAULT_LOGTIMEMICROS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-mocktime=<n>", "Replace actual time with " + UNIX_EPOCH_TIME + " (default: 0)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-maxsigcachesize=<n>", strprintf("Limit sum of signature cache and script execution cache sizes to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); @@ -705,7 +706,7 @@ static void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImp if (!file) break; // This error is logged in OpenBlockFile LogPrintf("Reindexing block file blk%05u.dat...\n", (unsigned int)nFile); - LoadExternalBlockFile(chainparams, file, &pos); + ::ChainstateActive().LoadExternalBlockFile(chainparams, file, &pos); if (ShutdownRequested()) { LogPrintf("Shutdown requested. Exit %s\n", __func__); return; @@ -724,7 +725,7 @@ static void ThreadImport(ChainstateManager& chainman, std::vector<fs::path> vImp FILE *file = fsbridge::fopen(path, "rb"); if (file) { LogPrintf("Importing blocks file %s...\n", path.string()); - LoadExternalBlockFile(chainparams, file); + ::ChainstateActive().LoadExternalBlockFile(chainparams, file); if (ShutdownRequested()) { LogPrintf("Shutdown requested. Exit %s\n", __func__); return; @@ -774,6 +775,10 @@ static bool InitSanityCheck() return InitError(Untranslated("OS cryptographic RNG sanity check failure. Aborting.")); } + if (!ChronoSanityCheck()) { + return InitError(Untranslated("Clock epoch mismatch. 
Aborting.")); + } + return true; } @@ -879,6 +884,7 @@ void InitLogging(const ArgsManager& args) #ifdef HAVE_THREAD_LOCAL LogInstance().m_log_threadnames = args.GetBoolArg("-logthreadnames", DEFAULT_LOGTHREADNAMES); #endif + LogInstance().m_log_sourcelocations = args.GetBoolArg("-logsourcelocations", DEFAULT_LOGSOURCELOCATIONS); fLogIPs = args.GetBoolArg("-logips", DEFAULT_LOGIPS); @@ -1025,9 +1031,6 @@ bool AppInitParameterInteraction(const ArgsManager& args) if (args.GetArg("-prune", 0)) { if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) return InitError(_("Prune mode is incompatible with -txindex.")); - if (!g_enabled_filter_types.empty()) { - return InitError(_("Prune mode is incompatible with -blockfilterindex.")); - } } // -bind and -whitebind can't be set when not listening @@ -1043,16 +1046,17 @@ bool AppInitParameterInteraction(const ArgsManager& args) // Trim requested connection counts, to fit into system limitations // <int> in std::min<int>(...) to work around FreeBSD compilation issue described in #2695 - nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS + MAX_ADDNODE_CONNECTIONS + nBind); + nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS + MAX_ADDNODE_CONNECTIONS + nBind + NUM_FDS_MESSAGE_CAPTURE); + #ifdef USE_POLL int fd_max = nFD; #else int fd_max = FD_SETSIZE; #endif - nMaxConnections = std::max(std::min<int>(nMaxConnections, fd_max - nBind - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS), 0); + nMaxConnections = std::max(std::min<int>(nMaxConnections, fd_max - nBind - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS - NUM_FDS_MESSAGE_CAPTURE), 0); if (nFD < MIN_CORE_FILEDESCRIPTORS) return InitError(_("Not enough file descriptors available.")); - nMaxConnections = std::min(nFD - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS, nMaxConnections); + nMaxConnections = std::min(nFD - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS - NUM_FDS_MESSAGE_CAPTURE, nMaxConnections); if (nMaxConnections < nUserMaxConnections) InitWarning(strprintf(_("Reducing -maxconnections from %d to %d, because of system limitations."), nUserMaxConnections, nMaxConnections)); @@ -1342,7 +1346,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node, interfaces::BlockA node.scheduler = MakeUnique<CScheduler>(); // Start the lightweight task scheduler thread - threadGroup.create_thread([&] { TraceThread("scheduler", [&] { node.scheduler->serviceQueue(); }); }); + node.scheduler->m_service_thread = std::thread([&] { TraceThread("scheduler", [&] { node.scheduler->serviceQueue(); }); }); // Gather some entropy once per minute. node.scheduler->scheduleEvery([]{ @@ -1611,7 +1615,7 @@ bool AppInitMain(const util::Ref& context, NodeContext& node, interfaces::BlockA // If the loaded chain has a wrong genesis, bail out immediately // (we're likely using a testnet datadir, or the other way around). if (!chainman.BlockIndex().empty() && - !LookupBlockIndex(chainparams.GetConsensus().hashGenesisBlock)) { + !g_chainman.m_blockman.LookupBlockIndex(chainparams.GetConsensus().hashGenesisBlock)) { return InitError(_("Incorrect or no genesis block found. 
Wrong datadir for network?")); } diff --git a/src/logging.cpp b/src/logging.cpp index 4ddcf1d930..e82f2c2810 100644 --- a/src/logging.cpp +++ b/src/logging.cpp @@ -5,6 +5,7 @@ #include <logging.h> #include <util/threadnames.h> +#include <util/string.h> #include <util/time.h> #include <mutex> @@ -203,9 +204,9 @@ std::string BCLog::Logger::LogTimestampStr(const std::string& str) strStamped.pop_back(); strStamped += strprintf(".%06dZ", nTimeMicros%1000000); } - int64_t mocktime = GetMockTime(); - if (mocktime) { - strStamped += " (mocktime: " + FormatISO8601DateTime(mocktime) + ")"; + std::chrono::seconds mocktime = GetMockTime(); + if (mocktime > 0s) { + strStamped += " (mocktime: " + FormatISO8601DateTime(count_seconds(mocktime)) + ")"; } strStamped += ' ' + str; } else @@ -236,11 +237,15 @@ namespace BCLog { } } -void BCLog::Logger::LogPrintStr(const std::string& str) +void BCLog::Logger::LogPrintStr(const std::string& str, const std::string& logging_function, const std::string& source_file, const int source_line) { StdLockGuard scoped_lock(m_cs); std::string str_prefixed = LogEscapeMessage(str); + if (m_log_sourcelocations && m_started_new_line) { + str_prefixed.insert(0, "[" + RemovePrefix(source_file, "./") + ":" + ToString(source_line) + "] [" + logging_function + "] "); + } + if (m_log_threadnames && m_started_new_line) { str_prefixed.insert(0, "[" + util::ThreadGetInternalName() + "] "); } diff --git a/src/logging.h b/src/logging.h index 9efecc7c12..4ece8f5e3a 100644 --- a/src/logging.h +++ b/src/logging.h @@ -22,6 +22,7 @@ static const bool DEFAULT_LOGTIMEMICROS = false; static const bool DEFAULT_LOGIPS = false; static const bool DEFAULT_LOGTIMESTAMPS = true; static const bool DEFAULT_LOGTHREADNAMES = false; +static const bool DEFAULT_LOGSOURCELOCATIONS = false; extern const char * const DEFAULT_DEBUGLOGFILE; extern bool fLogIPs; @@ -90,12 +91,13 @@ namespace BCLog { bool m_log_timestamps = DEFAULT_LOGTIMESTAMPS; bool m_log_time_micros = DEFAULT_LOGTIMEMICROS; bool m_log_threadnames = DEFAULT_LOGTHREADNAMES; + bool m_log_sourcelocations = DEFAULT_LOGSOURCELOCATIONS; fs::path m_file_path; std::atomic<bool> m_reopen_file{false}; /** Send a string to the log output */ - void LogPrintStr(const std::string& str); + void LogPrintStr(const std::string& str, const std::string& logging_function, const std::string& source_file, const int source_line); /** Returns whether logs will be written to any output */ bool Enabled() const @@ -163,7 +165,7 @@ bool GetLogCategory(BCLog::LogFlags& flag, const std::string& str); // peer can fill up a user's disk with debug.log entries. template <typename... Args> -static inline void LogPrintf(const char* fmt, const Args&... args) +static inline void LogPrintf_(const std::string& logging_function, const std::string& source_file, const int source_line, const char* fmt, const Args&... args) { if (LogInstance().Enabled()) { std::string log_msg; @@ -173,10 +175,12 @@ static inline void LogPrintf(const char* fmt, const Args&... args) /* Original format string will have newline so don't add one here */ log_msg = "Error \"" + std::string(fmterr.what()) + "\" while formatting log message: " + fmt; } - LogInstance().LogPrintStr(log_msg); + LogInstance().LogPrintStr(log_msg, logging_function, source_file, source_line); } } +#define LogPrintf(...) LogPrintf_(__func__, __FILE__, __LINE__, __VA_ARGS__) + // Use a macro instead of a function for conditional logging to prevent // evaluating arguments when logging for the category is not enabled. 
#define LogPrint(category, ...) \ diff --git a/src/miner.cpp b/src/miner.cpp index 009ae6b13a..076d43c951 100644 --- a/src/miner.cpp +++ b/src/miner.cpp @@ -45,7 +45,7 @@ void RegenerateCommitments(CBlock& block) tx.vout.erase(tx.vout.begin() + GetWitnessCommitmentIndex(block)); block.vtx.at(0) = MakeTransactionRef(tx); - GenerateCoinbaseCommitment(block, WITH_LOCK(cs_main, return LookupBlockIndex(block.hashPrevBlock)), Params().GetConsensus()); + GenerateCoinbaseCommitment(block, WITH_LOCK(cs_main, return g_chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock)), Params().GetConsensus()); block.hashMerkleRoot = BlockMerkleRoot(block); } @@ -176,7 +176,7 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc pblocktemplate->vTxSigOpsCost[0] = WITNESS_SCALE_FACTOR * GetLegacySigOpCount(*pblock->vtx[0]); BlockValidationState state; - if (!TestBlockValidity(state, chainparams, *pblock, pindexPrev, false, false)) { + if (!TestBlockValidity(state, chainparams, ::ChainstateActive(), *pblock, pindexPrev, false, false)) { throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, state.ToString())); } int64_t nTime2 = GetTimeMicros(); diff --git a/src/net.cpp b/src/net.cpp index 76bf7effa4..533815b755 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -20,6 +20,7 @@ #include <protocol.h> #include <random.h> #include <scheduler.h> +#include <util/sock.h> #include <util/strencodings.h> #include <util/translation.h> @@ -200,31 +201,29 @@ bool IsPeerAddrLocalGood(CNode *pnode) IsReachable(addrLocal.GetNetwork()); } -// pushes our own address to a peer -void AdvertiseLocal(CNode *pnode) +Optional<CAddress> GetLocalAddrForPeer(CNode *pnode) { - if (fListen && pnode->fSuccessfullyConnected) + CAddress addrLocal = GetLocalAddress(&pnode->addr, pnode->GetLocalServices()); + if (gArgs.GetBoolArg("-addrmantest", false)) { + // use IPv4 loopback during addrmantest + addrLocal = CAddress(CService(LookupNumeric("127.0.0.1", GetListenPort())), pnode->GetLocalServices()); + } + // If discovery is enabled, sometimes give our peer the address it + // tells us that it sees us as in case it has a better idea of our + // address than we do. + FastRandomContext rng; + if (IsPeerAddrLocalGood(pnode) && (!addrLocal.IsRoutable() || + rng.randbits((GetnScore(addrLocal) > LOCAL_MANUAL) ? 3 : 1) == 0)) { - CAddress addrLocal = GetLocalAddress(&pnode->addr, pnode->GetLocalServices()); - if (gArgs.GetBoolArg("-addrmantest", false)) { - // use IPv4 loopback during addrmantest - addrLocal = CAddress(CService(LookupNumeric("127.0.0.1", GetListenPort())), pnode->GetLocalServices()); - } - // If discovery is enabled, sometimes give our peer the address it - // tells us that it sees us as in case it has a better idea of our - // address than we do. - FastRandomContext rng; - if (IsPeerAddrLocalGood(pnode) && (!addrLocal.IsRoutable() || - rng.randbits((GetnScore(addrLocal) > LOCAL_MANUAL) ? 3 : 1) == 0)) - { - addrLocal.SetIP(pnode->GetAddrLocal()); - } - if (addrLocal.IsRoutable() || gArgs.GetBoolArg("-addrmantest", false)) - { - LogPrint(BCLog::NET, "AdvertiseLocal: advertising address %s\n", addrLocal.ToString()); - pnode->PushAddress(addrLocal, rng); - } + addrLocal.SetIP(pnode->GetAddrLocal()); + } + if (addrLocal.IsRoutable() || gArgs.GetBoolArg("-addrmantest", false)) + { + LogPrint(BCLog::NET, "Advertising address %s to peer=%d\n", addrLocal.ToString(), pnode->GetId()); + return addrLocal; } + // Address is unroutable. Don't advertise. 
+ return nullopt; } // learn a new local address @@ -429,24 +428,26 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo // Connect bool connected = false; - SOCKET hSocket = INVALID_SOCKET; + std::unique_ptr<Sock> sock; proxyType proxy; if (addrConnect.IsValid()) { bool proxyConnectionFailed = false; if (GetProxy(addrConnect.GetNetwork(), proxy)) { - hSocket = CreateSocket(proxy.proxy); - if (hSocket == INVALID_SOCKET) { + sock = CreateSock(proxy.proxy); + if (!sock) { return nullptr; } - connected = ConnectThroughProxy(proxy, addrConnect.ToStringIP(), addrConnect.GetPort(), hSocket, nConnectTimeout, proxyConnectionFailed); + connected = ConnectThroughProxy(proxy, addrConnect.ToStringIP(), addrConnect.GetPort(), + *sock, nConnectTimeout, proxyConnectionFailed); } else { // no proxy needed (none set for target network) - hSocket = CreateSocket(addrConnect); - if (hSocket == INVALID_SOCKET) { + sock = CreateSock(addrConnect); + if (!sock) { return nullptr; } - connected = ConnectSocketDirectly(addrConnect, hSocket, nConnectTimeout, conn_type == ConnectionType::MANUAL); + connected = ConnectSocketDirectly(addrConnect, sock->Get(), nConnectTimeout, + conn_type == ConnectionType::MANUAL); } if (!proxyConnectionFailed) { // If a connection to the node was attempted, and failure (if any) is not caused by a problem connecting to @@ -454,26 +455,26 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo addrman.Attempt(addrConnect, fCountFailure); } } else if (pszDest && GetNameProxy(proxy)) { - hSocket = CreateSocket(proxy.proxy); - if (hSocket == INVALID_SOCKET) { + sock = CreateSock(proxy.proxy); + if (!sock) { return nullptr; } std::string host; int port = default_port; SplitHostPort(std::string(pszDest), port, host); bool proxyConnectionFailed; - connected = ConnectThroughProxy(proxy, host, port, hSocket, nConnectTimeout, proxyConnectionFailed); + connected = ConnectThroughProxy(proxy, host, port, *sock, nConnectTimeout, + proxyConnectionFailed); } if (!connected) { - CloseSocket(hSocket); return nullptr; } // Add node NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize(); - CAddress addr_bind = GetBindAddress(hSocket); - CNode* pnode = new CNode(id, nLocalServices, hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? pszDest : "", conn_type); + CAddress addr_bind = GetBindAddress(sock->Get()); + CNode* pnode = new CNode(id, nLocalServices, sock->Release(), addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? pszDest : "", conn_type, /* inbound_onion */ false); pnode->AddRef(); // We're making a new connection, harvest entropy from the time (and our peer count) @@ -598,21 +599,8 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap) stats.minFeeFilter = 0; } - // It is common for nodes with good ping times to suddenly become lagged, - // due to a new block arriving or other large transfer. - // Merely reporting pingtime might fool the caller into thinking the node was still responsive, - // since pingtime does not update until the ping is complete, which might take a while. - // So, if a ping is taking an unusually long time in flight, - // the caller can immediately detect that this is happening. 
- std::chrono::microseconds ping_wait{0}; - if ((0 != nPingNonceSent) && (0 != m_ping_start.load().count())) { - ping_wait = GetTime<std::chrono::microseconds>() - m_ping_start.load(); - } - - // Raw ping time is in microseconds, but show it to user as whole seconds (Bitcoin users should be well used to small numbers with many decimal places by now :) - stats.m_ping_usec = nPingUsecTime; - stats.m_min_ping_usec = nMinPingUsecTime; - stats.m_ping_wait_usec = count_microseconds(ping_wait); + stats.m_ping_usec = m_last_ping_time; + stats.m_min_ping_usec = m_min_ping_time; // Leave string empty if addrLocal invalid (not filled in yet) CService addrLocalUnlocked = GetAddrLocal(); @@ -834,7 +822,7 @@ size_t CConnman::SocketSendData(CNode& node) const static bool ReverseCompareNodeMinPingTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { - return a.nMinPingUsecTime > b.nMinPingUsecTime; + return a.m_min_ping_time > b.m_min_ping_time; } static bool ReverseCompareNodeTimeConnected(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) @@ -989,7 +977,7 @@ bool CConnman::AttemptToEvictConnection() peer_relay_txes = node->m_tx_relay->fRelayTxes; peer_filter_not_null = node->m_tx_relay->pfilter != nullptr; } - NodeEvictionCandidate candidate = {node->GetId(), node->nTimeConnected, node->nMinPingUsecTime, + NodeEvictionCandidate candidate = {node->GetId(), node->nTimeConnected, node->m_min_ping_time, node->nLastBlockTime, node->nLastTXTime, HasAllDesirableServiceFlags(node->nServices), peer_relay_txes, peer_filter_not_null, node->nKeyedNetGroup, @@ -1218,18 +1206,17 @@ void CConnman::NotifyNumConnectionsChanged() } } +bool CConnman::RunInactivityChecks(const CNode& node) const +{ + return GetSystemTimeInSeconds() > node.nTimeConnected + m_peer_connect_timeout; +} + bool CConnman::InactivityCheck(const CNode& node) const { // Use non-mockable system time (otherwise these timers will pop when we // use setmocktime in the tests). int64_t now = GetSystemTimeInSeconds(); - if (now <= node.nTimeConnected + m_peer_connect_timeout) { - // Only run inactivity checks if the peer has been connected longer - // than m_peer_connect_timeout. - return false; - } - if (node.nLastRecv == 0 || node.nLastSend == 0) { LogPrint(BCLog::NET, "socket no message in first %i seconds, %d %d peer=%d\n", m_peer_connect_timeout, node.nLastRecv != 0, node.nLastSend != 0, node.GetId()); return true; @@ -1245,14 +1232,6 @@ bool CConnman::InactivityCheck(const CNode& node) const return true; } - if (node.nPingNonceSent && node.m_ping_start.load() + std::chrono::seconds{TIMEOUT_INTERVAL} < GetTime<std::chrono::microseconds>()) { - // We use mockable time for ping timeouts. This means that setmocktime - // may cause pings to time out for peers that have been connected for - // longer than m_peer_connect_timeout. 
- LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n", 0.000001 * count_microseconds(GetTime<std::chrono::microseconds>() - node.m_ping_start.load()), node.GetId()); - return true; - } - if (!node.fSuccessfullyConnected) { LogPrint(BCLog::NET, "version handshake timeout peer=%d\n", node.GetId()); return true; @@ -1534,7 +1513,7 @@ void CConnman::SocketHandler() if (bytes_sent) RecordBytesSent(bytes_sent); } - if (InactivityCheck(*pnode)) pnode->fDisconnect = true; + if (RunInactivityChecks(*pnode) && InactivityCheck(*pnode)) pnode->fDisconnect = true; } { LOCK(cs_vNodes); @@ -1769,11 +1748,19 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) } // Initiate network connections - int64_t nStart = GetTime(); + auto start = GetTime<std::chrono::seconds>(); // Minimum time before next feeler connection (in microseconds). - int64_t nNextFeeler = PoissonNextSend(nStart*1000*1000, FEELER_INTERVAL); - int64_t nNextExtraBlockRelay = PoissonNextSend(nStart*1000*1000, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); + + int64_t nNextFeeler = PoissonNextSend(count_microseconds(start), FEELER_INTERVAL); + int64_t nNextExtraBlockRelay = PoissonNextSend(count_microseconds(start), EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); + const bool dnsseed = gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED); + bool add_fixed_seeds = gArgs.GetBoolArg("-fixedseeds", DEFAULT_FIXEDSEEDS); + + if (!add_fixed_seeds) { + LogPrintf("Fixed seeds are disabled\n"); + } + while (!interruptNet) { ProcessAddrFetch(); @@ -1785,18 +1772,32 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) if (interruptNet) return; - // Add seed nodes if DNS seeds are all down (an infrastructure attack?). - // Note that we only do this if we started with an empty peers.dat, - // (in which case we will query DNS seeds immediately) *and* the DNS - // seeds have not returned any results. - if (addrman.size() == 0 && (GetTime() - nStart > 60)) { - static bool done = false; - if (!done) { - LogPrintf("Adding fixed seed nodes as DNS doesn't seem to be available.\n"); + if (add_fixed_seeds && addrman.size() == 0) { + // When the node starts with an empty peers.dat, there are a few other sources of peers before + // we fallback on to fixed seeds: -dnsseed, -seednode, -addnode + // If none of those are available, we fallback on to fixed seeds immediately, else we allow + // 60 seconds for any of those sources to populate addrman. + bool add_fixed_seeds_now = false; + // It is cheapest to check if enough time has passed first. + if (GetTime<std::chrono::seconds>() > start + std::chrono::minutes{1}) { + add_fixed_seeds_now = true; + LogPrintf("Adding fixed seeds as 60 seconds have passed and addrman is empty\n"); + } + + // Checking !dnsseed is cheaper before locking 2 mutexes. 
+ if (!add_fixed_seeds_now && !dnsseed) { + LOCK2(m_addr_fetches_mutex, cs_vAddedNodes); + if (m_addr_fetches.empty() && vAddedNodes.empty()) { + add_fixed_seeds_now = true; + LogPrintf("Adding fixed seeds as -dnsseed=0, -addnode is not provided and all -seednode(s) attempted\n"); + } + } + + if (add_fixed_seeds_now) { CNetAddr local; local.SetInternal("fixedseeds"); addrman.Add(convertSeed6(Params().FixedSeeds()), local); - done = true; + add_fixed_seeds = false; } } @@ -2188,9 +2189,8 @@ bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError, return false; } - SOCKET hListenSocket = CreateSocket(addrBind); - if (hListenSocket == INVALID_SOCKET) - { + std::unique_ptr<Sock> sock = CreateSock(addrBind); + if (!sock) { strError = strprintf(Untranslated("Error: Couldn't open socket for incoming connections (socket returned error %s)"), NetworkErrorString(WSAGetLastError())); LogPrintf("%s\n", strError.original); return false; @@ -2198,21 +2198,21 @@ bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError, // Allow binding if the port is still in TIME_WAIT state after // the program was closed and restarted. - setsockopt(hListenSocket, SOL_SOCKET, SO_REUSEADDR, (sockopt_arg_type)&nOne, sizeof(int)); + setsockopt(sock->Get(), SOL_SOCKET, SO_REUSEADDR, (sockopt_arg_type)&nOne, sizeof(int)); // some systems don't have IPV6_V6ONLY but are always v6only; others do have the option // and enable it by default or not. Try to enable it, if possible. if (addrBind.IsIPv6()) { #ifdef IPV6_V6ONLY - setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (sockopt_arg_type)&nOne, sizeof(int)); + setsockopt(sock->Get(), IPPROTO_IPV6, IPV6_V6ONLY, (sockopt_arg_type)&nOne, sizeof(int)); #endif #ifdef WIN32 int nProtLevel = PROTECTION_LEVEL_UNRESTRICTED; - setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_PROTECTION_LEVEL, (const char*)&nProtLevel, sizeof(int)); + setsockopt(sock->Get(), IPPROTO_IPV6, IPV6_PROTECTION_LEVEL, (const char*)&nProtLevel, sizeof(int)); #endif } - if (::bind(hListenSocket, (struct sockaddr*)&sockaddr, len) == SOCKET_ERROR) + if (::bind(sock->Get(), (struct sockaddr*)&sockaddr, len) == SOCKET_ERROR) { int nErr = WSAGetLastError(); if (nErr == WSAEADDRINUSE) @@ -2220,21 +2220,19 @@ bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError, else strError = strprintf(_("Unable to bind to %s on this computer (bind returned error %s)"), addrBind.ToString(), NetworkErrorString(nErr)); LogPrintf("%s\n", strError.original); - CloseSocket(hListenSocket); return false; } LogPrintf("Bound to %s\n", addrBind.ToString()); // Listen for incoming connections - if (listen(hListenSocket, SOMAXCONN) == SOCKET_ERROR) + if (listen(sock->Get(), SOMAXCONN) == SOCKET_ERROR) { strError = strprintf(_("Error: Listening for incoming connections failed (listen returned error %s)"), NetworkErrorString(WSAGetLastError())); LogPrintf("%s\n", strError.original); - CloseSocket(hListenSocket); return false; } - vhListenSocket.push_back(ListenSocket(hListenSocket, permissions)); + vhListenSocket.push_back(ListenSocket(sock->Release(), permissions)); return true; } @@ -2434,7 +2432,7 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions) // Send and receive from sockets, accept connections threadSocketHandler = std::thread(&TraceThread<std::function<void()> >, "net", std::function<void()>(std::bind(&CConnman::ThreadSocketHandler, this))); - if (!gArgs.GetBoolArg("-dnsseed", true)) + if (!gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED)) 
LogPrintf("DNS seeding disabled\n"); else threadDNSAddressSeed = std::thread(&TraceThread<std::function<void()> >, "dnsseed", std::function<void()>(std::bind(&CConnman::ThreadDNSAddressSeed, this))); @@ -2833,12 +2831,12 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, SOCKET hSocketIn, const : nTimeConnected(GetSystemTimeInSeconds()), addr(addrIn), addrBind(addrBindIn), + m_inbound_onion(inbound_onion), nKeyedNetGroup(nKeyedNetGroupIn), id(idIn), nLocalHostNonce(nLocalHostNonceIn), m_conn_type(conn_type_in), - nLocalServices(nLocalServicesIn), - m_inbound_onion(inbound_onion) + nLocalServices(nLocalServicesIn) { if (inbound_onion) assert(conn_type_in == ConnectionType::INBOUND); hSocket = hSocketIn; @@ -2879,6 +2877,9 @@ void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg) { size_t nMessageSize = msg.data.size(); LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.m_type), nMessageSize, pnode->GetId()); + if (gArgs.GetBoolArg("-capturemessages", false)) { + CaptureMessage(pnode->addr, msg.m_type, msg.data, /* incoming */ false); + } // make sure we use the appropriate network transport format std::vector<unsigned char> serializedHeader; @@ -2894,18 +2895,14 @@ void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg) pnode->mapSendBytesPerMsgCmd[msg.m_type] += nTotalSize; pnode->nSendSize += nTotalSize; - if (pnode->nSendSize > nSendBufferMaxSize) - pnode->fPauseSend = true; + if (pnode->nSendSize > nSendBufferMaxSize) pnode->fPauseSend = true; pnode->vSendMsg.push_back(std::move(serializedHeader)); - if (nMessageSize) - pnode->vSendMsg.push_back(std::move(msg.data)); + if (nMessageSize) pnode->vSendMsg.push_back(std::move(msg.data)); // If write queue empty, attempt "optimistic write" - if (optimisticSend == true) - nBytesSent = SocketSendData(*pnode); + if (optimisticSend) nBytesSent = SocketSendData(*pnode); } - if (nBytesSent) - RecordBytesSent(nBytesSent); + if (nBytesSent) RecordBytesSent(nBytesSent); } bool CConnman::ForNode(NodeId id, std::function<bool(CNode* pnode)> func) @@ -2948,3 +2945,31 @@ uint64_t CConnman::CalculateKeyedNetGroup(const CAddress& ad) const return GetDeterministicRandomizer(RANDOMIZER_ID_NETGROUP).Write(vchNetGroup.data(), vchNetGroup.size()).Finalize(); } + +void CaptureMessage(const CAddress& addr, const std::string& msg_type, const Span<const unsigned char>& data, bool is_incoming) +{ + // Note: This function captures the message at the time of processing, + // not at socket receive/send time. + // This ensures that the messages are always in order from an application + // layer (processing) perspective. + auto now = GetTime<std::chrono::microseconds>(); + + // Windows folder names can not include a colon + std::string clean_addr = addr.ToString(); + std::replace(clean_addr.begin(), clean_addr.end(), ':', '_'); + + fs::path base_path = GetDataDir() / "message_capture" / clean_addr; + fs::create_directories(base_path); + + fs::path path = base_path / (is_incoming ? 
"msgs_recv.dat" : "msgs_sent.dat"); + CAutoFile f(fsbridge::fopen(path, "ab"), SER_DISK, CLIENT_VERSION); + + ser_writedata64(f, now.count()); + f.write(msg_type.data(), msg_type.length()); + for (auto i = msg_type.length(); i < CMessageHeader::COMMAND_SIZE; ++i) { + f << '\0'; + } + uint32_t size = data.size(); + ser_writedata32(f, size); + f.write((const char*)data.data(), data.size()); +} @@ -20,6 +20,7 @@ #include <policy/feerate.h> #include <protocol.h> #include <random.h> +#include <span.h> #include <streams.h> #include <sync.h> #include <threadinterrupt.h> @@ -75,8 +76,12 @@ static constexpr uint64_t DEFAULT_MAX_UPLOAD_TARGET = 0; static const bool DEFAULT_BLOCKSONLY = false; /** -peertimeout default */ static const int64_t DEFAULT_PEER_CONNECT_TIMEOUT = 60; +/** Number of file descriptors required for message capture **/ +static const int NUM_FDS_MESSAGE_CAPTURE = 1; static const bool DEFAULT_FORCEDNSSEED = false; +static const bool DEFAULT_DNSSEED = true; +static const bool DEFAULT_FIXEDSEEDS = true; static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000; static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000; @@ -192,7 +197,8 @@ enum }; bool IsPeerAddrLocalGood(CNode *pnode); -void AdvertiseLocal(CNode *pnode); +/** Returns a local address that we should advertise to this peer */ +Optional<CAddress> GetLocalAddrForPeer(CNode *pnode); /** * Mark a network as reachable or unreachable (no automatic connects to it) @@ -255,7 +261,6 @@ public: mapMsgCmdSize mapRecvBytesPerMsgCmd; NetPermissionFlags m_permissionFlags; int64_t m_ping_usec; - int64_t m_ping_wait_usec; int64_t m_min_ping_usec; CAmount minFeeFilter; // Our address, as reported by the peer @@ -424,6 +429,8 @@ public: const CAddress addr; // Bind address of our side of the connection const CAddress addrBind; + //! Whether this peer is an inbound onion, i.e. connected via our Tor onion service. + const bool m_inbound_onion; std::atomic<int> nVersion{0}; RecursiveMutex cs_SubVer; /** @@ -442,6 +449,7 @@ public: * messages, implying a preference to receive ADDRv2 instead of ADDR ones. */ std::atomic_bool m_wants_addrv2{false}; + /** fSuccessfullyConnected is set to true on receiving VERACK from the peer. */ std::atomic_bool fSuccessfullyConnected{false}; // Setting fDisconnect to true will cause the node to be disconnected the // next time DisconnectNodes() runs @@ -583,19 +591,14 @@ public: * in CConnman::AttemptToEvictConnection. */ std::atomic<int64_t> nLastTXTime{0}; - // Ping time measurement: - // The pong reply we're expecting, or 0 if no pong expected. - std::atomic<uint64_t> nPingNonceSent{0}; - /** When the last ping was sent, or 0 if no ping was ever sent */ - std::atomic<std::chrono::microseconds> m_ping_start{0us}; - // Last measured round-trip time. - std::atomic<int64_t> nPingUsecTime{0}; - // Best measured round-trip time. - std::atomic<int64_t> nMinPingUsecTime{std::numeric_limits<int64_t>::max()}; - // Whether a ping is requested. - std::atomic<bool> fPingQueued{false}; - - CNode(NodeId id, ServiceFlags nLocalServicesIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress& addrBindIn, const std::string& addrNameIn, ConnectionType conn_type_in, bool inbound_onion = false); + /** Last measured round-trip time. Used only for RPC/GUI stats/debugging.*/ + std::atomic<int64_t> m_last_ping_time{0}; + + /** Lowest measured round-trip time. Used as an inbound peer eviction + * criterium in CConnman::AttemptToEvictConnection. 
*/ + std::atomic<int64_t> m_min_ping_time{std::numeric_limits<int64_t>::max()}; + + CNode(NodeId id, ServiceFlags nLocalServicesIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress& addrBindIn, const std::string& addrNameIn, ConnectionType conn_type_in, bool inbound_onion); ~CNode(); CNode(const CNode&) = delete; CNode& operator=(const CNode&) = delete; @@ -713,8 +716,11 @@ public: std::string ConnectionTypeAsString() const { return ::ConnectionTypeAsString(m_conn_type); } - /** Whether this peer is an inbound onion, e.g. connected via our Tor onion service. */ - bool IsInboundOnion() const { return m_inbound_onion; } + /** A ping-pong round trip has completed successfully. Update latest and minimum ping times. */ + void PongReceived(std::chrono::microseconds ping_time) { + m_last_ping_time = count_microseconds(ping_time); + m_min_ping_time = std::min(m_min_ping_time.load(), count_microseconds(ping_time)); + } private: const NodeId id; @@ -748,9 +754,6 @@ private: CService addrLocal GUARDED_BY(cs_addrLocal); mutable RecursiveMutex cs_addrLocal; - //! Whether this peer is an inbound onion, e.g. connected via our Tor onion service. - const bool m_inbound_onion{false}; - mapMsgCmdSize mapSendBytesPerMsgCmd GUARDED_BY(cs_vSend); mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv); }; @@ -1020,6 +1023,9 @@ public: void SetAsmap(std::vector<bool> asmap) { addrman.m_asmap = std::move(asmap); } + /** Return true if the peer has been connected for long enough to do inactivity checks. */ + bool RunInactivityChecks(const CNode& node) const; + private: struct ListenSocket { public: @@ -1241,11 +1247,14 @@ inline std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, return std::chrono::microseconds{PoissonNextSend(now.count(), average_interval.count())}; } +/** Dump binary message to file, with timestamp */ +void CaptureMessage(const CAddress& addr, const std::string& msg_type, const Span<const unsigned char>& data, bool is_incoming); + struct NodeEvictionCandidate { NodeId id; int64_t nTimeConnected; - int64_t nMinPingUsecTime; + int64_t m_min_ping_time; int64_t nLastBlockTime; int64_t nLastTXTime; bool fRelevantServices; diff --git a/src/net_processing.cpp b/src/net_processing.cpp index a4070f5b63..198228de26 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -169,6 +169,14 @@ void EraseOrphansFor(NodeId peer); // Internal stuff namespace { +/** Blocks that are in flight, and that are in the queue to be downloaded. */ +struct QueuedBlock { + uint256 hash; + const CBlockIndex* pindex; //!< Optional. + bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request. + std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Optional, used for CMPCTBLOCK downloads +}; + /** * Data structure for an individual peer. This struct is not protected by * cs_main since it does not contain validation-critical data. @@ -211,6 +219,13 @@ struct Peer { /** This peer's reported block height when we connected */ std::atomic<int> m_starting_height{-1}; + /** The pong reply we're expecting, or 0 if no pong expected. 
*/ + std::atomic<uint64_t> m_ping_nonce_sent{0}; + /** When the last ping was sent, or 0 if no ping was ever sent */ + std::atomic<std::chrono::microseconds> m_ping_start{0us}; + /** Whether a ping has been requested by the user */ + std::atomic<bool> m_ping_queued{false}; + /** Set of txids to reconsider once their parent transactions have been accepted **/ std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans); @@ -248,6 +263,7 @@ public: void CheckForStaleTipAndEvictPeers() override; bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) override; bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; } + void SendPings() override; void SetBestHeight(int height) override { m_best_height = height; }; void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message) override; void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv, @@ -294,9 +310,10 @@ private: /** Maybe disconnect a peer and discourage future connections from its address. * * @param[in] pnode The node to check. + * @param[in] peer The peer object to check. * @return True if the peer was marked for disconnection in this function */ - bool MaybeDiscourageAndDisconnect(CNode& pnode); + bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer); void ProcessOrphanTx(std::set<uint256>& orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans); /** Process a single headers message from a peer. */ @@ -315,6 +332,10 @@ private: /** Send a version message to a peer */ void PushNodeVersion(CNode& pnode, int64_t nTime); + /** Send a ping message every PING_INTERVAL or if requested via RPC. May + * mark the peer to be disconnected if a ping has timed out. */ + void MaybeSendPing(CNode& node_to, Peer& peer); + const CChainParams& m_chainparams; CConnman& m_connman; /** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */ @@ -345,10 +366,7 @@ private: * their own locks. */ std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex); -}; -} // namespace -namespace { /** Number of nodes with fSyncStarted. */ int nSyncStarted GUARDED_BY(cs_main) = 0; @@ -360,6 +378,14 @@ namespace { */ std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main); + /** Number of peers with wtxid relay. */ + int m_wtxid_relay_peers GUARDED_BY(cs_main) = 0; + + /** Number of outbound peers with m_chain_sync.m_protect. */ + int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; + + bool AlreadyHaveTx(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + /** * Filter for transactions that were recently rejected by * AcceptToMemoryPool. These are not rerequested until the chain tip @@ -402,35 +428,36 @@ namespace { * We use this to avoid requesting transactions that have already been * confirnmed. */ - Mutex g_cs_recent_confirmed_transactions; - std::unique_ptr<CRollingBloomFilter> g_recent_confirmed_transactions GUARDED_BY(g_cs_recent_confirmed_transactions); - - /** Blocks that are in flight, and that are in the queue to be downloaded. */ - struct QueuedBlock { - uint256 hash; - const CBlockIndex* pindex; //!< Optional. - bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request. 
- std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Optional, used for CMPCTBLOCK downloads - }; - std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight GUARDED_BY(cs_main); + Mutex m_recent_confirmed_transactions_mutex; + std::unique_ptr<CRollingBloomFilter> m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex); - /** Stack of nodes which we have set to announce using compact blocks */ - std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main); + /* Returns a bool indicating whether we requested this block. + * Also used if a block was /not/ received and timed out or started with another peer + */ + bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - /** Number of preferable block download peers. */ - int nPreferredDownload GUARDED_BY(cs_main) = 0; + /* Mark a block as in flight + * Returns false, still setting pit, if the block was already in flight from the same peer + * pit will only be valid as long as the same cs_main lock is being held + */ + bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - /** Number of peers from which we're downloading blocks. */ - int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0; + bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main); - /** Number of peers with wtxid relay. */ - int g_wtxid_relay_peers GUARDED_BY(cs_main) = 0; + /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has + * at most count entries. + */ + void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - /** Number of outbound peers with m_chain_sync.m_protect. */ - int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; + std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight GUARDED_BY(cs_main); /** When our tip was last updated. */ - std::atomic<int64_t> g_last_tip_update(0); + std::atomic<int64_t> m_last_tip_update{0}; + + /** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). */ + CTransactionRef FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main); + + void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(!cs_main, peer.m_getdata_requests_mutex); /** Relay map (txid or wtxid -> CTransactionRef) */ typedef std::map<uint256, CTransactionRef> MapRelay; @@ -438,6 +465,28 @@ namespace { /** Expiration-time ordered list of (expire time, relay map entry) pairs. */ std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration GUARDED_BY(cs_main); + /** + * When a peer sends us a valid block, instruct it to announce blocks to us + * using CMPCTBLOCK if possible by adding its nodeid to the end of + * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by + * removing the first element if necessary. 
+ */ + void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + /** Stack of nodes which we have set to announce using compact blocks */ + std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main); + + /** Number of peers from which we're downloading blocks. */ + int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0; + +}; +} // namespace + +namespace { + + /** Number of preferable block download peers. */ + int nPreferredDownload GUARDED_BY(cs_main) = 0; + struct IteratorComparator { template<typename I> @@ -610,9 +659,8 @@ static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUS nPreferredDownload += state->fPreferredDownload; } -// Returns a bool indicating whether we requested this block. -// Also used if a block was /not/ received and timed out or started with another peer -static bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { +bool PeerManagerImpl::MarkBlockAsReceived(const uint256& hash) +{ std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash); if (itInFlight != mapBlocksInFlight.end()) { CNodeState *state = State(itInFlight->second.first); @@ -635,9 +683,8 @@ static bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs return false; } -// returns false, still setting pit, if the block was already in flight from the same peer -// pit will only be valid as long as the same cs_main lock is being held -static bool MarkBlockAsInFlight(CTxMemPool& mempool, NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { +bool PeerManagerImpl::MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex, std::list<QueuedBlock>::iterator** pit) +{ CNodeState *state = State(nodeid); assert(state != nullptr); @@ -654,7 +701,7 @@ static bool MarkBlockAsInFlight(CTxMemPool& mempool, NodeId nodeid, const uint25 MarkBlockAsReceived(hash); std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), - {hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : nullptr)}); + {hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)}); state->nBlocksInFlight++; state->nBlocksInFlightValidHeaders += it->fValidatedHeaders; if (state->nBlocksInFlight == 1) { @@ -676,7 +723,7 @@ static void ProcessBlockAvailability(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_ assert(state != nullptr); if (!state->hashLastUnknownBlock.IsNull()) { - const CBlockIndex* pindex = LookupBlockIndex(state->hashLastUnknownBlock); + const CBlockIndex* pindex = g_chainman.m_blockman.LookupBlockIndex(state->hashLastUnknownBlock); if (pindex && pindex->nChainWork > 0) { if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { state->pindexBestKnownBlock = pindex; @@ -693,7 +740,7 @@ static void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) EXCLUSIV ProcessBlockAvailability(nodeid); - const CBlockIndex* pindex = LookupBlockIndex(hash); + const CBlockIndex* pindex = g_chainman.m_blockman.LookupBlockIndex(hash); if (pindex && pindex->nChainWork > 0) { // An actually better block was announced. 
if (state->pindexBestKnownBlock == nullptr || pindex->nChainWork >= state->pindexBestKnownBlock->nChainWork) { @@ -705,13 +752,7 @@ static void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) EXCLUSIV } } -/** - * When a peer sends us a valid block, instruct it to announce blocks to us - * using CMPCTBLOCK if possible by adding its nodeid to the end of - * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by - * removing the first element if necessary. - */ -static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman& connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) { AssertLockHeld(cs_main); CNodeState* nodestate = State(nodeid); @@ -727,21 +768,21 @@ static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman& connma return; } } - connman.ForNode(nodeid, [&connman](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { + m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { AssertLockHeld(::cs_main); uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1; if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { // As per BIP152, we only get 3 of our peers to announce // blocks using compact encodings. - connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, nCMPCTBLOCKVersion](CNode* pnodeStop){ - connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion)); + m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this, nCMPCTBLOCKVersion](CNode* pnodeStop){ + m_connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion)); // save BIP152 bandwidth state: we select peer to be low-bandwidth pnodeStop->m_bip152_highbandwidth_to = false; return true; }); lNodesAnnouncingHeaderAndIDs.pop_front(); } - connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion)); + m_connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion)); // save BIP152 bandwidth state: we select peer to be high-bandwidth pfrom->m_bip152_highbandwidth_to = true; lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); @@ -750,13 +791,14 @@ static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman& connma } } -static bool TipMayBeStale(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +bool PeerManagerImpl::TipMayBeStale() { AssertLockHeld(cs_main); - if (g_last_tip_update == 0) { - g_last_tip_update = GetTime(); + const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); + if (m_last_tip_update == 0) { + m_last_tip_update = GetTime(); } - return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty(); + return m_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty(); } static bool CanDirectFetch(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) @@ -773,9 +815,7 @@ static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIV return false; } -/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has - * at most count entries. 
*/ -static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +void PeerManagerImpl::FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) { if (count == 0) return; @@ -804,6 +844,7 @@ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vec if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) return; + const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); std::vector<const CBlockIndex*> vToFetch; const CBlockIndex *pindexWalk = state->pindexLastCommonBlock; // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last @@ -911,7 +952,7 @@ void PeerManagerImpl::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, auto delay = std::chrono::microseconds{0}; const bool preferred = state->fPreferredDownload; if (!preferred) delay += NONPREF_PEER_TX_DELAY; - if (!gtxid.IsWtxid() && g_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY; + if (!gtxid.IsWtxid() && m_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY; const bool overloaded = !node.HasPermission(PF_RELAY) && m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT; if (overloaded) delay += OVERLOADED_PEER_TX_DELAY; @@ -1004,10 +1045,10 @@ void PeerManagerImpl::FinalizeNode(const CNode& node, bool& fUpdateConnectionTim nPreferredDownload -= state->fPreferredDownload; nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0); assert(nPeersWithValidatedDownloads >= 0); - g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; - assert(g_outbound_peers_with_protect_from_disconnect >= 0); - g_wtxid_relay_peers -= state->m_wtxid_relay; - assert(g_wtxid_relay_peers >= 0); + m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; + assert(m_outbound_peers_with_protect_from_disconnect >= 0); + m_wtxid_relay_peers -= state->m_wtxid_relay; + assert(m_wtxid_relay_peers >= 0); mapNodeState.erase(nodeid); @@ -1016,8 +1057,8 @@ void PeerManagerImpl::FinalizeNode(const CNode& node, bool& fUpdateConnectionTim assert(mapBlocksInFlight.empty()); assert(nPreferredDownload == 0); assert(nPeersWithValidatedDownloads == 0); - assert(g_outbound_peers_with_protect_from_disconnect == 0); - assert(g_wtxid_relay_peers == 0); + assert(m_outbound_peers_with_protect_from_disconnect == 0); + assert(m_wtxid_relay_peers == 0); assert(m_txrequest.Size() == 0); } LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid); @@ -1060,6 +1101,18 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) PeerRef peer = GetPeerRef(nodeid); if (peer == nullptr) return false; stats.m_starting_height = peer->m_starting_height; + // It is common for nodes with good ping times to suddenly become lagged, + // due to a new block arriving or other large transfer. + // Merely reporting pingtime might fool the caller into thinking the node was still responsive, + // since pingtime does not update until the ping is complete, which might take a while. + // So, if a ping is taking an unusually long time in flight, + // the caller can immediately detect that this is happening. 
+ std::chrono::microseconds ping_wait{0}; + if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())) { + ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load(); + } + + stats.m_ping_wait_usec = count_microseconds(ping_wait); return true; } @@ -1344,7 +1397,7 @@ PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& conn // The false positive rate of 1/1M should come out to less than 1 // transaction per day that would be inadvertently ignored (which is the // same probability that we have in the reject filter). - g_recent_confirmed_transactions.reset(new CRollingBloomFilter(48000, 0.000001)); + m_recent_confirmed_transactions.reset(new CRollingBloomFilter(48000, 0.000001)); // Stale tip checking and peer eviction are on two different timers, but we // don't want them to get out of sync due to drift in the scheduler, so we @@ -1394,14 +1447,14 @@ void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased); } - g_last_tip_update = GetTime(); + m_last_tip_update = GetTime(); } { - LOCK(g_cs_recent_confirmed_transactions); + LOCK(m_recent_confirmed_transactions_mutex); for (const auto& ptx : pblock->vtx) { - g_recent_confirmed_transactions->insert(ptx->GetHash()); + m_recent_confirmed_transactions->insert(ptx->GetHash()); if (ptx->GetHash() != ptx->GetWitnessHash()) { - g_recent_confirmed_transactions->insert(ptx->GetWitnessHash()); + m_recent_confirmed_transactions->insert(ptx->GetWitnessHash()); } } } @@ -1424,8 +1477,8 @@ void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &blo // block's worth of transactions in it, but that should be fine, since // presumably the most common case of relaying a confirmed transaction // should be just after a new block containing it is found. 
- LOCK(g_cs_recent_confirmed_transactions); - g_recent_confirmed_transactions->reset(); + LOCK(m_recent_confirmed_transactions_mutex); + m_recent_confirmed_transactions->reset(); } // All of the following cache a recent block, and are protected by cs_most_recent_block @@ -1550,7 +1603,7 @@ void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationSta !::ChainstateActive().IsInitialBlockDownload() && mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) { if (it != mapBlockSource.end()) { - MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, m_connman); + MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first); } } if (it != mapBlockSource.end()) @@ -1563,7 +1616,7 @@ void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationSta // -bool static AlreadyHaveTx(const GenTxid& gtxid, const CTxMemPool& mempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid) { assert(recentRejects); if (::ChainActive().Tip()->GetBlockHash() != hashRecentRejectsChainTip) { @@ -1587,16 +1640,22 @@ bool static AlreadyHaveTx(const GenTxid& gtxid, const CTxMemPool& mempool) EXCLU } { - LOCK(g_cs_recent_confirmed_transactions); - if (g_recent_confirmed_transactions->contains(hash)) return true; + LOCK(m_recent_confirmed_transactions_mutex); + if (m_recent_confirmed_transactions->contains(hash)) return true; } - return recentRejects->contains(hash) || mempool.exists(gtxid); + return recentRejects->contains(hash) || m_mempool.exists(gtxid); } bool static AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { - return LookupBlockIndex(block_hash) != nullptr; + return g_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr; +} + +void PeerManagerImpl::SendPings() +{ + LOCK(m_peer_mutex); + for(auto& it : m_peer_map) it.second->m_ping_queued = true; } void RelayTransaction(const uint256& txid, const uint256& wtxid, const CConnman& connman) @@ -1686,7 +1745,7 @@ void static ProcessGetBlockData(CNode& pfrom, Peer& peer, const CChainParams& ch bool need_activate_chain = false; { LOCK(cs_main); - const CBlockIndex* pindex = LookupBlockIndex(inv.hash); + const CBlockIndex* pindex = g_chainman.m_blockman.LookupBlockIndex(inv.hash); if (pindex) { if (pindex->HaveTxsDownloaded() && !pindex->IsValid(BLOCK_VALID_SCRIPTS) && pindex->IsValid(BLOCK_VALID_TREE)) { @@ -1701,13 +1760,13 @@ void static ProcessGetBlockData(CNode& pfrom, Peer& peer, const CChainParams& ch } // release cs_main before calling ActivateBestChain if (need_activate_chain) { BlockValidationState state; - if (!ActivateBestChain(state, chainparams, a_recent_block)) { + if (!::ChainstateActive().ActivateBestChain(state, chainparams, a_recent_block)) { LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); } } LOCK(cs_main); - const CBlockIndex* pindex = LookupBlockIndex(inv.hash); + const CBlockIndex* pindex = g_chainman.m_blockman.LookupBlockIndex(inv.hash); if (pindex) { send = BlockRequestAllowed(pindex, consensusParams); if (!send) { @@ -1825,10 +1884,9 @@ void static ProcessGetBlockData(CNode& pfrom, Peer& peer, const CChainParams& ch } } -//! Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). 
-static CTransactionRef FindTxForGetData(const CTxMemPool& mempool, const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main) +CTransactionRef PeerManagerImpl::FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) { - auto txinfo = mempool.info(gtxid); + auto txinfo = m_mempool.info(gtxid); if (txinfo.tx) { // If a TX could have been INVed in reply to a MEMPOOL request, // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request @@ -1853,7 +1911,7 @@ static CTransactionRef FindTxForGetData(const CTxMemPool& mempool, const CNode& return {}; } -void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainparams, CConnman& connman, CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(!cs_main, peer.m_getdata_requests_mutex) +void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) { AssertLockNotHeld(cs_main); @@ -1882,17 +1940,17 @@ void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainpa continue; } - CTransactionRef tx = FindTxForGetData(mempool, pfrom, ToGenTxid(inv), mempool_req, now); + CTransactionRef tx = FindTxForGetData(pfrom, ToGenTxid(inv), mempool_req, now); if (tx) { // WTX and WITNESS_TX imply we serialize with witness int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0); - connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx)); - mempool.RemoveUnbroadcastTx(tx->GetHash()); + m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx)); + m_mempool.RemoveUnbroadcastTx(tx->GetHash()); // As we're going to send tx, make sure its unconfirmed parents are made requestable. std::vector<uint256> parent_ids_to_add; { - LOCK(mempool.cs); - auto txiter = mempool.GetIter(tx->GetHash()); + LOCK(m_mempool.cs); + auto txiter = m_mempool.GetIter(tx->GetHash()); if (txiter) { const CTxMemPoolEntry::Parents& parents = (*txiter)->GetMemPoolParentsConst(); parent_ids_to_add.reserve(parents.size()); @@ -1920,7 +1978,7 @@ void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainpa if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) { const CInv &inv = *it++; if (inv.IsGenBlkMsg()) { - ProcessGetBlockData(pfrom, peer, chainparams, inv, connman); + ProcessGetBlockData(pfrom, peer, m_chainparams, inv, m_connman); } // else: If the first item on the queue is an unknown type, we erase it // and continue processing the queue on the next call. @@ -1943,7 +2001,7 @@ void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainpa // In normal operation, we often send NOTFOUND messages for parents of // transactions that we relay; if a peer is missing a parent, they may // assume we have them and request the parents from us. - connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound)); + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound)); } } @@ -1997,7 +2055,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer, // don't connect before giving DoS points // - Once a headers message is received that is valid and does connect, // nUnconnectingHeaders gets reset back to 0. 
- if (!LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) { + if (!g_chainman.m_blockman.LookupBlockIndex(headers[0].hashPrevBlock) && nCount < MAX_BLOCKS_TO_ANNOUNCE) { nodestate->nUnconnectingHeaders++; m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256())); LogPrint(BCLog::NET, "received header %s: missing prev block %s, sending getheaders (%d) to end (peer=%d, nUnconnectingHeaders=%d)\n", @@ -2027,7 +2085,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer, // If we don't have the last header, then they'll have given us // something new (if these headers are valid). - if (!LookupBlockIndex(hashLastBlock)) { + if (!g_chainman.m_blockman.LookupBlockIndex(hashLastBlock)) { received_new_header = true; } } @@ -2102,7 +2160,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer, } uint32_t nFetchFlags = GetFetchFlags(pfrom); vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); - MarkBlockAsInFlight(m_mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex); + MarkBlockAsInFlight(pfrom.GetId(), pindex->GetBlockHash(), pindex); LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", pindex->GetBlockHash().ToString(), pfrom.GetId()); } @@ -2146,10 +2204,10 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer, // thus always subject to eviction under the bad/lagging chain logic. // See ChainSyncTimeoutState. if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) { - if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) { + if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) { LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId()); nodestate->m_chain_sync.m_protect = true; - ++g_outbound_peers_with_protect_from_disconnect; + ++m_outbound_peers_with_protect_from_disconnect; } } } @@ -2178,10 +2236,10 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set) if (orphan_it == mapOrphanTransactions.end()) continue; const CTransactionRef porphanTx = orphan_it->second.tx; - TxValidationState state; - std::list<CTransactionRef> removed_txn; + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), m_mempool, porphanTx, false /* bypass_limits */); + const TxValidationState& state = result.m_state; - if (AcceptToMemoryPool(m_mempool, state, porphanTx, &removed_txn, false /* bypass_limits */)) { + if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString()); RelayTransaction(orphanHash, porphanTx->GetWitnessHash(), m_connman); for (unsigned int i = 0; i < porphanTx->vout.size(); i++) { @@ -2193,7 +2251,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set) } } EraseOrphanTx(orphanHash); - for (const CTransactionRef& removedTx : removed_txn) { + for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) { AddToCompactExtraTransactions(removedTx); } break; @@ -2279,7 +2337,7 @@ static bool PrepareBlockFilterRequest(CNode& peer, const 
CChainParams& chain_par { LOCK(cs_main); - stop_index = LookupBlockIndex(stop_hash); + stop_index = g_chainman.m_blockman.LookupBlockIndex(stop_hash); // Check that the stop block exists and the peer would be allowed to fetch it. if (!stop_index || !BlockRequestAllowed(stop_index, chain_params.GetConsensus())) { @@ -2489,6 +2547,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, bool fRelay = true; vRecv >> nVersion >> nServiceInt >> nTime >> addrMe; + if (nTime < 0) { + nTime = 0; + } nServices = ServiceFlags(nServiceInt); if (!pfrom.IsInboundConn()) { @@ -2745,12 +2806,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } - // Feature negotiation of wtxidrelay must happen between VERSION and VERACK - // to avoid relay problems from switching after a connection is up. + // BIP339 defines feature negotiation of wtxidrelay, which must happen between + // VERSION and VERACK to avoid relay problems from switching after a connection is up. if (msg_type == NetMsgType::WTXIDRELAY) { if (pfrom.fSuccessfullyConnected) { - // Disconnect peers that send wtxidrelay message after VERACK; this - // must be negotiated between VERSION and VERACK. + // Disconnect peers that send a wtxidrelay message after VERACK. LogPrint(BCLog::NET, "wtxidrelay received after verack from peer=%d; disconnecting\n", pfrom.GetId()); pfrom.fDisconnect = true; return; @@ -2759,7 +2819,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, LOCK(cs_main); if (!State(pfrom.GetId())->m_wtxid_relay) { State(pfrom.GetId())->m_wtxid_relay = true; - g_wtxid_relay_peers++; + m_wtxid_relay_peers++; } else { LogPrint(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId()); } @@ -2769,10 +2829,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } + // BIP155 defines feature negotiation of addrv2 and sendaddrv2, which must happen + // between VERSION and VERACK. if (msg_type == NetMsgType::SENDADDRV2) { if (pfrom.fSuccessfullyConnected) { - // Disconnect peers that send SENDADDRV2 message after VERACK; this - // must be negotiated between VERSION and VERACK. + // Disconnect peers that send a SENDADDRV2 message after VERACK. LogPrint(BCLog::NET, "sendaddrv2 received after verack from peer=%d; disconnecting\n", pfrom.GetId()); pfrom.fDisconnect = true; return; @@ -2901,7 +2962,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } } else if (inv.IsGenTxMsg()) { const GenTxid gtxid = ToGenTxid(inv); - const bool fAlreadyHave = AlreadyHaveTx(gtxid, m_mempool); + const bool fAlreadyHave = AlreadyHaveTx(gtxid); LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? 
"have" : "new", pfrom.GetId()); pfrom.AddKnownTx(inv.hash); @@ -2943,7 +3004,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, { LOCK(peer->m_getdata_requests_mutex); peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end()); - ProcessGetData(pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc); + ProcessGetData(pfrom, *peer, interruptMsgProc); } return; @@ -2974,7 +3035,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, a_recent_block = most_recent_block; } BlockValidationState state; - if (!ActivateBestChain(state, m_chainparams, a_recent_block)) { + if (!::ChainstateActive().ActivateBestChain(state, m_chainparams, a_recent_block)) { LogPrint(BCLog::NET, "failed to activate chain (%s)\n", state.ToString()); } } @@ -2982,7 +3043,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, LOCK(cs_main); // Find the last block the caller has in the main chain - const CBlockIndex* pindex = FindForkInGlobalIndex(::ChainActive(), locator); + const CBlockIndex* pindex = g_chainman.m_blockman.FindForkInGlobalIndex(::ChainActive(), locator); // Send the rest of the chain if (pindex) @@ -3035,7 +3096,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, { LOCK(cs_main); - const CBlockIndex* pindex = LookupBlockIndex(req.blockhash); + const CBlockIndex* pindex = g_chainman.m_blockman.LookupBlockIndex(req.blockhash); if (!pindex || !(pindex->nStatus & BLOCK_HAVE_DATA)) { LogPrint(BCLog::NET, "Peer %d sent us a getblocktxn for a block we don't have\n", pfrom.GetId()); return; @@ -3089,7 +3150,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if (locator.IsNull()) { // If locator is null, return the hashStop block - pindex = LookupBlockIndex(hashStop); + pindex = g_chainman.m_blockman.LookupBlockIndex(hashStop); if (!pindex) { return; } @@ -3102,7 +3163,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, else { // Find the last block the caller has in the main chain - pindex = FindForkInGlobalIndex(::ChainActive(), locator); + pindex = g_chainman.m_blockman.FindForkInGlobalIndex(::ChainActive(), locator); if (pindex) pindex = ::ChainActive().Next(pindex); } @@ -3182,7 +3243,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // already; and an adversary can already relay us old transactions // (older than our recency filter) if trying to DoS us, without any need // for witness malleation. 
- if (AlreadyHaveTx(GenTxid(/* is_wtxid=*/true, wtxid), m_mempool)) { + if (AlreadyHaveTx(GenTxid(/* is_wtxid=*/true, wtxid))) { if (pfrom.HasPermission(PF_FORCERELAY)) { // Always relay transactions received from peers with forcerelay // permission, even if they were already in the mempool, allowing @@ -3197,10 +3258,10 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } - TxValidationState state; - std::list<CTransactionRef> lRemovedTxn; + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), m_mempool, ptx, false /* bypass_limits */); + const TxValidationState& state = result.m_state; - if (AcceptToMemoryPool(m_mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */)) { + if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { m_mempool.check(&::ChainstateActive().CoinsTip()); // As this version of the transaction was acceptable, we can forget about any // requests for it. @@ -3223,7 +3284,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, tx.GetHash().ToString(), m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000); - for (const CTransactionRef& removedTx : lRemovedTxn) { + for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) { AddToCompactExtraTransactions(removedTx); } @@ -3261,7 +3322,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // protocol for getting all unconfirmed parents. const GenTxid gtxid{/* is_wtxid=*/false, parent_txid}; pfrom.AddKnownTx(parent_txid); - if (!AlreadyHaveTx(gtxid, m_mempool)) AddTxAnnouncement(pfrom, gtxid, current_time); + if (!AlreadyHaveTx(gtxid)) AddTxAnnouncement(pfrom, gtxid, current_time); } AddOrphanTx(ptx, pfrom.GetId()); @@ -3366,14 +3427,14 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, { LOCK(cs_main); - if (!LookupBlockIndex(cmpctblock.header.hashPrevBlock)) { + if (!g_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.hashPrevBlock)) { // Doesn't connect (or is genesis), instead of DoSing in AcceptBlockHeader, request deeper headers if (!::ChainstateActive().IsInitialBlockDownload()) m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::GETHEADERS, ::ChainActive().GetLocator(pindexBestHeader), uint256())); return; } - if (!LookupBlockIndex(cmpctblock.header.GetHash())) { + if (!g_chainman.m_blockman.LookupBlockIndex(cmpctblock.header.GetHash())) { received_new_header = true; } } @@ -3451,7 +3512,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || (fAlreadyInFlight && blockInFlightIt->second.first == pfrom.GetId())) { std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr; - if (!MarkBlockAsInFlight(m_mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) { + if (!MarkBlockAsInFlight(pfrom.GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) { if (!(*queuedBlockIt)->partialBlock) (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool)); else { @@ -3807,15 +3868,14 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, vRecv >> nonce; // Only process pong message if there is an outstanding ping (old ping without nonce should never pong) - if (pfrom.nPingNonceSent != 0) { - if (nonce == pfrom.nPingNonceSent) { + if (peer->m_ping_nonce_sent != 0) { + if (nonce == peer->m_ping_nonce_sent) { // Matching pong received, this ping is 
no longer outstanding bPingFinished = true; - const auto ping_time = ping_end - pfrom.m_ping_start.load(); + const auto ping_time = ping_end - peer->m_ping_start.load(); if (ping_time.count() >= 0) { - // Successful ping time measurement, replace previous - pfrom.nPingUsecTime = count_microseconds(ping_time); - pfrom.nMinPingUsecTime = std::min(pfrom.nMinPingUsecTime.load(), count_microseconds(ping_time)); + // Let connman know about this successful ping-pong + pfrom.PongReceived(ping_time); } else { // This should never happen sProblem = "Timing mishap"; @@ -3842,12 +3902,12 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n", pfrom.GetId(), sProblem, - pfrom.nPingNonceSent, + peer->m_ping_nonce_sent, nonce, nAvail); } if (bPingFinished) { - pfrom.nPingNonceSent = 0; + peer->m_ping_nonce_sent = 0; } return; } @@ -3966,43 +4026,39 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } -bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode) +bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer) { - const NodeId peer_id{pnode.GetId()}; - PeerRef peer = GetPeerRef(peer_id); - if (peer == nullptr) return false; - { - LOCK(peer->m_misbehavior_mutex); + LOCK(peer.m_misbehavior_mutex); // There's nothing to do if the m_should_discourage flag isn't set - if (!peer->m_should_discourage) return false; + if (!peer.m_should_discourage) return false; - peer->m_should_discourage = false; + peer.m_should_discourage = false; } // peer.m_misbehavior_mutex if (pnode.HasPermission(PF_NOBAN)) { // We never disconnect or discourage peers for bad behavior if they have the NOBAN permission flag - LogPrintf("Warning: not punishing noban peer %d!\n", peer_id); + LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id); return false; } if (pnode.IsManualConn()) { // We never disconnect or discourage manual peers for bad behavior - LogPrintf("Warning: not punishing manually connected peer %d!\n", peer_id); + LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id); return false; } if (pnode.addr.IsLocal()) { // We disconnect local peers for bad behavior but don't discourage (since that would discourage // all peers on the same local address) - LogPrintf("Warning: disconnecting but not discouraging local peer %d!\n", peer_id); + LogPrintf("Warning: disconnecting but not discouraging local peer %d!\n", peer.m_id); pnode.fDisconnect = true; return true; } // Normal case: Disconnect the peer and discourage all nodes sharing the address - LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer_id); + LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer.m_id); if (m_banman) m_banman->Discourage(pnode.addr); m_connman.DisconnectNode(pnode.addr); return true; @@ -4018,7 +4074,7 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt { LOCK(peer->m_getdata_requests_mutex); if (!peer->m_getdata_requests.empty()) { - ProcessGetData(*pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc); + ProcessGetData(*pfrom, *peer, interruptMsgProc); } } @@ -4045,14 +4101,12 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt } // Don't bother if send buffer is too full to respond anyway - if (pfrom->fPauseSend) - return false; + if (pfrom->fPauseSend) return false; std::list<CNetMessage> msgs; { LOCK(pfrom->cs_vProcessMsg); - if 
(pfrom->vProcessMsg.empty()) - return false; + if (pfrom->vProcessMsg.empty()) return false; // Just take one message msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin()); pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size; @@ -4061,6 +4115,10 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt } CNetMessage& msg(msgs.front()); + if (gArgs.GetBoolArg("-capturemessages", false)) { + CaptureMessage(pfrom->addr, msg.m_command, MakeUCharSpan(msg.m_recv), /* incoming */ true); + } + msg.SetVersion(pfrom->GetCommonVersion()); const std::string& msg_type = msg.m_command; @@ -4248,8 +4306,8 @@ void PeerManagerImpl::CheckForStaleTipAndEvictPeers() if (time_in_seconds > m_stale_tip_check_time) { // Check whether our tip is stale, and if so, allow using an extra // outbound peer - if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale(m_chainparams.GetConsensus())) { - LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update); + if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) { + LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - m_last_tip_update); m_connman.SetTryNewOutboundPeer(true); } else if (m_connman.GetTryNewOutboundPeer()) { m_connman.SetTryNewOutboundPeer(false); @@ -4263,6 +4321,50 @@ void PeerManagerImpl::CheckForStaleTipAndEvictPeers() } } +void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer) +{ + // Use mockable time for ping timeouts. + // This means that setmocktime may cause pings to time out. + auto now = GetTime<std::chrono::microseconds>(); + + if (m_connman.RunInactivityChecks(node_to) && peer.m_ping_nonce_sent && + now > peer.m_ping_start.load() + std::chrono::seconds{TIMEOUT_INTERVAL}) { + LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), peer.m_id); + node_to.fDisconnect = true; + return; + } + + const CNetMsgMaker msgMaker(node_to.GetCommonVersion()); + bool pingSend = false; + + if (peer.m_ping_queued) { + // RPC ping request by user + pingSend = true; + } + + if (peer.m_ping_nonce_sent == 0 && now > peer.m_ping_start.load() + PING_INTERVAL) { + // Ping automatically sent as a latency probe & keepalive. + pingSend = true; + } + + if (pingSend) { + uint64_t nonce = 0; + while (nonce == 0) { + GetRandBytes((unsigned char*)&nonce, sizeof(nonce)); + } + peer.m_ping_queued = false; + peer.m_ping_start = now; + if (node_to.GetCommonVersion() > BIP0031_VERSION) { + peer.m_ping_nonce_sent = nonce; + m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING, nonce)); + } else { + // Peer is too old to support ping command with nonce, pong will never arrive. + peer.m_ping_nonce_sent = 0; + m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING)); + } + } +} + namespace { class CompareInvMempoolOrder { @@ -4287,11 +4389,12 @@ public: bool PeerManagerImpl::SendMessages(CNode* pto) { PeerRef peer = GetPeerRef(pto->GetId()); + if (!peer) return false; const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll // disconnect misbehaving peers even before the version handshake is complete. 
- if (MaybeDiscourageAndDisconnect(*pto)) return true; + if (MaybeDiscourageAndDisconnect(*pto, *peer)) return true; // Don't send anything until the version handshake is complete if (!pto->fSuccessfullyConnected || pto->fDisconnect) @@ -4300,34 +4403,10 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // If we get here, the outgoing message serialization version is set and can't change. const CNetMsgMaker msgMaker(pto->GetCommonVersion()); - // - // Message: ping - // - bool pingSend = false; - if (pto->fPingQueued) { - // RPC ping request by user - pingSend = true; - } - if (pto->nPingNonceSent == 0 && pto->m_ping_start.load() + PING_INTERVAL < GetTime<std::chrono::microseconds>()) { - // Ping automatically sent as a latency probe & keepalive. - pingSend = true; - } - if (pingSend) { - uint64_t nonce = 0; - while (nonce == 0) { - GetRandBytes((unsigned char*)&nonce, sizeof(nonce)); - } - pto->fPingQueued = false; - pto->m_ping_start = GetTime<std::chrono::microseconds>(); - if (pto->GetCommonVersion() > BIP0031_VERSION) { - pto->nPingNonceSent = nonce; - m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce)); - } else { - // Peer is too old to support ping command with nonce, pong will never arrive. - pto->nPingNonceSent = 0; - m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING)); - } - } + MaybeSendPing(*pto, *peer); + + // MaybeSendPing may have marked peer for disconnection + if (pto->fDisconnect) return true; { LOCK(cs_main); @@ -4337,7 +4416,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // Address refresh broadcast auto current_time = GetTime<std::chrono::microseconds>(); - if (pto->RelayAddrsWithConn() && !::ChainstateActive().IsInitialBlockDownload() && pto->m_next_local_addr_send < current_time) { + if (fListen && pto->RelayAddrsWithConn() && + !::ChainstateActive().IsInitialBlockDownload() && + pto->m_next_local_addr_send < current_time) { // If we've sent before, clear the bloom filter for the peer, so that our // self-announcement will actually go out. // This might be unnecessary if the bloom filter has already rolled @@ -4347,7 +4428,10 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (pto->m_next_local_addr_send != 0us) { pto->m_addr_known->reset(); } - AdvertiseLocal(pto); + if (Optional<CAddress> local_addr = GetLocalAddrForPeer(pto)) { + FastRandomContext insecure_rand; + pto->PushAddress(*local_addr, insecure_rand); + } pto->m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); } @@ -4442,7 +4526,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // then send all headers past that one. If we come across any // headers that aren't on ::ChainActive(), give up. for (const uint256& hash : peer->m_blocks_for_headers_relay) { - const CBlockIndex* pindex = LookupBlockIndex(hash); + const CBlockIndex* pindex = g_chainman.m_blockman.LookupBlockIndex(hash); assert(pindex); if (::ChainActive()[pindex->nHeight] != pindex) { // Bail out if we reorged away from this block @@ -4534,7 +4618,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // in the past. if (!peer->m_blocks_for_headers_relay.empty()) { const uint256& hashToAnnounce = peer->m_blocks_for_headers_relay.back(); - const CBlockIndex* pindex = LookupBlockIndex(hashToAnnounce); + const CBlockIndex* pindex = g_chainman.m_blockman.LookupBlockIndex(hashToAnnounce); assert(pindex); // Warn if we're announcing a block that is not on the main chain. 
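A note on the ping refactor in the hunks above (illustrative sketch, not part of the patch): the per-peer state that the new MaybeSendPing() shares with the PONG handler reduces to a nonce, a send timestamp and an RPC-queued flag, now owned by Peer rather than CNode. The round trip, with simplified stand-in names (PingState and the two functions below are hypothetical; the real fields are Peer::m_ping_nonce_sent, m_ping_start and m_ping_queued):

#include <chrono>
#include <cstdint>
#include <optional>
#include <random>

struct PingState {
    uint64_t nonce_sent{0};              // 0 means no ping is outstanding
    std::chrono::microseconds start{0};  // when the last PING left
    bool queued{false};                  // set when the user calls the "ping" RPC
};

// Send side: pick a non-zero nonce and remember the departure time.
uint64_t SendPing(PingState& s, std::chrono::microseconds now)
{
    std::mt19937_64 rng{std::random_device{}()};
    uint64_t nonce = 0;
    while (nonce == 0) nonce = rng();    // 0 is reserved for "none outstanding"
    s.nonce_sent = nonce;
    s.start = now;
    s.queued = false;
    return nonce;                        // carried in the PING message
}

// Receive side: only a PONG echoing the outstanding nonce yields a measurement;
// stale or unsolicited PONGs are ignored, mirroring the handler above.
std::optional<std::chrono::microseconds> HandlePong(PingState& s, uint64_t nonce,
                                                    std::chrono::microseconds now)
{
    if (s.nonce_sent == 0 || nonce != s.nonce_sent) return std::nullopt;
    s.nonce_sent = 0;
    return now - s.start;                // what PongReceived() records on the CNode
}

MaybeSendPing() additionally disconnects the peer when a nonce is outstanding and the wait exceeds TIMEOUT_INTERVAL, which is why its comment warns that setmocktime can make pings time out.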
@@ -4776,11 +4860,11 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (!pto->fClient && ((fFetch && !pto->m_limited_node) || !::ChainstateActive().IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { std::vector<const CBlockIndex*> vToDownload; NodeId staller = -1; - FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams); + FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller); for (const CBlockIndex *pindex : vToDownload) { uint32_t nFetchFlags = GetFetchFlags(*pto); vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); - MarkBlockAsInFlight(m_mempool, pto->GetId(), pindex->GetBlockHash(), pindex); + MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex); LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), pindex->nHeight, pto->GetId()); } @@ -4802,7 +4886,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) entry.second.GetHash().ToString(), entry.first); } for (const GenTxid& gtxid : requestable) { - if (!AlreadyHaveTx(gtxid, m_mempool)) { + if (!AlreadyHaveTx(gtxid)) { LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx", gtxid.GetHash().ToString(), pto->GetId()); vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash()); diff --git a/src/net_processing.h b/src/net_processing.h index eaa3b142a8..d7be453df5 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -30,6 +30,7 @@ struct CNodeStateStats { int nSyncHeight = -1; int nCommonHeight = -1; int m_starting_height = -1; + int64_t m_ping_wait_usec; std::vector<int> vHeightInFlight; }; @@ -47,6 +48,9 @@ public: /** Whether this node ignores txs received over p2p. */ virtual bool IgnoresIncomingTxs() = 0; + /** Send ping message to all peers */ + virtual void SendPings() = 0; + /** Set the best height */ virtual void SetBestHeight(int height) = 0; diff --git a/src/netaddress.h b/src/netaddress.h index b9eade7fd5..d0986557f7 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -36,7 +36,7 @@ static constexpr int ADDRV2_FORMAT = 0x20000000; * @note An address may belong to more than one network, for example `10.0.0.1` * belongs to both `NET_UNROUTABLE` and `NET_IPV4`. * Keep these sequential starting from 0 and `NET_MAX` as the last entry. - * We have loops like `for (int i = 0; i < NET_MAX; i++)` that expect to iterate + * We have loops like `for (int i = 0; i < NET_MAX; ++i)` that expect to iterate * over all enum values and also `GetExtNetwork()` "extends" this enum by * introducing standalone constants starting from `NET_MAX`. 
*/ diff --git a/src/netbase.cpp b/src/netbase.cpp index 264029d8a2..0c5b3a220e 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -7,13 +7,17 @@ #include <sync.h> #include <tinyformat.h> +#include <util/sock.h> #include <util/strencodings.h> #include <util/string.h> #include <util/system.h> +#include <util/time.h> #include <atomic> #include <cstdint> +#include <functional> #include <limits> +#include <memory> #ifndef WIN32 #include <fcntl.h> @@ -55,7 +59,7 @@ enum Network ParseNetwork(const std::string& net_in) { std::string GetNetworkName(enum Network net) { switch (net) { - case NET_UNROUTABLE: return "unroutable"; + case NET_UNROUTABLE: return "not_publicly_routable"; case NET_IPV4: return "ipv4"; case NET_IPV6: return "ipv6"; case NET_ONION: return "onion"; @@ -68,6 +72,20 @@ std::string GetNetworkName(enum Network net) assert(false); } +std::vector<std::string> GetNetworkNames(bool append_unroutable) +{ + std::vector<std::string> names; + for (int n = 0; n < NET_MAX; ++n) { + const enum Network network{static_cast<Network>(n)}; + if (network == NET_UNROUTABLE || network == NET_I2P || network == NET_CJDNS || network == NET_INTERNAL) continue; + names.emplace_back(GetNetworkName(network)); + } + if (append_unroutable) { + names.emplace_back(GetNetworkName(NET_UNROUTABLE)); + } + return names; +} + bool static LookupIntern(const std::string& name, std::vector<CNetAddr>& vIP, unsigned int nMaxSolutions, bool fAllowLookup) { vIP.clear(); @@ -271,14 +289,6 @@ CService LookupNumeric(const std::string& name, int portDefault) return addr; } -struct timeval MillisToTimeval(int64_t nTimeout) -{ - struct timeval timeout; - timeout.tv_sec = nTimeout / 1000; - timeout.tv_usec = (nTimeout % 1000) * 1000; - return timeout; -} - /** SOCKS version */ enum SOCKSVersion: uint8_t { SOCKS4 = 0x04, @@ -336,8 +346,7 @@ enum class IntrRecvError { * @param data The buffer where the read bytes should be stored. * @param len The number of bytes to read into the specified buffer. * @param timeout The total timeout in milliseconds for this read. - * @param hSocket The socket (has to be in non-blocking mode) from which to read - * bytes. + * @param sock The socket (has to be in non-blocking mode) from which to read bytes. * * @returns An IntrRecvError indicating the resulting status of this read. * IntrRecvError::OK only if all of the specified number of bytes were @@ -347,7 +356,7 @@ enum class IntrRecvError { * Sockets can be made non-blocking with SetSocketNonBlocking(const * SOCKET&, bool). */ -static IntrRecvError InterruptibleRecv(uint8_t* data, size_t len, int timeout, const SOCKET& hSocket) +static IntrRecvError InterruptibleRecv(uint8_t* data, size_t len, int timeout, const Sock& sock) { int64_t curTime = GetTimeMillis(); int64_t endTime = curTime + timeout; @@ -355,7 +364,7 @@ static IntrRecvError InterruptibleRecv(uint8_t* data, size_t len, int timeout, c // (in millis) to break off in case of an interruption. 
const int64_t maxWait = 1000; while (len > 0 && curTime < endTime) { - ssize_t ret = recv(hSocket, (char*)data, len, 0); // Optimistically try the recv first + ssize_t ret = sock.Recv(data, len, 0); // Optimistically try the recv first if (ret > 0) { len -= ret; data += ret; @@ -364,25 +373,10 @@ static IntrRecvError InterruptibleRecv(uint8_t* data, size_t len, int timeout, c } else { // Other error or blocking int nErr = WSAGetLastError(); if (nErr == WSAEINPROGRESS || nErr == WSAEWOULDBLOCK || nErr == WSAEINVAL) { - if (!IsSelectableSocket(hSocket)) { - return IntrRecvError::NetworkError; - } // Only wait at most maxWait milliseconds at a time, unless // we're approaching the end of the specified total timeout int timeout_ms = std::min(endTime - curTime, maxWait); -#ifdef USE_POLL - struct pollfd pollfd = {}; - pollfd.fd = hSocket; - pollfd.events = POLLIN; - int nRet = poll(&pollfd, 1, timeout_ms); -#else - struct timeval tval = MillisToTimeval(timeout_ms); - fd_set fdset; - FD_ZERO(&fdset); - FD_SET(hSocket, &fdset); - int nRet = select(hSocket + 1, &fdset, nullptr, nullptr, &tval); -#endif - if (nRet == SOCKET_ERROR) { + if (!sock.Wait(std::chrono::milliseconds{timeout_ms}, Sock::RECV)) { return IntrRecvError::NetworkError; } } else { @@ -436,7 +430,7 @@ static std::string Socks5ErrorString(uint8_t err) * @param port The destination port. * @param auth The credentials with which to authenticate with the specified * SOCKS5 proxy. - * @param hSocket The SOCKS5 proxy socket. + * @param sock The SOCKS5 proxy socket. * * @returns Whether or not the operation succeeded. * @@ -446,7 +440,7 @@ static std::string Socks5ErrorString(uint8_t err) * @see <a href="https://www.ietf.org/rfc/rfc1928.txt">RFC1928: SOCKS Protocol * Version 5</a> */ -static bool Socks5(const std::string& strDest, int port, const ProxyCredentials *auth, const SOCKET& hSocket) +static bool Socks5(const std::string& strDest, int port, const ProxyCredentials* auth, const Sock& sock) { IntrRecvError recvr; LogPrint(BCLog::NET, "SOCKS5 connecting %s\n", strDest); @@ -464,12 +458,12 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials vSocks5Init.push_back(0x01); // 1 method identifier follows... 
vSocks5Init.push_back(SOCKS5Method::NOAUTH); } - ssize_t ret = send(hSocket, (const char*)vSocks5Init.data(), vSocks5Init.size(), MSG_NOSIGNAL); + ssize_t ret = sock.Send(vSocks5Init.data(), vSocks5Init.size(), MSG_NOSIGNAL); if (ret != (ssize_t)vSocks5Init.size()) { return error("Error sending to proxy"); } uint8_t pchRet1[2]; - if ((recvr = InterruptibleRecv(pchRet1, 2, SOCKS5_RECV_TIMEOUT, hSocket)) != IntrRecvError::OK) { + if ((recvr = InterruptibleRecv(pchRet1, 2, SOCKS5_RECV_TIMEOUT, sock)) != IntrRecvError::OK) { LogPrintf("Socks5() connect to %s:%d failed: InterruptibleRecv() timeout or other failure\n", strDest, port); return false; } @@ -486,13 +480,13 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials vAuth.insert(vAuth.end(), auth->username.begin(), auth->username.end()); vAuth.push_back(auth->password.size()); vAuth.insert(vAuth.end(), auth->password.begin(), auth->password.end()); - ret = send(hSocket, (const char*)vAuth.data(), vAuth.size(), MSG_NOSIGNAL); + ret = sock.Send(vAuth.data(), vAuth.size(), MSG_NOSIGNAL); if (ret != (ssize_t)vAuth.size()) { return error("Error sending authentication to proxy"); } LogPrint(BCLog::PROXY, "SOCKS5 sending proxy authentication %s:%s\n", auth->username, auth->password); uint8_t pchRetA[2]; - if ((recvr = InterruptibleRecv(pchRetA, 2, SOCKS5_RECV_TIMEOUT, hSocket)) != IntrRecvError::OK) { + if ((recvr = InterruptibleRecv(pchRetA, 2, SOCKS5_RECV_TIMEOUT, sock)) != IntrRecvError::OK) { return error("Error reading proxy authentication response"); } if (pchRetA[0] != 0x01 || pchRetA[1] != 0x00) { @@ -512,12 +506,12 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials vSocks5.insert(vSocks5.end(), strDest.begin(), strDest.end()); vSocks5.push_back((port >> 8) & 0xFF); vSocks5.push_back((port >> 0) & 0xFF); - ret = send(hSocket, (const char*)vSocks5.data(), vSocks5.size(), MSG_NOSIGNAL); + ret = sock.Send(vSocks5.data(), vSocks5.size(), MSG_NOSIGNAL); if (ret != (ssize_t)vSocks5.size()) { return error("Error sending to proxy"); } uint8_t pchRet2[4]; - if ((recvr = InterruptibleRecv(pchRet2, 4, SOCKS5_RECV_TIMEOUT, hSocket)) != IntrRecvError::OK) { + if ((recvr = InterruptibleRecv(pchRet2, 4, SOCKS5_RECV_TIMEOUT, sock)) != IntrRecvError::OK) { if (recvr == IntrRecvError::Timeout) { /* If a timeout happens here, this effectively means we timed out while connecting * to the remote node. 
This is very common for Tor, so do not print an @@ -541,16 +535,16 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials uint8_t pchRet3[256]; switch (pchRet2[3]) { - case SOCKS5Atyp::IPV4: recvr = InterruptibleRecv(pchRet3, 4, SOCKS5_RECV_TIMEOUT, hSocket); break; - case SOCKS5Atyp::IPV6: recvr = InterruptibleRecv(pchRet3, 16, SOCKS5_RECV_TIMEOUT, hSocket); break; + case SOCKS5Atyp::IPV4: recvr = InterruptibleRecv(pchRet3, 4, SOCKS5_RECV_TIMEOUT, sock); break; + case SOCKS5Atyp::IPV6: recvr = InterruptibleRecv(pchRet3, 16, SOCKS5_RECV_TIMEOUT, sock); break; case SOCKS5Atyp::DOMAINNAME: { - recvr = InterruptibleRecv(pchRet3, 1, SOCKS5_RECV_TIMEOUT, hSocket); + recvr = InterruptibleRecv(pchRet3, 1, SOCKS5_RECV_TIMEOUT, sock); if (recvr != IntrRecvError::OK) { return error("Error reading from proxy"); } int nRecv = pchRet3[0]; - recvr = InterruptibleRecv(pchRet3, nRecv, SOCKS5_RECV_TIMEOUT, hSocket); + recvr = InterruptibleRecv(pchRet3, nRecv, SOCKS5_RECV_TIMEOUT, sock); break; } default: return error("Error: malformed proxy response"); @@ -558,41 +552,35 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials if (recvr != IntrRecvError::OK) { return error("Error reading from proxy"); } - if ((recvr = InterruptibleRecv(pchRet3, 2, SOCKS5_RECV_TIMEOUT, hSocket)) != IntrRecvError::OK) { + if ((recvr = InterruptibleRecv(pchRet3, 2, SOCKS5_RECV_TIMEOUT, sock)) != IntrRecvError::OK) { return error("Error reading from proxy"); } LogPrint(BCLog::NET, "SOCKS5 connected %s\n", strDest); return true; } -/** - * Try to create a socket file descriptor with specific properties in the - * communications domain (address family) of the specified service. - * - * For details on the desired properties, see the inline comments in the source - * code. - */ -SOCKET CreateSocket(const CService &addrConnect) +std::unique_ptr<Sock> CreateSockTCP(const CService& address_family) { // Create a sockaddr from the specified service. struct sockaddr_storage sockaddr; socklen_t len = sizeof(sockaddr); - if (!addrConnect.GetSockAddr((struct sockaddr*)&sockaddr, &len)) { - LogPrintf("Cannot create socket for %s: unsupported network\n", addrConnect.ToString()); - return INVALID_SOCKET; + if (!address_family.GetSockAddr((struct sockaddr*)&sockaddr, &len)) { + LogPrintf("Cannot create socket for %s: unsupported network\n", address_family.ToString()); + return nullptr; } // Create a TCP socket in the address family of the specified service. SOCKET hSocket = socket(((struct sockaddr*)&sockaddr)->sa_family, SOCK_STREAM, IPPROTO_TCP); - if (hSocket == INVALID_SOCKET) - return INVALID_SOCKET; + if (hSocket == INVALID_SOCKET) { + return nullptr; + } // Ensure that waiting for I/O on this socket won't result in undefined // behavior. if (!IsSelectableSocket(hSocket)) { CloseSocket(hSocket); LogPrintf("Cannot create connection: non-selectable socket created (fd >= FD_SETSIZE ?)\n"); - return INVALID_SOCKET; + return nullptr; } #ifdef SO_NOSIGPIPE @@ -608,11 +596,14 @@ SOCKET CreateSocket(const CService &addrConnect) // Set the non-blocking option on the socket. 
if (!SetSocketNonBlocking(hSocket, true)) { CloseSocket(hSocket); - LogPrintf("CreateSocket: Setting socket to non-blocking failed, error %s\n", NetworkErrorString(WSAGetLastError())); + LogPrintf("Error setting socket to non-blocking: %s\n", NetworkErrorString(WSAGetLastError())); + return nullptr; } - return hSocket; + return std::make_unique<Sock>(hSocket); } +std::function<std::unique_ptr<Sock>(const CService&)> CreateSock = CreateSockTCP; + template<typename... Args> static void LogConnectFailure(bool manual_connection, const char* fmt, const Args&... args) { std::string error_message = tfm::format(fmt, args...); @@ -786,7 +777,7 @@ bool IsProxy(const CNetAddr &addr) { * @param proxy The SOCKS5 proxy. * @param strDest The destination service to which to connect. * @param port The destination port. - * @param hSocket The socket on which to connect to the SOCKS5 proxy. + * @param sock The socket on which to connect to the SOCKS5 proxy. * @param nTimeout Wait this many milliseconds for the connection to the SOCKS5 * proxy to be established. * @param[out] outProxyConnectionFailed Whether or not the connection to the @@ -794,10 +785,10 @@ bool IsProxy(const CNetAddr &addr) { * * @returns Whether or not the operation succeeded. */ -bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int port, const SOCKET& hSocket, int nTimeout, bool& outProxyConnectionFailed) +bool ConnectThroughProxy(const proxyType& proxy, const std::string& strDest, int port, const Sock& sock, int nTimeout, bool& outProxyConnectionFailed) { // first connect to proxy server - if (!ConnectSocketDirectly(proxy.proxy, hSocket, nTimeout, true)) { + if (!ConnectSocketDirectly(proxy.proxy, sock.Get(), nTimeout, true)) { outProxyConnectionFailed = true; return false; } @@ -806,11 +797,11 @@ bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int ProxyCredentials random_auth; static std::atomic_int counter(0); random_auth.username = random_auth.password = strprintf("%i", counter++); - if (!Socks5(strDest, (uint16_t)port, &random_auth, hSocket)) { + if (!Socks5(strDest, (uint16_t)port, &random_auth, sock)) { return false; } } else { - if (!Socks5(strDest, (uint16_t)port, 0, hSocket)) { + if (!Socks5(strDest, (uint16_t)port, 0, sock)) { return false; } } @@ -869,57 +860,6 @@ bool LookupSubNet(const std::string& strSubnet, CSubNet& ret) return false; } -#ifdef WIN32 -std::string NetworkErrorString(int err) -{ - wchar_t buf[256]; - buf[0] = 0; - if(FormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_MAX_WIDTH_MASK, - nullptr, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), - buf, ARRAYSIZE(buf), nullptr)) - { - return strprintf("%s (%d)", std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>,wchar_t>().to_bytes(buf), err); - } - else - { - return strprintf("Unknown error (%d)", err); - } -} -#else -std::string NetworkErrorString(int err) -{ - char buf[256]; - buf[0] = 0; - /* Too bad there are two incompatible implementations of the - * thread-safe strerror. 
*/ - const char *s; -#ifdef STRERROR_R_CHAR_P /* GNU variant can return a pointer outside the passed buffer */ - s = strerror_r(err, buf, sizeof(buf)); -#else /* POSIX variant always returns message in buffer */ - s = buf; - if (strerror_r(err, buf, sizeof(buf))) - buf[0] = 0; -#endif - return strprintf("%s (%d)", s, err); -} -#endif - -bool CloseSocket(SOCKET& hSocket) -{ - if (hSocket == INVALID_SOCKET) - return false; -#ifdef WIN32 - int ret = closesocket(hSocket); -#else - int ret = close(hSocket); -#endif - if (ret) { - LogPrintf("Socket close failed: %d. Error: %s\n", hSocket, NetworkErrorString(WSAGetLastError())); - } - hSocket = INVALID_SOCKET; - return ret != SOCKET_ERROR; -} - bool SetSocketNonBlocking(const SOCKET& hSocket, bool fNonBlocking) { if (fNonBlocking) { diff --git a/src/netbase.h b/src/netbase.h index ac4cd97673..847a72ca8e 100644 --- a/src/netbase.h +++ b/src/netbase.h @@ -12,7 +12,10 @@ #include <compat.h> #include <netaddress.h> #include <serialize.h> +#include <util/sock.h> +#include <functional> +#include <memory> #include <stdint.h> #include <string> #include <vector> @@ -39,6 +42,8 @@ public: enum Network ParseNetwork(const std::string& net); std::string GetNetworkName(enum Network net); +/** Return a vector of publicly routable Network names; optionally append NET_UNROUTABLE. */ +std::vector<std::string> GetNetworkNames(bool append_unroutable = false); bool SetProxy(enum Network net, const proxyType &addrProxy); bool GetProxy(enum Network net, proxyType &proxyInfoOut); bool IsProxy(const CNetAddr &addr); @@ -51,21 +56,25 @@ bool Lookup(const std::string& name, CService& addr, int portDefault, bool fAllo bool Lookup(const std::string& name, std::vector<CService>& vAddr, int portDefault, bool fAllowLookup, unsigned int nMaxSolutions); CService LookupNumeric(const std::string& name, int portDefault = 0); bool LookupSubNet(const std::string& strSubnet, CSubNet& subnet); -SOCKET CreateSocket(const CService &addrConnect); + +/** + * Create a TCP socket in the given address family. + * @param[in] address_family The socket is created in the same address family as this address. + * @return pointer to the created Sock object or unique_ptr that owns nothing in case of failure + */ +std::unique_ptr<Sock> CreateSockTCP(const CService& address_family); + +/** + * Socket factory. Defaults to `CreateSockTCP()`, but can be overridden by unit tests. + */ +extern std::function<std::unique_ptr<Sock>(const CService&)> CreateSock; + bool ConnectSocketDirectly(const CService &addrConnect, const SOCKET& hSocketRet, int nTimeout, bool manual_connection); -bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int port, const SOCKET& hSocketRet, int nTimeout, bool& outProxyConnectionFailed); -/** Return readable error string for a network error code */ -std::string NetworkErrorString(int err); -/** Close socket and set hSocket to INVALID_SOCKET */ -bool CloseSocket(SOCKET& hSocket); +bool ConnectThroughProxy(const proxyType& proxy, const std::string& strDest, int port, const Sock& sock, int nTimeout, bool& outProxyConnectionFailed); /** Disable or enable blocking-mode for a socket */ bool SetSocketNonBlocking(const SOCKET& hSocket, bool fNonBlocking); /** Set the TCP_NODELAY flag on a socket */ bool SetSocketNoDelay(const SOCKET& hSocket); -/** - * Convert milliseconds to a struct timeval for e.g. select. 
- */ -struct timeval MillisToTimeval(int64_t nTimeout); void InterruptSocks5(bool interrupt); #endif // BITCOIN_NETBASE_H diff --git a/src/node/coinstats.cpp b/src/node/coinstats.cpp index 02e50c4dbe..06fcc33725 100644 --- a/src/node/coinstats.cpp +++ b/src/node/coinstats.cpp @@ -6,6 +6,7 @@ #include <node/coinstats.h> #include <coins.h> +#include <crypto/muhash.h> #include <hash.h> #include <serialize.h> #include <uint256.h> @@ -24,31 +25,59 @@ static uint64_t GetBogoSize(const CScript& scriptPubKey) scriptPubKey.size() /* scriptPubKey */; } -static void ApplyStats(CCoinsStats& stats, CHashWriter& ss, const uint256& hash, const std::map<uint32_t, Coin>& outputs) +static void ApplyHash(CCoinsStats& stats, CHashWriter& ss, const uint256& hash, const std::map<uint32_t, Coin>& outputs, std::map<uint32_t, Coin>::const_iterator it) { - assert(!outputs.empty()); - ss << hash; - ss << VARINT(outputs.begin()->second.nHeight * 2 + outputs.begin()->second.fCoinBase ? 1u : 0u); - stats.nTransactions++; - for (const auto& output : outputs) { - ss << VARINT(output.first + 1); - ss << output.second.out.scriptPubKey; - ss << VARINT_MODE(output.second.out.nValue, VarIntMode::NONNEGATIVE_SIGNED); - stats.nTransactionOutputs++; - stats.nTotalAmount += output.second.out.nValue; - stats.nBogoSize += GetBogoSize(output.second.out.scriptPubKey); + if (it == outputs.begin()) { + ss << hash; + ss << VARINT(it->second.nHeight * 2 + it->second.fCoinBase ? 1u : 0u); + } + + ss << VARINT(it->first + 1); + ss << it->second.out.scriptPubKey; + ss << VARINT_MODE(it->second.out.nValue, VarIntMode::NONNEGATIVE_SIGNED); + + if (it == std::prev(outputs.end())) { + ss << VARINT(0u); } - ss << VARINT(0u); } -static void ApplyStats(CCoinsStats& stats, std::nullptr_t, const uint256& hash, const std::map<uint32_t, Coin>& outputs) +static void ApplyHash(CCoinsStats& stats, std::nullptr_t, const uint256& hash, const std::map<uint32_t, Coin>& outputs, std::map<uint32_t, Coin>::const_iterator it) {} + +static void ApplyHash(CCoinsStats& stats, MuHash3072& muhash, const uint256& hash, const std::map<uint32_t, Coin>& outputs, std::map<uint32_t, Coin>::const_iterator it) +{ + COutPoint outpoint = COutPoint(hash, it->first); + Coin coin = it->second; + + CDataStream ss(SER_DISK, PROTOCOL_VERSION); + ss << outpoint; + ss << static_cast<uint32_t>(coin.nHeight * 2 + coin.fCoinBase); + ss << coin.out; + muhash.Insert(MakeUCharSpan(ss)); +} + +//! Warning: be very careful when changing this! assumeutxo and UTXO snapshot +//! validation commitments are reliant on the hash constructed by this +//! function. +//! +//! If the construction of this hash is changed, it will invalidate +//! existing UTXO snapshots. This will not result in any kind of consensus +//! failure, but it will force clients that were expecting to make use of +//! assumeutxo to do traditional IBD instead. +//! +//! It is also possible, though very unlikely, that a change in this +//! construction could cause a previously invalid (and potentially malicious) +//! UTXO snapshot to be considered valid. 
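A note on the MUHASH branch introduced in this file (condensed sketch, not part of the patch): each unspent coin is serialized exactly as in the MuHash3072 overload of ApplyHash() above and folded into a MuHash3072 rolling hash, so the result does not depend on the order in which coins are visited, unlike hash_serialized_2, which depends on the cursor order of the coins database. Assuming the Core headers shown in this diff (crypto/muhash.h, streams.h, coins.h) and a hypothetical utxo_set container standing in for the coins-DB cursor:

#include <coins.h>           // COutPoint, Coin
#include <crypto/muhash.h>   // MuHash3072
#include <span.h>            // MakeUCharSpan
#include <streams.h>         // CDataStream
#include <version.h>         // PROTOCOL_VERSION

#include <utility>
#include <vector>

// utxo_set is illustrative only; GetUTXOStats() really walks a CCoinsViewCursor
// and feeds ApplyHash() one output at a time.
uint256 UtxoSetMuHash(const std::vector<std::pair<COutPoint, Coin>>& utxo_set)
{
    MuHash3072 muhash;
    for (const auto& [outpoint, coin] : utxo_set) {
        CDataStream ss(SER_DISK, PROTOCOL_VERSION);
        ss << outpoint;
        ss << static_cast<uint32_t>(coin.nHeight * 2 + coin.fCoinBase);
        ss << coin.out;
        muhash.Insert(MakeUCharSpan(ss));    // order-independent accumulation
    }
    uint256 out;
    muhash.Finalize(out);                    // reported by gettxoutsetinfo as "muhash"
    return out;
}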
+template <typename T> +static void ApplyStats(CCoinsStats& stats, T& hash_obj, const uint256& hash, const std::map<uint32_t, Coin>& outputs) { assert(!outputs.empty()); stats.nTransactions++; - for (const auto& output : outputs) { + for (auto it = outputs.begin(); it != outputs.end(); ++it) { + ApplyHash(stats, hash_obj, hash, outputs, it); + stats.nTransactionOutputs++; - stats.nTotalAmount += output.second.out.nValue; - stats.nBogoSize += GetBogoSize(output.second.out.scriptPubKey); + stats.nTotalAmount += it->second.out.nValue; + stats.nBogoSize += GetBogoSize(it->second.out.scriptPubKey); } } @@ -63,7 +92,7 @@ static bool GetUTXOStats(CCoinsView* view, CCoinsStats& stats, T hash_obj, const stats.hashBlock = pcursor->GetBestBlock(); { LOCK(cs_main); - stats.nHeight = LookupBlockIndex(stats.hashBlock)->nHeight; + stats.nHeight = g_chainman.m_blockman.LookupBlockIndex(stats.hashBlock)->nHeight; } PrepareHash(hash_obj, stats); @@ -104,6 +133,10 @@ bool GetUTXOStats(CCoinsView* view, CCoinsStats& stats, CoinStatsHashType hash_t CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION); return GetUTXOStats(view, stats, ss, interruption_point); } + case(CoinStatsHashType::MUHASH): { + MuHash3072 muhash; + return GetUTXOStats(view, stats, muhash, interruption_point); + } case(CoinStatsHashType::NONE): { return GetUTXOStats(view, stats, nullptr, interruption_point); } @@ -116,10 +149,18 @@ static void PrepareHash(CHashWriter& ss, const CCoinsStats& stats) { ss << stats.hashBlock; } +// MuHash does not need the prepare step +static void PrepareHash(MuHash3072& muhash, CCoinsStats& stats) {} static void PrepareHash(std::nullptr_t, CCoinsStats& stats) {} static void FinalizeHash(CHashWriter& ss, CCoinsStats& stats) { stats.hashSerialized = ss.GetHash(); } +static void FinalizeHash(MuHash3072& muhash, CCoinsStats& stats) +{ + uint256 out; + muhash.Finalize(out); + stats.hashSerialized = out; +} static void FinalizeHash(std::nullptr_t, CCoinsStats& stats) {} diff --git a/src/node/coinstats.h b/src/node/coinstats.h index 7c56bfc2ad..f02b95235f 100644 --- a/src/node/coinstats.h +++ b/src/node/coinstats.h @@ -16,6 +16,7 @@ class CCoinsView; enum class CoinStatsHashType { HASH_SERIALIZED, + MUHASH, NONE, }; diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index e07eaa33d8..ec976fe9bf 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -441,13 +441,13 @@ public: bool checkFinalTx(const CTransaction& tx) override { LOCK(cs_main); - return CheckFinalTx(tx); + return CheckFinalTx(::ChainActive().Tip(), tx); } Optional<int> findLocatorFork(const CBlockLocator& locator) override { LOCK(cs_main); const CChain& active = Assert(m_node.chainman)->ActiveChain(); - if (CBlockIndex* fork = FindForkInGlobalIndex(active, locator)) { + if (CBlockIndex* fork = g_chainman.m_blockman.FindForkInGlobalIndex(active, locator)) { return fork->nHeight; } return nullopt; @@ -456,7 +456,7 @@ public: { WAIT_LOCK(cs_main, lock); const CChain& active = Assert(m_node.chainman)->ActiveChain(); - return FillBlock(LookupBlockIndex(hash), block, lock, active); + return FillBlock(g_chainman.m_blockman.LookupBlockIndex(hash), block, lock, active); } bool findFirstBlockWithTimeAndHeight(int64_t min_time, int min_height, const FoundBlock& block) override { @@ -468,7 +468,7 @@ public: { WAIT_LOCK(cs_main, lock); const CChain& active = Assert(m_node.chainman)->ActiveChain(); - if (const CBlockIndex* block = LookupBlockIndex(block_hash)) { + if (const CBlockIndex* block = g_chainman.m_blockman.LookupBlockIndex(block_hash)) 
{ if (const CBlockIndex* ancestor = block->GetAncestor(ancestor_height)) { return FillBlock(ancestor, ancestor_out, lock, active); } @@ -479,8 +479,8 @@ public: { WAIT_LOCK(cs_main, lock); const CChain& active = Assert(m_node.chainman)->ActiveChain(); - const CBlockIndex* block = LookupBlockIndex(block_hash); - const CBlockIndex* ancestor = LookupBlockIndex(ancestor_hash); + const CBlockIndex* block = g_chainman.m_blockman.LookupBlockIndex(block_hash); + const CBlockIndex* ancestor = g_chainman.m_blockman.LookupBlockIndex(ancestor_hash); if (block && ancestor && block->GetAncestor(ancestor->nHeight) != ancestor) ancestor = nullptr; return FillBlock(ancestor, ancestor_out, lock, active); } @@ -488,8 +488,8 @@ public: { WAIT_LOCK(cs_main, lock); const CChain& active = Assert(m_node.chainman)->ActiveChain(); - const CBlockIndex* block1 = LookupBlockIndex(block_hash1); - const CBlockIndex* block2 = LookupBlockIndex(block_hash2); + const CBlockIndex* block1 = g_chainman.m_blockman.LookupBlockIndex(block_hash1); + const CBlockIndex* block2 = g_chainman.m_blockman.LookupBlockIndex(block_hash2); const CBlockIndex* ancestor = block1 && block2 ? LastCommonAncestor(block1, block2) : nullptr; // Using & instead of && below to avoid short circuiting and leaving // output uninitialized. @@ -499,7 +499,7 @@ public: double guessVerificationProgress(const uint256& block_hash) override { LOCK(cs_main); - return GuessVerificationProgress(Params().TxData(), LookupBlockIndex(block_hash)); + return GuessVerificationProgress(Params().TxData(), g_chainman.m_blockman.LookupBlockIndex(block_hash)); } bool hasBlocks(const uint256& block_hash, int min_height, Optional<int> max_height) override { @@ -511,7 +511,7 @@ public: // used to limit the range, and passing min_height that's too low or // max_height that's too high will not crash or change the result. LOCK(::cs_main); - if (CBlockIndex* block = LookupBlockIndex(block_hash)) { + if (CBlockIndex* block = g_chainman.m_blockman.LookupBlockIndex(block_hash)) { if (max_height && block->nHeight >= *max_height) block = block->GetAncestor(*max_height); for (; block->nStatus & BLOCK_HAVE_DATA; block = block->pprev) { // Check pprev to not segfault if min_height is too low diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp index d3bb9687a8..3b3fab7b6b 100644 --- a/src/node/transaction.cpp +++ b/src/node/transaction.cpp @@ -50,22 +50,22 @@ TransactionError BroadcastTransaction(NodeContext& node, const CTransactionRef t } if (!node.mempool->exists(hashTx)) { // Transaction is not already in the mempool. - TxValidationState state; if (max_tx_fee > 0) { // First, call ATMP with test_accept and check the fee. If ATMP // fails here, return error immediately. - CAmount fee{0}; - if (!AcceptToMemoryPool(*node.mempool, state, tx, - nullptr /* plTxnReplaced */, false /* bypass_limits */, /* test_accept */ true, &fee)) { - return HandleATMPError(state, err_string); - } else if (fee > max_tx_fee) { + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *node.mempool, tx, false /* bypass_limits */, + true /* test_accept */); + if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) { + return HandleATMPError(result.m_state, err_string); + } else if (result.m_base_fees.value() > max_tx_fee) { return TransactionError::MAX_FEE_EXCEEDED; } } // Try to submit the transaction to the mempool. 
- if (!AcceptToMemoryPool(*node.mempool, state, tx, - nullptr /* plTxnReplaced */, false /* bypass_limits */)) { - return HandleATMPError(state, err_string); + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *node.mempool, tx, false /* bypass_limits */, + false /* test_accept */); + if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) { + return HandleATMPError(result.m_state, err_string); } // Transaction was accepted to the mempool. diff --git a/src/protocol.cpp b/src/protocol.cpp index 56e738eaa8..0b893b9272 100644 --- a/src/protocol.cpp +++ b/src/protocol.cpp @@ -5,7 +5,6 @@ #include <protocol.h> -#include <util/strencodings.h> #include <util/system.h> static std::atomic<bool> g_initial_block_download_completed(false); @@ -86,7 +85,7 @@ const static std::string allNetMessageTypes[] = { NetMsgType::CFCHECKPT, NetMsgType::WTXIDRELAY, }; -const static std::vector<std::string> allNetMessageTypesVec(allNetMessageTypes, allNetMessageTypes+ARRAYLEN(allNetMessageTypes)); +const static std::vector<std::string> allNetMessageTypesVec(std::begin(allNetMessageTypes), std::end(allNetMessageTypes)); CMessageHeader::CMessageHeader() { diff --git a/src/psbt.cpp b/src/psbt.cpp index 4db57d3cd0..a849b2ea53 100644 --- a/src/psbt.cpp +++ b/src/psbt.cpp @@ -363,7 +363,7 @@ bool DecodeBase64PSBT(PartiallySignedTransaction& psbt, const std::string& base6 bool DecodeRawPSBT(PartiallySignedTransaction& psbt, const std::string& tx_data, std::string& error) { - CDataStream ss_data(tx_data.data(), tx_data.data() + tx_data.size(), SER_NETWORK, PROTOCOL_VERSION); + CDataStream ss_data(MakeUCharSpan(tx_data), SER_NETWORK, PROTOCOL_VERSION); try { ss_data >> psbt; if (!ss_data.empty()) { diff --git a/src/qt/forms/debugwindow.ui b/src/qt/forms/debugwindow.ui index 3831852185..3acfe7eb65 100644 --- a/src/qt/forms/debugwindow.ui +++ b/src/qt/forms/debugwindow.ui @@ -988,7 +988,7 @@ </item> </layout> </widget> - <widget class="QWidget" name="widget_2" native="true"> + <widget class="QWidget" name="peersTabRightPanel" native="true"> <property name="sizePolicy"> <sizepolicy hsizetype="Minimum" vsizetype="Preferred"> <horstretch>0</horstretch> @@ -1079,10 +1079,10 @@ <item row="1" column="0"> <widget class="QLabel" name="peerConnectionTypeLabel"> <property name="toolTip"> - <string>The type of peer connection: %1</string> + <string>The direction and type of peer connection: %1</string> </property> <property name="text"> - <string>Connection Type</string> + <string>Direction/Type</string> </property> </widget> </item> diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp index 8fe9460b9a..d72ed0c3ff 100644 --- a/src/qt/guiutil.cpp +++ b/src/qt/guiutil.cpp @@ -652,15 +652,19 @@ QString NetworkToQString(Network net) assert(false); } -QString ConnectionTypeToQString(ConnectionType conn_type, bool relay_txes) +QString ConnectionTypeToQString(ConnectionType conn_type, bool prepend_direction) { + QString prefix; + if (prepend_direction) { + prefix = (conn_type == ConnectionType::INBOUND) ? QObject::tr("Inbound") : QObject::tr("Outbound") + " "; + } switch (conn_type) { - case ConnectionType::INBOUND: return relay_txes ? 
QObject::tr("Inbound Full Relay") : QObject::tr("Inbound Block Relay"); - case ConnectionType::OUTBOUND_FULL_RELAY: return QObject::tr("Outbound Full Relay"); - case ConnectionType::BLOCK_RELAY: return QObject::tr("Outbound Block Relay"); - case ConnectionType::MANUAL: return QObject::tr("Outbound Manual"); - case ConnectionType::FEELER: return QObject::tr("Outbound Feeler"); - case ConnectionType::ADDR_FETCH: return QObject::tr("Outbound Address Fetch"); + case ConnectionType::INBOUND: return prefix; + case ConnectionType::OUTBOUND_FULL_RELAY: return prefix + QObject::tr("Full Relay"); + case ConnectionType::BLOCK_RELAY: return prefix + QObject::tr("Block Relay"); + case ConnectionType::MANUAL: return prefix + QObject::tr("Manual"); + case ConnectionType::FEELER: return prefix + QObject::tr("Feeler"); + case ConnectionType::ADDR_FETCH: return prefix + QObject::tr("Address Fetch"); } // no default case, so the compiler can warn about missing cases assert(false); } diff --git a/src/qt/guiutil.h b/src/qt/guiutil.h index 63c7c265ed..3134c98429 100644 --- a/src/qt/guiutil.h +++ b/src/qt/guiutil.h @@ -194,7 +194,7 @@ namespace GUIUtil QString NetworkToQString(Network net); /** Convert enum ConnectionType to QString */ - QString ConnectionTypeToQString(ConnectionType conn_type, bool relay_txes); + QString ConnectionTypeToQString(ConnectionType conn_type, bool prepend_direction); /** Convert seconds into a QString with days, hours, mins, secs */ QString formatDurationStr(int secs); diff --git a/src/qt/networkstyle.cpp b/src/qt/networkstyle.cpp index b1081f6aee..ee70c1bc30 100644 --- a/src/qt/networkstyle.cpp +++ b/src/qt/networkstyle.cpp @@ -22,7 +22,6 @@ static const struct { {"signet", QAPP_APP_NAME_SIGNET, 35, 15}, {"regtest", QAPP_APP_NAME_REGTEST, 160, 30}, }; -static const unsigned network_styles_count = sizeof(network_styles)/sizeof(*network_styles); // titleAddText needs to be const char* for tr() NetworkStyle::NetworkStyle(const QString &_appName, const int iconColorHueShift, const int iconColorSaturationReduction, const char *_titleAddText): @@ -81,14 +80,12 @@ NetworkStyle::NetworkStyle(const QString &_appName, const int iconColorHueShift, const NetworkStyle* NetworkStyle::instantiate(const std::string& networkId) { std::string titleAddText = networkId == CBaseChainParams::MAIN ? 
"" : strprintf("[%s]", networkId); - for (unsigned x=0; x<network_styles_count; ++x) - { - if (networkId == network_styles[x].networkId) - { + for (const auto& network_style : network_styles) { + if (networkId == network_style.networkId) { return new NetworkStyle( - network_styles[x].appName, - network_styles[x].iconColorHueShift, - network_styles[x].iconColorSaturationReduction, + network_style.appName, + network_style.iconColorHueShift, + network_style.iconColorSaturationReduction, titleAddText.c_str()); } } diff --git a/src/qt/peertablemodel.cpp b/src/qt/peertablemodel.cpp index bad81d894c..5f518a67cd 100644 --- a/src/qt/peertablemodel.cpp +++ b/src/qt/peertablemodel.cpp @@ -29,6 +29,8 @@ bool NodeLessThan::operator()(const CNodeCombinedStats &left, const CNodeCombine return pLeft->nodeid < pRight->nodeid; case PeerTableModel::Address: return pLeft->addrName.compare(pRight->addrName) < 0; + case PeerTableModel::ConnectionType: + return pLeft->m_conn_type < pRight->m_conn_type; case PeerTableModel::Network: return pLeft->m_network < pRight->m_network; case PeerTableModel::Ping: @@ -163,6 +165,8 @@ QVariant PeerTableModel::data(const QModelIndex &index, int role) const case Address: // prepend to peer address down-arrow symbol for inbound connection and up-arrow for outbound connection return QString(rec->nodeStats.fInbound ? "↓ " : "↑ ") + QString::fromStdString(rec->nodeStats.addrName); + case ConnectionType: + return GUIUtil::ConnectionTypeToQString(rec->nodeStats.m_conn_type, /* prepend_direction */ false); case Network: return GUIUtil::NetworkToQString(rec->nodeStats.m_network); case Ping: @@ -176,6 +180,7 @@ QVariant PeerTableModel::data(const QModelIndex &index, int role) const } } else if (role == Qt::TextAlignmentRole) { switch (index.column()) { + case ConnectionType: case Network: return QVariant(Qt::AlignCenter); case Ping: diff --git a/src/qt/peertablemodel.h b/src/qt/peertablemodel.h index 7bff239507..0823235ec0 100644 --- a/src/qt/peertablemodel.h +++ b/src/qt/peertablemodel.h @@ -59,12 +59,13 @@ public: enum ColumnIndex { NetNodeId = 0, - Address = 1, - Network = 2, - Ping = 3, - Sent = 4, - Received = 5, - Subversion = 6 + Address, + ConnectionType, + Network, + Ping, + Sent, + Received, + Subversion }; enum { @@ -87,7 +88,7 @@ public Q_SLOTS: private: interfaces::Node& m_node; - const QStringList columns{tr("Peer Id"), tr("Address"), tr("Network"), tr("Ping"), tr("Sent"), tr("Received"), tr("User Agent")}; + const QStringList columns{tr("Peer Id"), tr("Address"), tr("Type"), tr("Network"), tr("Ping"), tr("Sent"), tr("Received"), tr("User Agent")}; std::unique_ptr<PeerTablePriv> priv; QTimer *timer; }; diff --git a/src/qt/platformstyle.cpp b/src/qt/platformstyle.cpp index c6b80fd340..aab8d8e4af 100644 --- a/src/qt/platformstyle.cpp +++ b/src/qt/platformstyle.cpp @@ -23,7 +23,6 @@ static const struct { /* Other: linux, unix, ... 
*/ {"other", true, true, false} }; -static const unsigned platform_styles_count = sizeof(platform_styles)/sizeof(*platform_styles); namespace { /* Local functions for colorizing single-color images */ @@ -121,15 +120,13 @@ QIcon PlatformStyle::TextColorIcon(const QIcon& icon) const const PlatformStyle *PlatformStyle::instantiate(const QString &platformId) { - for (unsigned x=0; x<platform_styles_count; ++x) - { - if (platformId == platform_styles[x].platformId) - { + for (const auto& platform_style : platform_styles) { + if (platformId == platform_style.platformId) { return new PlatformStyle( - platform_styles[x].platformId, - platform_styles[x].imagesOnButtons, - platform_styles[x].colorizeIcons, - platform_styles[x].useExtraSpacing); + platform_style.platformId, + platform_style.imagesOnButtons, + platform_style.colorizeIcons, + platform_style.useExtraSpacing); } } return nullptr; diff --git a/src/qt/recentrequeststablemodel.cpp b/src/qt/recentrequeststablemodel.cpp index 18b913774b..03531a1381 100644 --- a/src/qt/recentrequeststablemodel.cpp +++ b/src/qt/recentrequeststablemodel.cpp @@ -181,7 +181,7 @@ void RecentRequestsTableModel::addNewRequest(const SendCoinsRecipient &recipient // called from ctor when loading from wallet void RecentRequestsTableModel::addNewRequest(const std::string &recipient) { - std::vector<char> data(recipient.begin(), recipient.end()); + std::vector<uint8_t> data(recipient.begin(), recipient.end()); CDataStream ss(data, SER_DISK, CLIENT_VERSION); RecentRequestEntry entry; diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index a252685d2f..7c8de962c6 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -462,7 +462,7 @@ RPCConsole::RPCConsole(interfaces::Node& node, const PlatformStyle *_platformSty constexpr QChar nonbreaking_hyphen(8209); const std::vector<QString> CONNECTION_TYPE_DOC{ - tr("Inbound Full/Block Relay: initiated by peer"), + tr("Inbound: initiated by peer"), tr("Outbound Full Relay: default"), tr("Outbound Block Relay: does not relay transactions or addresses"), tr("Outbound Manual: added using RPC %1 or %2/%3 configuration options") @@ -1097,7 +1097,7 @@ void RPCConsole::updateDetailWidget() { const QList<QModelIndex> selected_peers = GUIUtil::getEntryData(ui->peerWidget, PeerTableModel::NetNodeId); if (!clientModel || !clientModel->getPeerTableModel() || selected_peers.size() != 1) { - ui->detailWidget->hide(); + ui->peersTabRightPanel->hide(); ui->peerHeading->setText(tr("Select a peer to view detailed information.")); return; } @@ -1115,12 +1115,11 @@ void RPCConsole::updateDetailWidget() ui->peerBytesRecv->setText(GUIUtil::formatBytes(stats->nodeStats.nRecvBytes)); ui->peerConnTime->setText(GUIUtil::formatDurationStr(GetSystemTimeInSeconds() - stats->nodeStats.nTimeConnected)); ui->peerPingTime->setText(GUIUtil::formatPingTime(stats->nodeStats.m_ping_usec)); - ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStats.m_ping_wait_usec)); ui->peerMinPing->setText(GUIUtil::formatPingTime(stats->nodeStats.m_min_ping_usec)); ui->timeoffset->setText(GUIUtil::formatTimeOffset(stats->nodeStats.nTimeOffset)); ui->peerVersion->setText(QString::number(stats->nodeStats.nVersion)); ui->peerSubversion->setText(QString::fromStdString(stats->nodeStats.cleanSubVer)); - ui->peerConnectionType->setText(GUIUtil::ConnectionTypeToQString(stats->nodeStats.m_conn_type, stats->nodeStats.fRelayTxes)); + ui->peerConnectionType->setText(GUIUtil::ConnectionTypeToQString(stats->nodeStats.m_conn_type, /* prepend_direction */ true)); 
ui->peerNetwork->setText(GUIUtil::NetworkToQString(stats->nodeStats.m_network)); if (stats->nodeStats.m_permissionFlags == PF_NONE) { ui->peerPermissions->setText(tr("N/A")); @@ -1149,9 +1148,10 @@ void RPCConsole::updateDetailWidget() ui->peerCommonHeight->setText(tr("Unknown")); ui->peerHeight->setText(QString::number(stats->nodeStateStats.m_starting_height)); + ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStateStats.m_ping_wait_usec)); } - ui->detailWidget->show(); + ui->peersTabRightPanel->show(); } void RPCConsole::resizeEvent(QResizeEvent *event) diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp index 58427b42cc..02254da3ce 100644 --- a/src/qt/walletmodel.cpp +++ b/src/qt/walletmodel.cpp @@ -245,7 +245,7 @@ WalletModel::SendCoinsReturn WalletModel::sendCoins(WalletModelTransaction &tran CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); ssTx << *newTx; - transaction_array.append(&(ssTx[0]), ssTx.size()); + transaction_array.append((const char*)&(ssTx[0]), ssTx.size()); } // Add addresses / update labels that we've sent to the address book, diff --git a/src/random.cpp b/src/random.cpp index af9504e0ce..9900825abb 100644 --- a/src/random.cpp +++ b/src/random.cpp @@ -38,7 +38,6 @@ #include <sys/random.h> #endif #ifdef HAVE_SYSCTL_ARND -#include <util/strencodings.h> // for ARRAYLEN #include <sys/sysctl.h> #endif @@ -333,7 +332,7 @@ void GetOSRand(unsigned char *ent32) int have = 0; do { size_t len = NUM_OS_RANDOM_BYTES - have; - if (sysctl(name, ARRAYLEN(name), ent32 + have, &len, nullptr, 0) != 0) { + if (sysctl(name, std::size(name), ent32 + have, &len, nullptr, 0) != 0) { RandFailure(); } have += len; diff --git a/src/rest.cpp b/src/rest.cpp index 678ffdb760..71426a4dc4 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -19,7 +19,6 @@ #include <txmempool.h> #include <util/check.h> #include <util/ref.h> -#include <util/strencodings.h> #include <validation.h> #include <version.h> @@ -117,9 +116,10 @@ static RetFormat ParseDataFormat(std::string& param, const std::string& strReq) param = strReq.substr(0, pos); const std::string suff(strReq, pos + 1); - for (unsigned int i = 0; i < ARRAYLEN(rf_names); i++) - if (suff == rf_names[i].name) - return rf_names[i].rf; + for (const auto& rf_name : rf_names) { + if (suff == rf_name.name) + return rf_name.rf; + } /* If no suffix is found, return original string. 
*/ param = strReq; @@ -129,12 +129,13 @@ static RetFormat ParseDataFormat(std::string& param, const std::string& strReq) static std::string AvailableDataFormatsString() { std::string formats; - for (unsigned int i = 0; i < ARRAYLEN(rf_names); i++) - if (strlen(rf_names[i].name) > 0) { + for (const auto& rf_name : rf_names) { + if (strlen(rf_name.name) > 0) { formats.append("."); - formats.append(rf_names[i].name); + formats.append(rf_name.name); formats.append(", "); } + } if (formats.length() > 0) return formats.substr(0, formats.length() - 2); @@ -179,7 +180,7 @@ static bool rest_headers(const util::Ref& context, { LOCK(cs_main); tip = ::ChainActive().Tip(); - const CBlockIndex* pindex = LookupBlockIndex(hash); + const CBlockIndex* pindex = g_chainman.m_blockman.LookupBlockIndex(hash); while (pindex != nullptr && ::ChainActive().Contains(pindex)) { headers.push_back(pindex); if (headers.size() == (unsigned long)count) @@ -247,7 +248,7 @@ static bool rest_block(HTTPRequest* req, { LOCK(cs_main); tip = ::ChainActive().Tip(); - pblockindex = LookupBlockIndex(hash); + pblockindex = g_chainman.m_blockman.LookupBlockIndex(hash); if (!pblockindex) { return RESTERR(req, HTTP_NOT_FOUND, hashStr + " not found"); } @@ -695,6 +696,7 @@ void InterruptREST() void StopREST() { - for (unsigned int i = 0; i < ARRAYLEN(uri_prefixes); i++) - UnregisterHTTPHandler(uri_prefixes[i].prefix, false); + for (const auto& up : uri_prefixes) { + UnregisterHTTPHandler(up.prefix, false); + } } diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index e41a78f917..5dc33d7a98 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -17,8 +17,8 @@ #include <node/coinstats.h> #include <node/context.h> #include <node/utxo_snapshot.h> -#include <policy/fees.h> #include <policy/feerate.h> +#include <policy/fees.h> #include <policy/policy.h> #include <policy/rbf.h> #include <primitives/transaction.h> @@ -156,21 +156,11 @@ UniValue blockheaderToJSON(const CBlockIndex* tip, const CBlockIndex* blockindex UniValue blockToJSON(const CBlock& block, const CBlockIndex* tip, const CBlockIndex* blockindex, bool txDetails) { - // Serialize passed information without accessing chain state of the active chain! 
- AssertLockNotHeld(cs_main); // For performance reasons + UniValue result = blockheaderToJSON(tip, blockindex); - UniValue result(UniValue::VOBJ); - result.pushKV("hash", blockindex->GetBlockHash().GetHex()); - const CBlockIndex* pnext; - int confirmations = ComputeNextBlockAndDepth(tip, blockindex, pnext); - result.pushKV("confirmations", confirmations); result.pushKV("strippedsize", (int)::GetSerializeSize(block, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS)); result.pushKV("size", (int)::GetSerializeSize(block, PROTOCOL_VERSION)); result.pushKV("weight", (int)::GetBlockWeight(block)); - result.pushKV("height", blockindex->nHeight); - result.pushKV("version", block.nVersion); - result.pushKV("versionHex", strprintf("%08x", block.nVersion)); - result.pushKV("merkleroot", block.hashMerkleRoot.GetHex()); UniValue txs(UniValue::VARR); if (txDetails) { CBlockUndo blockUndo; @@ -189,18 +179,7 @@ UniValue blockToJSON(const CBlock& block, const CBlockIndex* tip, const CBlockIn } } result.pushKV("tx", txs); - result.pushKV("time", block.GetBlockTime()); - result.pushKV("mediantime", (int64_t)blockindex->GetMedianTimePast()); - result.pushKV("nonce", (uint64_t)block.nNonce); - result.pushKV("bits", strprintf("%08x", block.nBits)); - result.pushKV("difficulty", GetDifficulty(blockindex)); - result.pushKV("chainwork", blockindex->nChainWork.GetHex()); - result.pushKV("nTx", (uint64_t)blockindex->nTx); - if (blockindex->pprev) - result.pushKV("previousblockhash", blockindex->pprev->GetBlockHash().GetHex()); - if (pnext) - result.pushKV("nextblockhash", pnext->GetBlockHash().GetHex()); return result; } @@ -843,7 +822,7 @@ static RPCHelpMan getblockheader() const CBlockIndex* tip; { LOCK(cs_main); - pblockindex = LookupBlockIndex(hash); + pblockindex = g_chainman.m_blockman.LookupBlockIndex(hash); tip = ::ChainActive().Tip(); } @@ -967,7 +946,7 @@ static RPCHelpMan getblock() const CBlockIndex* tip; { LOCK(cs_main); - pblockindex = LookupBlockIndex(hash); + pblockindex = g_chainman.m_blockman.LookupBlockIndex(hash); tip = ::ChainActive().Tip(); if (!pblockindex) { @@ -1047,13 +1026,26 @@ static RPCHelpMan pruneblockchain() }; } +CoinStatsHashType ParseHashType(const std::string& hash_type_input) +{ + if (hash_type_input == "hash_serialized_2") { + return CoinStatsHashType::HASH_SERIALIZED; + } else if (hash_type_input == "muhash") { + return CoinStatsHashType::MUHASH; + } else if (hash_type_input == "none") { + return CoinStatsHashType::NONE; + } else { + throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("%s is not a valid hash_type", hash_type_input)); + } +} + static RPCHelpMan gettxoutsetinfo() { return RPCHelpMan{"gettxoutsetinfo", "\nReturns statistics about the unspent transaction output set.\n" "Note this call may take some time.\n", { - {"hash_type", RPCArg::Type::STR, /* default */ "hash_serialized_2", "Which UTXO set hash should be calculated. Options: 'hash_serialized_2' (the legacy algorithm), 'none'."}, + {"hash_type", RPCArg::Type::STR, /* default */ "hash_serialized_2", "Which UTXO set hash should be calculated. 
Options: 'hash_serialized_2' (the legacy algorithm), 'muhash', 'none'."}, }, RPCResult{ RPCResult::Type::OBJ, "", "", @@ -1063,7 +1055,8 @@ static RPCHelpMan gettxoutsetinfo() {RPCResult::Type::NUM, "transactions", "The number of transactions with unspent outputs"}, {RPCResult::Type::NUM, "txouts", "The number of unspent transaction outputs"}, {RPCResult::Type::NUM, "bogosize", "A meaningless metric for UTXO set size"}, - {RPCResult::Type::STR_HEX, "hash_serialized_2", "The serialized hash (only present if 'hash_serialized_2' hash_type is chosen)"}, + {RPCResult::Type::STR_HEX, "hash_serialized_2", /* optional */ true, "The serialized hash (only present if 'hash_serialized_2' hash_type is chosen)"}, + {RPCResult::Type::STR_HEX, "muhash", /* optional */ true, "The serialized hash (only present if 'muhash' hash_type is chosen)"}, {RPCResult::Type::NUM, "disk_size", "The estimated size of the chainstate on disk"}, {RPCResult::Type::STR_AMOUNT, "total_amount", "The total amount"}, }}, @@ -1078,7 +1071,7 @@ static RPCHelpMan gettxoutsetinfo() CCoinsStats stats; ::ChainstateActive().ForceFlushStateToDisk(); - const CoinStatsHashType hash_type = ParseHashType(request.params[0], CoinStatsHashType::HASH_SERIALIZED); + const CoinStatsHashType hash_type{request.params[0].isNull() ? CoinStatsHashType::HASH_SERIALIZED : ParseHashType(request.params[0].get_str())}; CCoinsView* coins_view = WITH_LOCK(cs_main, return &ChainstateActive().CoinsDB()); NodeContext& node = EnsureNodeContext(request.context); @@ -1091,6 +1084,9 @@ static RPCHelpMan gettxoutsetinfo() if (hash_type == CoinStatsHashType::HASH_SERIALIZED) { ret.pushKV("hash_serialized_2", stats.hashSerialized.GetHex()); } + if (hash_type == CoinStatsHashType::MUHASH) { + ret.pushKV("muhash", stats.hashSerialized.GetHex()); + } ret.pushKV("disk_size", stats.nDiskSize); ret.pushKV("total_amount", ValueFromAmount(stats.nTotalAmount)); } else { @@ -1164,7 +1160,7 @@ static RPCHelpMan gettxout() } } - const CBlockIndex* pindex = LookupBlockIndex(coins_view->GetBestBlock()); + const CBlockIndex* pindex = g_chainman.m_blockman.LookupBlockIndex(coins_view->GetBestBlock()); ret.pushKV("bestblock", pindex->GetBlockHash().GetHex()); if (coin.nHeight == MEMPOOL_HEIGHT) { ret.pushKV("confirmations", 0); @@ -1500,6 +1496,7 @@ UniValue MempoolInfoToJSON(const CTxMemPool& pool) ret.pushKV("size", (int64_t)pool.size()); ret.pushKV("bytes", (int64_t)pool.GetTotalTxSize()); ret.pushKV("usage", (int64_t)pool.DynamicMemoryUsage()); + ret.pushKV("total_fee", ValueFromAmount(pool.GetTotalFee())); size_t maxmempool = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; ret.pushKV("maxmempool", (int64_t) maxmempool); ret.pushKV("mempoolminfee", ValueFromAmount(std::max(pool.GetMinFee(maxmempool), ::minRelayTxFee).GetFeePerK())); @@ -1520,6 +1517,7 @@ static RPCHelpMan getmempoolinfo() {RPCResult::Type::NUM, "size", "Current tx count"}, {RPCResult::Type::NUM, "bytes", "Sum of all virtual transaction sizes as defined in BIP 141. Differs from actual serialized size because witness data is discounted"}, {RPCResult::Type::NUM, "usage", "Total memory usage for the mempool"}, + {RPCResult::Type::STR_AMOUNT, "total_fee", "Total fees for the mempool in " + CURRENCY_UNIT + ", ignoring modified fees through prioritizetransaction"}, {RPCResult::Type::NUM, "maxmempool", "Maximum memory usage for the mempool"}, {RPCResult::Type::STR_AMOUNT, "mempoolminfee", "Minimum fee rate in " + CURRENCY_UNIT + "/kB for tx to be accepted. 
Is the maximum of minrelaytxfee and minimum mempool fee"}, {RPCResult::Type::STR_AMOUNT, "minrelaytxfee", "Current minimum relay fee for transactions"}, @@ -1557,7 +1555,7 @@ static RPCHelpMan preciousblock() { LOCK(cs_main); - pblockindex = LookupBlockIndex(hash); + pblockindex = g_chainman.m_blockman.LookupBlockIndex(hash); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } @@ -1595,7 +1593,7 @@ static RPCHelpMan invalidateblock() CBlockIndex* pblockindex; { LOCK(cs_main); - pblockindex = LookupBlockIndex(hash); + pblockindex = g_chainman.m_blockman.LookupBlockIndex(hash); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } @@ -1603,7 +1601,7 @@ static RPCHelpMan invalidateblock() InvalidateBlock(state, Params(), pblockindex); if (state.IsValid()) { - ActivateBestChain(state, Params()); + ::ChainstateActive().ActivateBestChain(state, Params()); } if (!state.IsValid()) { @@ -1634,7 +1632,7 @@ static RPCHelpMan reconsiderblock() { LOCK(cs_main); - CBlockIndex* pblockindex = LookupBlockIndex(hash); + CBlockIndex* pblockindex = g_chainman.m_blockman.LookupBlockIndex(hash); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } @@ -1643,7 +1641,7 @@ static RPCHelpMan reconsiderblock() } BlockValidationState state; - ActivateBestChain(state, Params()); + ::ChainstateActive().ActivateBestChain(state, Params()); if (!state.IsValid()) { throw JSONRPCError(RPC_DATABASE_ERROR, state.ToString()); @@ -1689,7 +1687,7 @@ static RPCHelpMan getchaintxstats() } else { uint256 hash(ParseHashV(request.params[1], "blockhash")); LOCK(cs_main); - pindex = LookupBlockIndex(hash); + pindex = g_chainman.m_blockman.LookupBlockIndex(hash); if (!pindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } @@ -1867,7 +1865,7 @@ static RPCHelpMan getblockstats() pindex = ::ChainActive()[height]; } else { const uint256 hash(ParseHashV(request.params[0], "hash_or_height")); - pindex = LookupBlockIndex(hash); + pindex = g_chainman.m_blockman.LookupBlockIndex(hash); if (!pindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } @@ -2330,7 +2328,7 @@ static RPCHelpMan getblockfilter() bool block_was_connected; { LOCK(cs_main); - block_index = LookupBlockIndex(block_hash); + block_index = g_chainman.m_blockman.LookupBlockIndex(block_hash); if (!block_index) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } @@ -2413,10 +2411,21 @@ static RPCHelpMan dumptxoutset() FILE* file{fsbridge::fopen(temppath, "wb")}; CAutoFile afile{file, SER_DISK, CLIENT_VERSION}; + NodeContext& node = EnsureNodeContext(request.context); + UniValue result = CreateUTXOSnapshot(node, node.chainman->ActiveChainstate(), afile); + fs::rename(temppath, path); + + result.pushKV("path", path.string()); + return result; +}, + }; +} + +UniValue CreateUTXOSnapshot(NodeContext& node, CChainState& chainstate, CAutoFile& afile) +{ std::unique_ptr<CCoinsViewCursor> pcursor; CCoinsStats stats; CBlockIndex* tip; - NodeContext& node = EnsureNodeContext(request.context); { // We need to lock cs_main to ensure that the coinsdb isn't written to @@ -2433,14 +2442,14 @@ static RPCHelpMan dumptxoutset() // LOCK(::cs_main); - ::ChainstateActive().ForceFlushStateToDisk(); + chainstate.ForceFlushStateToDisk(); - if (!GetUTXOStats(&::ChainstateActive().CoinsDB(), stats, CoinStatsHashType::NONE, node.rpc_interruption_point)) { + if (!GetUTXOStats(&chainstate.CoinsDB(), stats, CoinStatsHashType::NONE, 
node.rpc_interruption_point)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to read UTXO set"); } - pcursor = std::unique_ptr<CCoinsViewCursor>(::ChainstateActive().CoinsDB().Cursor()); - tip = LookupBlockIndex(stats.hashBlock); + pcursor = std::unique_ptr<CCoinsViewCursor>(chainstate.CoinsDB().Cursor()); + tip = g_chainman.m_blockman.LookupBlockIndex(stats.hashBlock); CHECK_NONFATAL(tip); } @@ -2464,16 +2473,13 @@ static RPCHelpMan dumptxoutset() } afile.fclose(); - fs::rename(temppath, path); UniValue result(UniValue::VOBJ); result.pushKV("coins_written", stats.coins_count); result.pushKV("base_hash", tip->GetBlockHash().ToString()); result.pushKV("base_height", tip->nHeight); - result.pushKV("path", path.string()); + return result; -}, - }; } void RegisterBlockchainRPCCommands(CRPCTable &t) diff --git a/src/rpc/blockchain.h b/src/rpc/blockchain.h index e4ce80400e..d8cae4dd24 100644 --- a/src/rpc/blockchain.h +++ b/src/rpc/blockchain.h @@ -6,6 +6,7 @@ #define BITCOIN_RPC_BLOCKCHAIN_H #include <amount.h> +#include <streams.h> #include <sync.h> #include <stdint.h> @@ -16,6 +17,7 @@ extern RecursiveMutex cs_main; class CBlock; class CBlockIndex; class CBlockPolicyEstimator; +class CChainState; class CTxMemPool; class ChainstateManager; class UniValue; @@ -57,4 +59,10 @@ CTxMemPool& EnsureMemPool(const util::Ref& context); ChainstateManager& EnsureChainman(const util::Ref& context); CBlockPolicyEstimator& EnsureFeeEstimator(const util::Ref& context); +/** + * Helper to create UTXO snapshots given a chainstate and a file handle. + * @return a UniValue map containing metadata about the snapshot. + */ +UniValue CreateUTXOSnapshot(NodeContext& node, CChainState& chainstate, CAutoFile& afile); + #endif diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index c723f1d7ea..50987a735b 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -375,7 +375,7 @@ static RPCHelpMan generateblock() LOCK(cs_main); BlockValidationState state; - if (!TestBlockValidity(state, chainparams, block, LookupBlockIndex(block.hashPrevBlock), false, false)) { + if (!TestBlockValidity(state, chainparams, ::ChainstateActive(), block, g_chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock), false, false)) { throw JSONRPCError(RPC_VERIFY_ERROR, strprintf("TestBlockValidity failed: %s", state.ToString())); } } @@ -618,7 +618,7 @@ static RPCHelpMan getblocktemplate() throw JSONRPCError(RPC_DESERIALIZATION_ERROR, "Block decode failed"); uint256 hash = block.GetHash(); - const CBlockIndex* pindex = LookupBlockIndex(hash); + const CBlockIndex* pindex = g_chainman.m_blockman.LookupBlockIndex(hash); if (pindex) { if (pindex->IsValid(BLOCK_VALID_SCRIPTS)) return "duplicate"; @@ -632,7 +632,7 @@ static RPCHelpMan getblocktemplate() if (block.hashPrevBlock != pindexPrev->GetBlockHash()) return "inconclusive-not-best-prevblk"; BlockValidationState state; - TestBlockValidity(state, Params(), block, pindexPrev, false, true); + TestBlockValidity(state, Params(), ::ChainstateActive(), block, pindexPrev, false, true); return BIP22ValidationResult(state); } @@ -966,7 +966,7 @@ static RPCHelpMan submitblock() uint256 hash = block.GetHash(); { LOCK(cs_main); - const CBlockIndex* pindex = LookupBlockIndex(hash); + const CBlockIndex* pindex = g_chainman.m_blockman.LookupBlockIndex(hash); if (pindex) { if (pindex->IsValid(BLOCK_VALID_SCRIPTS)) { return "duplicate"; @@ -979,7 +979,7 @@ static RPCHelpMan submitblock() { LOCK(cs_main); - const CBlockIndex* pindex = LookupBlockIndex(block.hashPrevBlock); + const CBlockIndex* pindex = 
g_chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock); if (pindex) { UpdateUncommittedBlockStructures(block, pindex, Params().GetConsensus()); } @@ -1023,7 +1023,7 @@ static RPCHelpMan submitheader() } { LOCK(cs_main); - if (!LookupBlockIndex(h.hashPrevBlock)) { + if (!g_chainman.m_blockman.LookupBlockIndex(h.hashPrevBlock)) { throw JSONRPCError(RPC_VERIFY_ERROR, "Must submit previous header (" + h.hashPrevBlock.GetHex() + ") first"); } } diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp index b75a7b8d26..38a0bddddb 100644 --- a/src/rpc/misc.cpp +++ b/src/rpc/misc.cpp @@ -365,13 +365,13 @@ static RPCHelpMan signmessagewithprivkey() static RPCHelpMan setmocktime() { return RPCHelpMan{"setmocktime", - "\nSet the local time to given timestamp (-regtest only)\n", - { - {"timestamp", RPCArg::Type::NUM, RPCArg::Optional::NO, UNIX_EPOCH_TIME + "\n" - " Pass 0 to go back to using the system time."}, - }, - RPCResult{RPCResult::Type::NONE, "", ""}, - RPCExamples{""}, + "\nSet the local time to given timestamp (-regtest only)\n", + { + {"timestamp", RPCArg::Type::NUM, RPCArg::Optional::NO, UNIX_EPOCH_TIME + "\n" + "Pass 0 to go back to using the system time."}, + }, + RPCResult{RPCResult::Type::NONE, "", ""}, + RPCExamples{""}, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { if (!Params().IsMockableChain()) { @@ -386,7 +386,10 @@ static RPCHelpMan setmocktime() LOCK(cs_main); RPCTypeCheck(request.params, {UniValue::VNUM}); - int64_t time = request.params[0].get_int64(); + const int64_t time{request.params[0].get_int64()}; + if (time < 0) { + throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Mocktime can not be negative: %s.", time)); + } SetMockTime(time); if (request.context.Has<NodeContext>()) { for (const auto& chain_client : request.context.Get<NodeContext>().chain_clients) { diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 47d77b341a..0224ee697a 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -77,13 +77,12 @@ static RPCHelpMan ping() [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { NodeContext& node = EnsureNodeContext(request.context); - if(!node.connman) + if (!node.peerman) { throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); + } // Request that each node send a ping during next message processing pass - node.connman->ForEachNode([](CNode* pnode) { - pnode->fPingQueued = true; - }); + node.peerman->SendPings(); return NullUniValue; }, }; @@ -104,7 +103,7 @@ static RPCHelpMan getpeerinfo() {RPCResult::Type::STR, "addr", "(host:port) The IP address and port of the peer"}, {RPCResult::Type::STR, "addrbind", "(ip:port) Bind address of the connection to the peer"}, {RPCResult::Type::STR, "addrlocal", "(ip:port) Local address as reported by the peer"}, - {RPCResult::Type::STR, "network", "Network (ipv4, ipv6, or onion) the peer connected through"}, + {RPCResult::Type::STR, "network", "Network (" + Join(GetNetworkNames(/* append_unroutable */ true), ", ") + ")"}, {RPCResult::Type::NUM, "mapped_as", "The AS in the BGP route to the peer used for diversifying\n" "peer selection (only available if the asmap config flag is set)"}, {RPCResult::Type::STR_HEX, "services", "The services offered"}, @@ -209,8 +208,8 @@ static RPCHelpMan getpeerinfo() if (stats.m_min_ping_usec < std::numeric_limits<int64_t>::max()) { obj.pushKV("minping", ((double)stats.m_min_ping_usec) / 1e6); } - if (stats.m_ping_wait_usec > 0) { - obj.pushKV("pingwait", ((double)stats.m_ping_wait_usec) / 1e6); + if 
(fStateStats && statestats.m_ping_wait_usec > 0) { + obj.pushKV("pingwait", ((double)statestats.m_ping_wait_usec) / 1e6); } obj.pushKV("version", stats.nVersion); // Use the sanitized form of subver here, to avoid tricksy remote peers from @@ -587,7 +586,7 @@ static RPCHelpMan getnetworkinfo() { {RPCResult::Type::OBJ, "", "", { - {RPCResult::Type::STR, "name", "network (ipv4, ipv6 or onion)"}, + {RPCResult::Type::STR, "name", "network (" + Join(GetNetworkNames(), ", ") + ")"}, {RPCResult::Type::BOOL, "limited", "is the network limited using -onlynet?"}, {RPCResult::Type::BOOL, "reachable", "is the network reachable?"}, {RPCResult::Type::STR, "proxy", "(\"host:port\") the proxy that is used for this network, or empty if none"}, diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 031ed31da1..47c776bbd1 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -54,7 +54,7 @@ static void TxToJSON(const CTransaction& tx, const uint256 hashBlock, UniValue& LOCK(cs_main); entry.pushKV("blockhash", hashBlock.GetHex()); - CBlockIndex* pindex = LookupBlockIndex(hashBlock); + CBlockIndex* pindex = g_chainman.m_blockman.LookupBlockIndex(hashBlock); if (pindex) { if (::ChainActive().Contains(pindex)) { entry.pushKV("confirmations", 1 + ::ChainActive().Height() - pindex->nHeight); @@ -178,7 +178,7 @@ static RPCHelpMan getrawtransaction() LOCK(cs_main); uint256 blockhash = ParseHashV(request.params[2], "parameter 3"); - blockindex = LookupBlockIndex(blockhash); + blockindex = g_chainman.m_blockman.LookupBlockIndex(blockhash); if (!blockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block hash not found"); } @@ -260,7 +260,7 @@ static RPCHelpMan gettxoutproof() if (!request.params[1].isNull()) { LOCK(cs_main); hashBlock = ParseHashV(request.params[1], "blockhash"); - pblockindex = LookupBlockIndex(hashBlock); + pblockindex = g_chainman.m_blockman.LookupBlockIndex(hashBlock); if (!pblockindex) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found"); } @@ -290,7 +290,7 @@ static RPCHelpMan gettxoutproof() if (!tx || hashBlock.IsNull()) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Transaction not yet in block"); } - pblockindex = LookupBlockIndex(hashBlock); + pblockindex = g_chainman.m_blockman.LookupBlockIndex(hashBlock); if (!pblockindex) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Transaction index corrupt"); } @@ -350,7 +350,7 @@ static RPCHelpMan verifytxoutproof() LOCK(cs_main); - const CBlockIndex* pindex = LookupBlockIndex(merkleBlock.header.GetHash()); + const CBlockIndex* pindex = g_chainman.m_blockman.LookupBlockIndex(merkleBlock.header.GetHash()); if (!pindex || !::ChainActive().Contains(pindex) || pindex->nTx == 0) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Block not found in chain"); } @@ -816,10 +816,11 @@ static RPCHelpMan sendrawtransaction() { return RPCHelpMan{"sendrawtransaction", "\nSubmit a raw transaction (serialized, hex-encoded) to local node and network.\n" - "\nNote that the transaction will be sent unconditionally to all peers, so using this\n" + "\nThe transaction will be sent unconditionally to all peers, so using sendrawtransaction\n" "for manual rebroadcast may degrade privacy by leaking the transaction's origin, as\n" "nodes will normally not rebroadcast non-wallet transactions already in their mempool.\n" - "\nAlso see createrawtransaction and signrawtransactionwithkey calls.\n", + "\nA specific exception, RPC_TRANSACTION_ALREADY_IN_CHAIN, may throw if the transaction cannot be added to the mempool.\n" 
+ "\nRelated RPCs: createrawtransaction, signrawtransactionwithkey\n", { {"hexstring", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The hex string of the raw transaction"}, {"maxfeerate", RPCArg::Type::AMOUNT, /* default */ FormatMoney(DEFAULT_MAX_RAW_TX_FEE_RATE.GetFeePerK()), @@ -945,44 +946,35 @@ static RPCHelpMan testmempoolaccept() result_0.pushKV("txid", tx->GetHash().GetHex()); result_0.pushKV("wtxid", tx->GetWitnessHash().GetHex()); - TxValidationState state; - bool test_accept_res; - CAmount fee{0}; - { - LOCK(cs_main); - test_accept_res = AcceptToMemoryPool(mempool, state, std::move(tx), - nullptr /* plTxnReplaced */, false /* bypass_limits */, /* test_accept */ true, &fee); - } - - // Check that fee does not exceed maximum fee - if (test_accept_res && max_raw_tx_fee && fee > max_raw_tx_fee) { - result_0.pushKV("allowed", false); - result_0.pushKV("reject-reason", "max-fee-exceeded"); - result.push_back(std::move(result_0)); - return result; - } - result_0.pushKV("allowed", test_accept_res); + const MempoolAcceptResult accept_result = WITH_LOCK(cs_main, return AcceptToMemoryPool(::ChainstateActive(), mempool, std::move(tx), + false /* bypass_limits */, /* test_accept */ true)); // Only return the fee and vsize if the transaction would pass ATMP. // These can be used to calculate the feerate. - if (test_accept_res) { - result_0.pushKV("vsize", virtual_size); - UniValue fees(UniValue::VOBJ); - fees.pushKV("base", ValueFromAmount(fee)); - result_0.pushKV("fees", fees); + if (accept_result.m_result_type == MempoolAcceptResult::ResultType::VALID) { + const CAmount fee = accept_result.m_base_fees.value(); + // Check that fee does not exceed maximum fee + if (max_raw_tx_fee && fee > max_raw_tx_fee) { + result_0.pushKV("allowed", false); + result_0.pushKV("reject-reason", "max-fee-exceeded"); + } else { + result_0.pushKV("allowed", true); + result_0.pushKV("vsize", virtual_size); + UniValue fees(UniValue::VOBJ); + fees.pushKV("base", ValueFromAmount(fee)); + result_0.pushKV("fees", fees); + } + result.push_back(std::move(result_0)); } else { - if (state.IsInvalid()) { - if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { - result_0.pushKV("reject-reason", "missing-inputs"); - } else { - result_0.pushKV("reject-reason", strprintf("%s", state.GetRejectReason())); - } + result_0.pushKV("allowed", false); + const TxValidationState state = accept_result.m_state; + if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { + result_0.pushKV("reject-reason", "missing-inputs"); } else { result_0.pushKV("reject-reason", state.GetRejectReason()); } + result.push_back(std::move(result_0)); } - - result.push_back(std::move(result_0)); return result; }, }; @@ -1344,7 +1336,7 @@ static RPCHelpMan combinepsbt() CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); ssTx << merged_psbt; - return EncodeBase64(MakeUCharSpan(ssTx)); + return EncodeBase64(ssTx); }, }; } @@ -1483,7 +1475,7 @@ static RPCHelpMan createpsbt() CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); ssTx << psbtx; - return EncodeBase64(MakeUCharSpan(ssTx)); + return EncodeBase64(ssTx); }, }; } @@ -1552,7 +1544,7 @@ static RPCHelpMan converttopsbt() CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); ssTx << psbtx; - return EncodeBase64(MakeUCharSpan(ssTx)); + return EncodeBase64(ssTx); }, }; } @@ -1643,7 +1635,7 @@ static RPCHelpMan utxoupdatepsbt() CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); ssTx << psbtx; - return EncodeBase64(MakeUCharSpan(ssTx)); + return EncodeBase64(ssTx); }, }; } @@ -1739,7 +1731,7 @@ static 
RPCHelpMan joinpsbts() CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); ssTx << shuffled_psbt; - return EncodeBase64(MakeUCharSpan(ssTx)); + return EncodeBase64(ssTx); }, }; } diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp index bfdba5253c..e890c0108a 100644 --- a/src/rpc/util.cpp +++ b/src/rpc/util.cpp @@ -113,23 +113,6 @@ std::vector<unsigned char> ParseHexO(const UniValue& o, std::string strKey) return ParseHexV(find_value(o, strKey), strKey); } -CoinStatsHashType ParseHashType(const UniValue& param, const CoinStatsHashType default_type) -{ - if (param.isNull()) { - return default_type; - } else { - std::string hash_type_input = param.get_str(); - - if (hash_type_input == "hash_serialized_2") { - return CoinStatsHashType::HASH_SERIALIZED; - } else if (hash_type_input == "none") { - return CoinStatsHashType::NONE; - } else { - throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("%d is not a valid hash_type", hash_type_input)); - } - } -} - std::string HelpExampleCli(const std::string& methodname, const std::string& args) { return "> bitcoin-cli " + methodname + " " + args + "\n"; diff --git a/src/rpc/util.h b/src/rpc/util.h index 444a013ca1..c54ce85f60 100644 --- a/src/rpc/util.h +++ b/src/rpc/util.h @@ -77,8 +77,6 @@ extern uint256 ParseHashO(const UniValue& o, std::string strKey); extern std::vector<unsigned char> ParseHexV(const UniValue& v, std::string strName); extern std::vector<unsigned char> ParseHexO(const UniValue& o, std::string strKey); -CoinStatsHashType ParseHashType(const UniValue& param, const CoinStatsHashType default_type); - extern CAmount AmountFromValue(const UniValue& value); extern std::string HelpExampleCli(const std::string& methodname, const std::string& args); extern std::string HelpExampleRpc(const std::string& methodname, const std::string& args); diff --git a/src/scheduler.h b/src/scheduler.h index d7fe00d1b4..9eec8c0fa0 100644 --- a/src/scheduler.h +++ b/src/scheduler.h @@ -9,6 +9,7 @@ #include <functional> #include <list> #include <map> +#include <thread> #include <sync.h> @@ -35,6 +36,8 @@ public: CScheduler(); ~CScheduler(); + std::thread m_service_thread; + typedef std::function<void()> Function; /** Call func at/after time t */ @@ -62,8 +65,7 @@ public: void MockForward(std::chrono::seconds delta_seconds); /** - * Services the queue 'forever'. Should be run in a thread, - * and interrupted using boost::interrupt_thread + * Services the queue 'forever'. Should be run in a thread. 
*/ void serviceQueue(); @@ -72,12 +74,14 @@ public: { WITH_LOCK(newTaskMutex, stopRequested = true); newTaskScheduled.notify_all(); + if (m_service_thread.joinable()) m_service_thread.join(); } /** Tell any threads running serviceQueue to stop when there is no work left to be done */ void StopWhenDrained() { WITH_LOCK(newTaskMutex, stopWhenEmpty = true); newTaskScheduled.notify_all(); + if (m_service_thread.joinable()) m_service_thread.join(); } /** diff --git a/src/script/bitcoinconsensus.h b/src/script/bitcoinconsensus.h index c5dceac848..b6939127e1 100644 --- a/src/script/bitcoinconsensus.h +++ b/src/script/bitcoinconsensus.h @@ -11,14 +11,12 @@ #if defined(BUILD_BITCOIN_INTERNAL) && defined(HAVE_CONFIG_H) #include <config/bitcoin-config.h> #if defined(_WIN32) - #if defined(DLL_EXPORT) - #if defined(HAVE_FUNC_ATTRIBUTE_DLLEXPORT) - #define EXPORT_SYMBOL __declspec(dllexport) - #else - #define EXPORT_SYMBOL - #endif + #if defined(HAVE_DLLEXPORT_ATTRIBUTE) + #define EXPORT_SYMBOL __declspec(dllexport) + #else + #define EXPORT_SYMBOL #endif - #elif defined(HAVE_FUNC_ATTRIBUTE_VISIBILITY) + #elif defined(HAVE_DEFAULT_VISIBILITY_ATTRIBUTE) #define EXPORT_SYMBOL __attribute__ ((visibility ("default"))) #endif #elif defined(MSC_VER) && !defined(STATIC_LIBBITCOINCONSENSUS) diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 9e4b8a9dd6..6ab01882ac 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -179,6 +179,9 @@ public: /** Get the descriptor string form including private data (if available in arg). */ virtual bool ToPrivateString(const SigningProvider& arg, std::string& out) const = 0; + /** Get the descriptor string form with the xpub at the last hardened derivation */ + virtual bool ToNormalizedString(const SigningProvider& arg, std::string& out, bool priv) const = 0; + /** Derive a private key, if private data is available in arg. */ virtual bool GetPrivKey(int pos, const SigningProvider& arg, CKey& key) const = 0; }; @@ -212,6 +215,21 @@ public: ret = "[" + OriginString() + "]" + std::move(sub); return true; } + bool ToNormalizedString(const SigningProvider& arg, std::string& ret, bool priv) const override + { + std::string sub; + if (!m_provider->ToNormalizedString(arg, sub, priv)) return false; + // If m_provider is a BIP32PubkeyProvider, we may get a string formatted like a OriginPubkeyProvider + // In that case, we need to strip out the leading square bracket and fingerprint from the substring, + // and append that to our own origin string. 
+ if (sub[0] == '[') { + sub = sub.substr(9); + ret = "[" + OriginString() + std::move(sub); + } else { + ret = "[" + OriginString() + "]" + std::move(sub); + } + return true; + } bool GetPrivKey(int pos, const SigningProvider& arg, CKey& key) const override { return m_provider->GetPrivKey(pos, arg, key); @@ -243,6 +261,12 @@ public: ret = EncodeSecret(key); return true; } + bool ToNormalizedString(const SigningProvider& arg, std::string& ret, bool priv) const override + { + if (priv) return ToPrivateString(arg, ret); + ret = ToString(); + return true; + } bool GetPrivKey(int pos, const SigningProvider& arg, CKey& key) const override { return arg.GetKey(m_pubkey.GetID(), key); @@ -386,6 +410,56 @@ public: } return true; } + bool ToNormalizedString(const SigningProvider& arg, std::string& out, bool priv) const override + { + // For hardened derivation type, just return the typical string, nothing to normalize + if (m_derive == DeriveType::HARDENED) { + if (priv) return ToPrivateString(arg, out); + out = ToString(); + return true; + } + // Step backwards to find the last hardened step in the path + int i = (int)m_path.size() - 1; + for (; i >= 0; --i) { + if (m_path.at(i) >> 31) { + break; + } + } + // Either no derivation or all unhardened derivation + if (i == -1) { + if (priv) return ToPrivateString(arg, out); + out = ToString(); + return true; + } + // Derive the xpub at the last hardened step + CExtKey xprv; + if (!GetExtKey(arg, xprv)) return false; + KeyOriginInfo origin; + int k = 0; + for (; k <= i; ++k) { + // Derive + xprv.Derive(xprv, m_path.at(k)); + // Add to the path + origin.path.push_back(m_path.at(k)); + // First derivation element, get the fingerprint for origin + if (k == 0) { + std::copy(xprv.vchFingerprint, xprv.vchFingerprint + 4, origin.fingerprint); + } + } + // Build the remaining path + KeyPath end_path; + for (; k < (int)m_path.size(); ++k) { + end_path.push_back(m_path.at(k)); + } + // Build the string + std::string origin_str = HexStr(origin.fingerprint) + FormatHDKeypath(origin.path); + out = "[" + origin_str + "]" + (priv ? EncodeExtKey(xprv) : EncodeExtPubKey(xprv.Neuter())) + FormatHDKeypath(end_path); + if (IsRange()) { + out += "/*"; + assert(m_derive == DeriveType::UNHARDENED); + } + return true; + } bool GetPrivKey(int pos, const SigningProvider& arg, CKey& key) const override { CExtKey extkey; @@ -449,7 +523,7 @@ public: return false; } - bool ToStringHelper(const SigningProvider* arg, std::string& out, bool priv) const + bool ToStringHelper(const SigningProvider* arg, std::string& out, bool priv, bool normalized) const { std::string extra = ToStringExtra(); size_t pos = extra.size() > 0 ? 
1 : 0; @@ -457,7 +531,9 @@ public: for (const auto& pubkey : m_pubkey_args) { if (pos++) ret += ","; std::string tmp; - if (priv) { + if (normalized) { + if (!pubkey->ToNormalizedString(*arg, tmp, priv)) return false; + } else if (priv) { if (!pubkey->ToPrivateString(*arg, tmp)) return false; } else { tmp = pubkey->ToString(); @@ -467,7 +543,7 @@ public: if (m_subdescriptor_arg) { if (pos++) ret += ","; std::string tmp; - if (!m_subdescriptor_arg->ToStringHelper(arg, tmp, priv)) return false; + if (!m_subdescriptor_arg->ToStringHelper(arg, tmp, priv, normalized)) return false; ret += std::move(tmp); } out = std::move(ret) + ")"; @@ -477,13 +553,20 @@ public: std::string ToString() const final { std::string ret; - ToStringHelper(nullptr, ret, false); + ToStringHelper(nullptr, ret, false, false); return AddChecksum(ret); } bool ToPrivateString(const SigningProvider& arg, std::string& out) const final { - bool ret = ToStringHelper(&arg, out, true); + bool ret = ToStringHelper(&arg, out, true, false); + out = AddChecksum(out); + return ret; + } + + bool ToNormalizedString(const SigningProvider& arg, std::string& out, bool priv) const override final + { + bool ret = ToStringHelper(&arg, out, priv, true); out = AddChecksum(out); return ret; } diff --git a/src/script/descriptor.h b/src/script/descriptor.h index 17b43e7c81..46d51fa587 100644 --- a/src/script/descriptor.h +++ b/src/script/descriptor.h @@ -93,6 +93,9 @@ struct Descriptor { /** Convert the descriptor to a private string. This fails if the provided provider does not have the relevant private keys. */ virtual bool ToPrivateString(const SigningProvider& provider, std::string& out) const = 0; + /** Convert the descriptor to a normalized string. Normalized descriptors have the xpub at the last hardened step. This fails if the provided provider does not have the private keys to derive that xpub. */ + virtual bool ToNormalizedString(const SigningProvider& provider, std::string& out, bool priv) const = 0; + /** Expand a descriptor at a specified position. * * @param[in] pos The position at which to expand the descriptor. If IsRange() is false, this is ignored. 
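The descriptor.h hunk above defines a normalized descriptor as one whose xpub sits at the last hardened derivation step, and the new ToNormalizedString code in descriptor.cpp walks the key path backwards to find that step, derives the xpub there, moves the hardened prefix into the key origin, and keeps the unhardened suffix as the remaining derivation. Below is a minimal standalone sketch of just that path-splitting and formatting rule, not part of the patch: it uses plain integers instead of real BIP32 derivation, and the fingerprint value, the "xpub..." placeholder, and the FormatPath helper are illustrative stand-ins for what the patch computes with CExtKey::Derive and EncodeExtPubKey. For the wpkh([ffffffff/13']xprv.../1/2/*) test vector further down, this split yields the shape [ffffffff/13']xpub.../1/2.

// Sketch only: shows the "split at the last hardened step" rule; no real keys are derived.
#include <cstdint>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>

static bool IsHardened(uint32_t element) { return element >> 31; }

// Format a path as /a/b'/c, marking hardened elements with an apostrophe
// (stand-in for Bitcoin Core's FormatHDKeypath).
static std::string FormatPath(const std::vector<uint32_t>& path)
{
    std::ostringstream out;
    for (uint32_t e : path) {
        out << '/' << (e & 0x7FFFFFFF);
        if (IsHardened(e)) out << '\'';
    }
    return out.str();
}

int main()
{
    // Example path 13'/1/2 with the hardened bit in the top bit of each element.
    const std::vector<uint32_t> path{13 | 0x80000000U, 1, 2};
    const std::string fingerprint = "ffffffff"; // example origin fingerprint
    const std::string xpub_at_split = "xpub..."; // placeholder for the xpub derived at the split point

    // Find the last hardened element; everything up to and including it moves
    // into the key origin, everything after it stays as unhardened derivation.
    int last_hardened = -1;
    for (size_t i = 0; i < path.size(); ++i) {
        if (IsHardened(path[i])) last_hardened = int(i);
    }

    if (last_hardened == -1) {
        // No hardened steps (or no derivation at all): nothing to normalize,
        // the descriptor string is returned unchanged by ToNormalizedString.
        std::cout << "already normalized\n";
        return 0;
    }

    const std::vector<uint32_t> origin_path(path.begin(), path.begin() + last_hardened + 1);
    const std::vector<uint32_t> remaining(path.begin() + last_hardened + 1, path.end());

    // Prints [ffffffff/13']xpub.../1/2 -- ranged descriptors additionally append "/*".
    std::cout << '[' << fingerprint << FormatPath(origin_path) << ']'
              << xpub_at_split << FormatPath(remaining) << '\n';
    return 0;
}
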
diff --git a/src/script/sigcache.cpp b/src/script/sigcache.cpp index 2bfe206597..c6d898a25a 100644 --- a/src/script/sigcache.cpp +++ b/src/script/sigcache.cpp @@ -11,7 +11,11 @@ #include <util/system.h> #include <cuckoocache.h> -#include <boost/thread/shared_mutex.hpp> + +#include <algorithm> +#include <mutex> +#include <shared_mutex> +#include <vector> namespace { /** @@ -27,7 +31,7 @@ private: CSHA256 m_salted_hasher_schnorr; typedef CuckooCache::cache<uint256, SignatureCacheHasher> map_type; map_type setValid; - boost::shared_mutex cs_sigcache; + std::shared_mutex cs_sigcache; public: CSignatureCache() @@ -62,13 +66,13 @@ public: bool Get(const uint256& entry, const bool erase) { - boost::shared_lock<boost::shared_mutex> lock(cs_sigcache); + std::shared_lock<std::shared_mutex> lock(cs_sigcache); return setValid.contains(entry, erase); } void Set(const uint256& entry) { - boost::unique_lock<boost::shared_mutex> lock(cs_sigcache); + std::unique_lock<std::shared_mutex> lock(cs_sigcache); setValid.insert(entry); } uint32_t setup_bytes(size_t n) diff --git a/src/script/sign.cpp b/src/script/sign.cpp index 8afbe9ebed..dba5ce621a 100644 --- a/src/script/sign.cpp +++ b/src/script/sign.cpp @@ -106,8 +106,7 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator std::vector<valtype> vSolutions; whichTypeRet = Solver(scriptPubKey, vSolutions); - switch (whichTypeRet) - { + switch (whichTypeRet) { case TxoutType::NONSTANDARD: case TxoutType::NULL_DATA: case TxoutType::WITNESS_UNKNOWN: @@ -173,10 +172,8 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator // Could not find witnessScript, add to missing sigdata.missing_witness_script = uint256(vSolutions[0]); return false; - - default: - return false; - } + } // no default case, so the compiler can warn about missing cases + assert(false); } static CScript PushAll(const std::vector<valtype>& values) diff --git a/src/script/standard.cpp b/src/script/standard.cpp index 7967c01858..4d882cd1f1 100644 --- a/src/script/standard.cpp +++ b/src/script/standard.cpp @@ -45,8 +45,7 @@ WitnessV0ScriptHash::WitnessV0ScriptHash(const CScript& in) std::string GetTxnOutputType(TxoutType t) { - switch (t) - { + switch (t) { case TxoutType::NONSTANDARD: return "nonstandard"; case TxoutType::PUBKEY: return "pubkey"; case TxoutType::PUBKEYHASH: return "pubkeyhash"; @@ -182,7 +181,8 @@ bool ExtractDestination(const CScript& scriptPubKey, CTxDestination& addressRet) std::vector<valtype> vSolutions; TxoutType whichType = Solver(scriptPubKey, vSolutions); - if (whichType == TxoutType::PUBKEY) { + switch (whichType) { + case TxoutType::PUBKEY: { CPubKey pubKey(vSolutions[0]); if (!pubKey.IsValid()) return false; @@ -190,26 +190,28 @@ bool ExtractDestination(const CScript& scriptPubKey, CTxDestination& addressRet) addressRet = PKHash(pubKey); return true; } - else if (whichType == TxoutType::PUBKEYHASH) - { + case TxoutType::PUBKEYHASH: { addressRet = PKHash(uint160(vSolutions[0])); return true; } - else if (whichType == TxoutType::SCRIPTHASH) - { + case TxoutType::SCRIPTHASH: { addressRet = ScriptHash(uint160(vSolutions[0])); return true; - } else if (whichType == TxoutType::WITNESS_V0_KEYHASH) { + } + case TxoutType::WITNESS_V0_KEYHASH: { WitnessV0KeyHash hash; std::copy(vSolutions[0].begin(), vSolutions[0].end(), hash.begin()); addressRet = hash; return true; - } else if (whichType == TxoutType::WITNESS_V0_SCRIPTHASH) { + } + case TxoutType::WITNESS_V0_SCRIPTHASH: { WitnessV0ScriptHash hash; 
std::copy(vSolutions[0].begin(), vSolutions[0].end(), hash.begin()); addressRet = hash; return true; - } else if (whichType == TxoutType::WITNESS_UNKNOWN || whichType == TxoutType::WITNESS_V1_TAPROOT) { + } + case TxoutType::WITNESS_UNKNOWN: + case TxoutType::WITNESS_V1_TAPROOT: { WitnessUnknown unk; unk.version = vSolutions[0][0]; std::copy(vSolutions[1].begin(), vSolutions[1].end(), unk.program); @@ -217,8 +219,13 @@ bool ExtractDestination(const CScript& scriptPubKey, CTxDestination& addressRet) addressRet = unk; return true; } - // Multisig txns have more than one address... - return false; + case TxoutType::MULTISIG: + // Multisig txns have more than one address... + case TxoutType::NULL_DATA: + case TxoutType::NONSTANDARD: + return false; + } // no default case, so the compiler can warn about missing cases + assert(false); } bool ExtractDestinations(const CScript& scriptPubKey, TxoutType& typeRet, std::vector<CTxDestination>& addressRet, int& nRequiredRet) diff --git a/src/streams.h b/src/streams.h index fce411d3df..e78da31cbc 100644 --- a/src/streams.h +++ b/src/streams.h @@ -6,17 +6,19 @@ #ifndef BITCOIN_STREAMS_H #define BITCOIN_STREAMS_H -#include <support/allocators/zeroafterfree.h> #include <serialize.h> +#include <span.h> +#include <support/allocators/zeroafterfree.h> #include <algorithm> #include <assert.h> #include <ios> #include <limits> +#include <optional> #include <stdint.h> #include <stdio.h> -#include <string> #include <string.h> +#include <string> #include <utility> #include <vector> @@ -202,14 +204,14 @@ public: class CDataStream { protected: - typedef CSerializeData vector_type; + using vector_type = SerializeData; vector_type vch; - unsigned int nReadPos; + unsigned int nReadPos{0}; int nType; int nVersion; -public: +public: typedef vector_type::allocator_type allocator_type; typedef vector_type::size_type size_type; typedef vector_type::difference_type difference_type; @@ -221,62 +223,22 @@ public: typedef vector_type::reverse_iterator reverse_iterator; explicit CDataStream(int nTypeIn, int nVersionIn) - { - Init(nTypeIn, nVersionIn); - } - - CDataStream(const_iterator pbegin, const_iterator pend, int nTypeIn, int nVersionIn) : vch(pbegin, pend) - { - Init(nTypeIn, nVersionIn); - } - - CDataStream(const char* pbegin, const char* pend, int nTypeIn, int nVersionIn) : vch(pbegin, pend) - { - Init(nTypeIn, nVersionIn); - } - - CDataStream(const vector_type& vchIn, int nTypeIn, int nVersionIn) : vch(vchIn.begin(), vchIn.end()) - { - Init(nTypeIn, nVersionIn); - } - - CDataStream(const std::vector<char>& vchIn, int nTypeIn, int nVersionIn) : vch(vchIn.begin(), vchIn.end()) - { - Init(nTypeIn, nVersionIn); - } + : nType{nTypeIn}, + nVersion{nVersionIn} {} - CDataStream(const std::vector<unsigned char>& vchIn, int nTypeIn, int nVersionIn) : vch(vchIn.begin(), vchIn.end()) - { - Init(nTypeIn, nVersionIn); - } + explicit CDataStream(Span<const uint8_t> sp, int nTypeIn, int nVersionIn) + : vch(sp.data(), sp.data() + sp.size()), + nType{nTypeIn}, + nVersion{nVersionIn} {} template <typename... Args> CDataStream(int nTypeIn, int nVersionIn, Args&&... 
args) + : nType{nTypeIn}, + nVersion{nVersionIn} { - Init(nTypeIn, nVersionIn); ::SerializeMany(*this, std::forward<Args>(args)...); } - void Init(int nTypeIn, int nVersionIn) - { - nReadPos = 0; - nType = nTypeIn; - nVersion = nVersionIn; - } - - CDataStream& operator+=(const CDataStream& b) - { - vch.insert(vch.end(), b.begin(), b.end()); - return *this; - } - - friend CDataStream operator+(const CDataStream& a, const CDataStream& b) - { - CDataStream ret = a; - ret += b; - return (ret); - } - std::string str() const { return (std::string(begin(), end())); @@ -297,12 +259,12 @@ public: const_reference operator[](size_type pos) const { return vch[pos + nReadPos]; } reference operator[](size_type pos) { return vch[pos + nReadPos]; } void clear() { vch.clear(); nReadPos = 0; } - iterator insert(iterator it, const char x=char()) { return vch.insert(it, x); } - void insert(iterator it, size_type n, const char x) { vch.insert(it, n, x); } + iterator insert(iterator it, const uint8_t x) { return vch.insert(it, x); } + void insert(iterator it, size_type n, const uint8_t x) { vch.insert(it, n, x); } value_type* data() { return vch.data() + nReadPos; } const value_type* data() const { return vch.data() + nReadPos; } - void insert(iterator it, std::vector<char>::const_iterator first, std::vector<char>::const_iterator last) + void insert(iterator it, std::vector<uint8_t>::const_iterator first, std::vector<uint8_t>::const_iterator last) { if (last == first) return; assert(last - first > 0); @@ -373,12 +335,17 @@ public: nReadPos = 0; } - bool Rewind(size_type n) + bool Rewind(std::optional<size_type> n = std::nullopt) { + // Total rewind if no size is passed + if (!n) { + nReadPos = 0; + return true; + } // Rewind by n characters if the buffer hasn't been compacted yet - if (n > nReadPos) + if (*n > nReadPos) return false; - nReadPos -= n; + nReadPos -= *n; return true; } @@ -462,11 +429,6 @@ public: return (*this); } - void GetAndClear(CSerializeData &d) { - d.insert(d.end(), begin(), end()); - clear(); - } - /** * XOR the contents of this stream with a certain key. * diff --git a/src/support/allocators/zeroafterfree.h b/src/support/allocators/zeroafterfree.h index c7ed5ef308..418f0ee656 100644 --- a/src/support/allocators/zeroafterfree.h +++ b/src/support/allocators/zeroafterfree.h @@ -42,7 +42,7 @@ struct zero_after_free_allocator : public std::allocator<T> { } }; -// Byte-vector that clears its contents before deletion. -typedef std::vector<char, zero_after_free_allocator<char> > CSerializeData; +/** Byte-vector that clears its contents before deletion. 
*/ +using SerializeData = std::vector<uint8_t, zero_after_free_allocator<uint8_t>>; #endif // BITCOIN_SUPPORT_ALLOCATORS_ZEROAFTERFREE_H diff --git a/src/test/base32_tests.cpp b/src/test/base32_tests.cpp index 3f7c5e99ee..3b44564ddb 100644 --- a/src/test/base32_tests.cpp +++ b/src/test/base32_tests.cpp @@ -17,7 +17,7 @@ BOOST_AUTO_TEST_CASE(base32_testvectors) static const std::string vstrIn[] = {"","f","fo","foo","foob","fooba","foobar"}; static const std::string vstrOut[] = {"","my======","mzxq====","mzxw6===","mzxw6yq=","mzxw6ytb","mzxw6ytboi======"}; static const std::string vstrOutNoPadding[] = {"","my","mzxq","mzxw6","mzxw6yq","mzxw6ytb","mzxw6ytboi"}; - for (unsigned int i=0; i<sizeof(vstrIn)/sizeof(vstrIn[0]); i++) + for (unsigned int i=0; i<std::size(vstrIn); i++) { std::string strEnc = EncodeBase32(vstrIn[i]); BOOST_CHECK_EQUAL(strEnc, vstrOut[i]); diff --git a/src/test/base64_tests.cpp b/src/test/base64_tests.cpp index bb8d102bd0..714fccffaa 100644 --- a/src/test/base64_tests.cpp +++ b/src/test/base64_tests.cpp @@ -16,7 +16,7 @@ BOOST_AUTO_TEST_CASE(base64_testvectors) { static const std::string vstrIn[] = {"","f","fo","foo","foob","fooba","foobar"}; static const std::string vstrOut[] = {"","Zg==","Zm8=","Zm9v","Zm9vYg==","Zm9vYmE=","Zm9vYmFy"}; - for (unsigned int i=0; i<sizeof(vstrIn)/sizeof(vstrIn[0]); i++) + for (unsigned int i=0; i<std::size(vstrIn); i++) { std::string strEnc = EncodeBase64(vstrIn[i]); BOOST_CHECK_EQUAL(strEnc, vstrOut[i]); diff --git a/src/test/blockfilter_index_tests.cpp b/src/test/blockfilter_index_tests.cpp index 00c4bdc14e..633a95ce96 100644 --- a/src/test/blockfilter_index_tests.cpp +++ b/src/test/blockfilter_index_tests.cpp @@ -178,7 +178,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) const CBlockIndex* block_index; { LOCK(cs_main); - block_index = LookupBlockIndex(block->GetHash()); + block_index = g_chainman.m_blockman.LookupBlockIndex(block->GetHash()); } BOOST_CHECK(filter_index.BlockUntilSyncedToCurrentChain()); @@ -196,7 +196,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) const CBlockIndex* block_index; { LOCK(cs_main); - block_index = LookupBlockIndex(block->GetHash()); + block_index = g_chainman.m_blockman.LookupBlockIndex(block->GetHash()); } BOOST_CHECK(filter_index.BlockUntilSyncedToCurrentChain()); @@ -210,7 +210,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) const CBlockIndex* block_index; { LOCK(cs_main); - block_index = LookupBlockIndex(block->GetHash()); + block_index = g_chainman.m_blockman.LookupBlockIndex(block->GetHash()); } BOOST_CHECK(filter_index.BlockUntilSyncedToCurrentChain()); @@ -231,14 +231,14 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) { LOCK(cs_main); - block_index = LookupBlockIndex(chainA[i]->GetHash()); + block_index = g_chainman.m_blockman.LookupBlockIndex(chainA[i]->GetHash()); } BOOST_CHECK(filter_index.BlockUntilSyncedToCurrentChain()); CheckFilterLookups(filter_index, block_index, chainA_last_header); { LOCK(cs_main); - block_index = LookupBlockIndex(chainB[i]->GetHash()); + block_index = g_chainman.m_blockman.LookupBlockIndex(chainB[i]->GetHash()); } BOOST_CHECK(filter_index.BlockUntilSyncedToCurrentChain()); CheckFilterLookups(filter_index, block_index, chainB_last_header); diff --git a/src/test/bloom_tests.cpp b/src/test/bloom_tests.cpp index 736c260eeb..5a98558240 100644 --- a/src/test/bloom_tests.cpp +++ b/src/test/bloom_tests.cpp @@ -42,11 +42,7 @@ 
BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize) CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << filter; - std::vector<unsigned char> vch = ParseHex("03614e9b050000000000000001"); - std::vector<char> expected(vch.size()); - - for (unsigned int i = 0; i < vch.size(); i++) - expected[i] = (char)vch[i]; + std::vector<uint8_t> expected = ParseHex("03614e9b050000000000000001"); BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(), expected.begin(), expected.end()); @@ -72,11 +68,7 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_serialize_with_tweak) CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << filter; - std::vector<unsigned char> vch = ParseHex("03ce4299050000000100008001"); - std::vector<char> expected(vch.size()); - - for (unsigned int i = 0; i < vch.size(); i++) - expected[i] = (char)vch[i]; + std::vector<uint8_t> expected = ParseHex("03ce4299050000000100008001"); BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(), expected.begin(), expected.end()); } @@ -96,11 +88,7 @@ BOOST_AUTO_TEST_CASE(bloom_create_insert_key) CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); stream << filter; - std::vector<unsigned char> vch = ParseHex("038fc16b080000000000000001"); - std::vector<char> expected(vch.size()); - - for (unsigned int i = 0; i < vch.size(); i++) - expected[i] = (char)vch[i]; + std::vector<unsigned char> expected = ParseHex("038fc16b080000000000000001"); BOOST_CHECK_EQUAL_COLLECTIONS(stream.begin(), stream.end(), expected.begin(), expected.end()); } @@ -352,11 +340,7 @@ BOOST_AUTO_TEST_CASE(merkle_block_3_and_serialize) CDataStream merkleStream(SER_NETWORK, PROTOCOL_VERSION); merkleStream << merkleBlock; - std::vector<unsigned char> vch = ParseHex("0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b4b8b0000000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f196367291b4d4c86041b8fa45d630100000001b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f19630101"); - std::vector<char> expected(vch.size()); - - for (unsigned int i = 0; i < vch.size(); i++) - expected[i] = (char)vch[i]; + std::vector<uint8_t> expected = ParseHex("0100000079cda856b143d9db2c1caff01d1aecc8630d30625d10e8b4b8b0000000000000b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f196367291b4d4c86041b8fa45d630100000001b50cc069d6a3e33e3ff84a5c41d9d3febe7c770fdcc96b2c3ff60abe184f19630101"); BOOST_CHECK_EQUAL_COLLECTIONS(expected.begin(), expected.end(), merkleStream.begin(), merkleStream.end()); } diff --git a/src/test/checkqueue_tests.cpp b/src/test/checkqueue_tests.cpp index 76d6727044..21921375b3 100644 --- a/src/test/checkqueue_tests.cpp +++ b/src/test/checkqueue_tests.cpp @@ -10,7 +10,6 @@ #include <util/time.h> #include <boost/test/unit_test.hpp> -#include <boost/thread/thread.hpp> #include <atomic> #include <condition_variable> @@ -363,11 +362,11 @@ BOOST_AUTO_TEST_CASE(test_CheckQueueControl_Locks) { auto queue = MakeUnique<Standard_Queue>(QUEUE_BATCH_SIZE); { - boost::thread_group tg; + std::vector<std::thread> tg; std::atomic<int> nThreads {0}; std::atomic<int> fails {0}; for (size_t i = 0; i < 3; ++i) { - tg.create_thread( + tg.emplace_back( [&]{ CCheckQueueControl<FakeCheck> control(queue.get()); // While sleeping, no other thread should execute to this point @@ -376,11 +375,13 @@ BOOST_AUTO_TEST_CASE(test_CheckQueueControl_Locks) fails += observed != nThreads; }); } - tg.join_all(); + for (auto& thread: tg) { + if (thread.joinable()) thread.join(); + } BOOST_REQUIRE_EQUAL(fails, 0); } { - boost::thread_group tg; + std::vector<std::thread> tg; 
std::mutex m; std::condition_variable cv; bool has_lock{false}; @@ -389,7 +390,7 @@ BOOST_AUTO_TEST_CASE(test_CheckQueueControl_Locks) bool done_ack{false}; { std::unique_lock<std::mutex> l(m); - tg.create_thread([&]{ + tg.emplace_back([&]{ CCheckQueueControl<FakeCheck> control(queue.get()); std::unique_lock<std::mutex> ll(m); has_lock = true; @@ -415,7 +416,9 @@ BOOST_AUTO_TEST_CASE(test_CheckQueueControl_Locks) cv.notify_one(); BOOST_REQUIRE(!fails); } - tg.join_all(); + for (auto& thread: tg) { + if (thread.joinable()) thread.join(); + } } } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/cuckoocache_tests.cpp b/src/test/cuckoocache_tests.cpp index 3a951d28ae..35b66cfc53 100644 --- a/src/test/cuckoocache_tests.cpp +++ b/src/test/cuckoocache_tests.cpp @@ -1,13 +1,18 @@ // Copyright (c) 2012-2020 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#include <boost/test/unit_test.hpp> #include <cuckoocache.h> -#include <deque> #include <random.h> #include <script/sigcache.h> #include <test/util/setup_common.h> + +#include <boost/test/unit_test.hpp> + +#include <deque> +#include <mutex> +#include <shared_mutex> #include <thread> +#include <vector> /** Test Suite for CuckooCache * @@ -199,11 +204,11 @@ static void test_cache_erase_parallel(size_t megabytes) * "future proofed". */ std::vector<uint256> hashes_insert_copy = hashes; - boost::shared_mutex mtx; + std::shared_mutex mtx; { /** Grab lock to make sure we release inserts */ - boost::unique_lock<boost::shared_mutex> l(mtx); + std::unique_lock<std::shared_mutex> l(mtx); /** Insert the first half */ for (uint32_t i = 0; i < (n_insert / 2); ++i) set.insert(hashes_insert_copy[i]); @@ -217,7 +222,7 @@ static void test_cache_erase_parallel(size_t megabytes) /** Each thread is emplaced with x copy-by-value */ threads.emplace_back([&, x] { - boost::shared_lock<boost::shared_mutex> l(mtx); + std::shared_lock<std::shared_mutex> l(mtx); size_t ntodo = (n_insert/4)/3; size_t start = ntodo*x; size_t end = ntodo*(x+1); @@ -232,7 +237,7 @@ static void test_cache_erase_parallel(size_t megabytes) for (std::thread& t : threads) t.join(); /** Grab lock to make sure we observe erases */ - boost::unique_lock<boost::shared_mutex> l(mtx); + std::unique_lock<std::shared_mutex> l(mtx); /** Insert the second half */ for (uint32_t i = (n_insert / 2); i < n_insert; ++i) set.insert(hashes_insert_copy[i]); diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp index cf6009d591..0d480e35ea 100644 --- a/src/test/denialofservice_tests.cpp +++ b/src/test/denialofservice_tests.cpp @@ -85,7 +85,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) // Mock an outbound peer CAddress addr1(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK | NODE_WITNESS), INVALID_SOCKET, addr1, 0, 0, CAddress(), "", ConnectionType::OUTBOUND_FULL_RELAY); + CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK | NODE_WITNESS), INVALID_SOCKET, addr1, /* nKeyedNetGroupIn */ 0, /* nLocalHostNonceIn */ 0, CAddress(), /* pszDest */ "", ConnectionType::OUTBOUND_FULL_RELAY, /* inbound_onion */ false); dummyNode1.SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode1); @@ -136,7 +136,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) static void AddRandomOutboundPeer(std::vector<CNode *> &vNodes, PeerManager &peerLogic, CConnmanTest* connman) { CAddress addr(ip(g_insecure_rand_ctx.randbits(32)), NODE_NONE); 
- vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK | NODE_WITNESS), INVALID_SOCKET, addr, 0, 0, CAddress(), "", ConnectionType::OUTBOUND_FULL_RELAY)); + vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK | NODE_WITNESS), INVALID_SOCKET, addr, /* nKeyedNetGroupIn */ 0, /* nLocalHostNonceIn */ 0, CAddress(), /* pszDest */ "", ConnectionType::OUTBOUND_FULL_RELAY, /* inbound_onion */ false)); CNode &node = *vNodes.back(); node.SetCommonVersion(PROTOCOL_VERSION); @@ -229,7 +229,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) banman->ClearBanned(); CAddress addr1(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode1(id++, NODE_NETWORK, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", ConnectionType::INBOUND); + CNode dummyNode1(id++, NODE_NETWORK, INVALID_SOCKET, addr1, /* nKeyedNetGroupIn */ 0, /* nLocalHostNonceIn */ 0, CAddress(), /* pszDest */ "", ConnectionType::INBOUND, /* inbound_onion */ false); dummyNode1.SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode1); dummyNode1.fSuccessfullyConnected = true; @@ -242,7 +242,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) BOOST_CHECK(!banman->IsDiscouraged(ip(0xa0b0c001|0x0000ff00))); // Different IP, not discouraged CAddress addr2(ip(0xa0b0c002), NODE_NONE); - CNode dummyNode2(id++, NODE_NETWORK, INVALID_SOCKET, addr2, 1, 1, CAddress(), "", ConnectionType::INBOUND); + CNode dummyNode2(id++, NODE_NETWORK, INVALID_SOCKET, addr2, /* nKeyedNetGroupIn */ 1, /* nLocalHostNonceIn */ 1, CAddress(), /* pszDest */ "", ConnectionType::INBOUND, /* inbound_onion */ false); dummyNode2.SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode2); dummyNode2.fSuccessfullyConnected = true; @@ -279,7 +279,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) SetMockTime(nStartTime); // Overrides future calls to GetTime() CAddress addr(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode(id++, NODE_NETWORK, INVALID_SOCKET, addr, 4, 4, CAddress(), "", ConnectionType::INBOUND); + CNode dummyNode(id++, NODE_NETWORK, INVALID_SOCKET, addr, /* nKeyedNetGroupIn */ 4, /* nLocalHostNonceIn */ 4, CAddress(), /* pszDest */ "", ConnectionType::INBOUND, /* inbound_onion */ false); dummyNode.SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode); dummyNode.fSuccessfullyConnected = true; diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp index 20132d5782..acbd6a01ee 100644 --- a/src/test/descriptor_tests.cpp +++ b/src/test/descriptor_tests.cpp @@ -65,7 +65,7 @@ std::string UseHInsteadOfApostrophe(const std::string& desc) const std::set<std::vector<uint32_t>> ONLY_EMPTY{{}}; -void DoCheck(const std::string& prv, const std::string& pub, int flags, const std::vector<std::vector<std::string>>& scripts, const Optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY, +void DoCheck(const std::string& prv, const std::string& pub, const std::string& norm_prv, const std::string& norm_pub, int flags, const std::vector<std::vector<std::string>>& scripts, const Optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY, bool replace_apostrophe_with_h_in_prv=false, bool replace_apostrophe_with_h_in_pub=false) { FlatSigningProvider keys_priv, keys_pub; @@ -112,6 +112,17 @@ void DoCheck(const std::string& prv, const std::string& pub, int flags, const st BOOST_CHECK(EqualDescriptor(prv, prv1)); BOOST_CHECK(!parse_pub->ToPrivateString(keys_pub, prv1)); + // Check that private can produce the normalized descriptors + std::string norm1; + 
BOOST_CHECK(parse_priv->ToNormalizedString(keys_priv, norm1, false)); + BOOST_CHECK(EqualDescriptor(norm1, norm_pub)); + BOOST_CHECK(parse_pub->ToNormalizedString(keys_priv, norm1, false)); + BOOST_CHECK(EqualDescriptor(norm1, norm_pub)); + BOOST_CHECK(parse_priv->ToNormalizedString(keys_priv, norm1, true)); + BOOST_CHECK(EqualDescriptor(norm1, norm_prv)); + BOOST_CHECK(parse_pub->ToNormalizedString(keys_priv, norm1, true)); + BOOST_CHECK(EqualDescriptor(norm1, norm_prv)); + // Check whether IsRange on both returns the expected result BOOST_CHECK_EQUAL(parse_pub->IsRange(), (flags & RANGE) != 0); BOOST_CHECK_EQUAL(parse_priv->IsRange(), (flags & RANGE) != 0); @@ -251,29 +262,29 @@ void DoCheck(const std::string& prv, const std::string& pub, int flags, const st BOOST_CHECK_MESSAGE(left_paths.empty(), "Not all expected key paths found: " + prv); } -void Check(const std::string& prv, const std::string& pub, int flags, const std::vector<std::vector<std::string>>& scripts, const Optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY) +void Check(const std::string& prv, const std::string& pub, const std::string& norm_prv, const std::string& norm_pub, int flags, const std::vector<std::vector<std::string>>& scripts, const Optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY) { bool found_apostrophes_in_prv = false; bool found_apostrophes_in_pub = false; // Do not replace apostrophes with 'h' in prv and pub - DoCheck(prv, pub, flags, scripts, type, paths); + DoCheck(prv, pub, norm_prv, norm_pub, flags, scripts, type, paths); // Replace apostrophes with 'h' in prv but not in pub, if apostrophes are found in prv if (prv.find('\'') != std::string::npos) { found_apostrophes_in_prv = true; - DoCheck(prv, pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */false); + DoCheck(prv, pub, norm_prv, norm_pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */false); } // Replace apostrophes with 'h' in pub but not in prv, if apostrophes are found in pub if (pub.find('\'') != std::string::npos) { found_apostrophes_in_pub = true; - DoCheck(prv, pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */false, /*replace_apostrophe_with_h_in_pub = */true); + DoCheck(prv, pub, norm_prv, norm_pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */false, /*replace_apostrophe_with_h_in_pub = */true); } // Replace apostrophes with 'h' both in prv and in pub, if apostrophes are found in both if (found_apostrophes_in_prv && found_apostrophes_in_pub) { - DoCheck(prv, pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */true); + DoCheck(prv, pub, norm_prv, norm_pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */true); } } @@ -284,50 +295,51 @@ BOOST_FIXTURE_TEST_SUITE(descriptor_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(descriptor_test) { // Basic single-key compressed - Check("combo(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "combo(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac","76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac","00149a1c78a507689f6f54b847ad1cef1e614ee23f1e","a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}}, nullopt); - 
Check("pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac"}}, nullopt); - Check("pkh([deadbeef/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh([deadbeef/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac"}}, OutputType::LEGACY, {{1,0x80000002UL,3,0x80000004UL}}); - Check("wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"00149a1c78a507689f6f54b847ad1cef1e614ee23f1e"}}, OutputType::BECH32); - Check("sh(wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}}, OutputType::P2SH_SEGWIT); + Check("combo(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "combo(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "combo(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "combo(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac","76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac","00149a1c78a507689f6f54b847ad1cef1e614ee23f1e","a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}}, nullopt); + Check("pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac"}}, nullopt); + Check("pkh([deadbeef/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh([deadbeef/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "pkh([deadbeef/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh([deadbeef/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac"}}, OutputType::LEGACY, {{1,0x80000002UL,3,0x80000004UL}}); + Check("wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"00149a1c78a507689f6f54b847ad1cef1e614ee23f1e"}}, OutputType::BECH32); + Check("sh(wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", "sh(wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}}, OutputType::P2SH_SEGWIT); CheckUnparsable("sh(wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY2))", "sh(wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5))", "Pubkey '03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5' is invalid"); // Invalid pubkey CheckUnparsable("pkh(deadbeef/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", 
"pkh(deadbeef/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "Key origin start '[ character expected but not found, got 'd' instead"); // Missing start bracket in key origin CheckUnparsable("pkh([deadbeef]/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh([deadbeef]/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "Multiple ']' characters found for a single pubkey"); // Multiple end brackets in key origin // Basic single-key uncompressed - Check("combo(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "combo(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac","76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}}, nullopt); - Check("pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac"}}, nullopt); - Check("pkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}}, OutputType::LEGACY); + Check("combo(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "combo(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "combo(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "combo(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)",SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac","76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}}, nullopt); + Check("pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac"}}, nullopt); + Check("pkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "pkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}}, OutputType::LEGACY); CheckUnparsable("wpkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "wpkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "Uncompressed keys are not 
allowed"); // No uncompressed keys in witness CheckUnparsable("wsh(pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss))", "wsh(pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235))", "Uncompressed keys are not allowed"); // No uncompressed keys in witness CheckUnparsable("sh(wpkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss))", "sh(wpkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235))", "Uncompressed keys are not allowed"); // No uncompressed keys in witness // Some unconventional single-key constructions - Check("sh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141857af51a5e516552b3086430fd8ce55f7c1a52487"}}, OutputType::LEGACY); - Check("sh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141a31ad23bf49c247dd531a623c2ef57da3c400c587"}}, OutputType::LEGACY); - Check("wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"00202e271faa2325c199d25d22e1ead982e45b64eeb4f31e73dbdf41bd4b5fec23fa"}}, OutputType::BECH32); - Check("wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"0020338e023079b91c58571b20e602d7805fb808c22473cbc391a41b1bd3a192e75b"}}, OutputType::BECH32); - Check("sh(wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a91472d0c5a3bfad8c3e7bd5303a72b94240e80b6f1787"}}, OutputType::P2SH_SEGWIT); - Check("sh(wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a914b61b92e2ca21bac1e72a3ab859a742982bea960a87"}}, OutputType::P2SH_SEGWIT); + Check("sh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", "sh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141857af51a5e516552b3086430fd8ce55f7c1a52487"}}, OutputType::LEGACY); + Check("sh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", "sh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141a31ad23bf49c247dd531a623c2ef57da3c400c587"}}, OutputType::LEGACY); + Check("wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", "wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"00202e271faa2325c199d25d22e1ead982e45b64eeb4f31e73dbdf41bd4b5fec23fa"}}, OutputType::BECH32); + Check("wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", 
"wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"0020338e023079b91c58571b20e602d7805fb808c22473cbc391a41b1bd3a192e75b"}}, OutputType::BECH32); + Check("sh(wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", "sh(wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a91472d0c5a3bfad8c3e7bd5303a72b94240e80b6f1787"}}, OutputType::P2SH_SEGWIT); + Check("sh(wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", "sh(wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a914b61b92e2ca21bac1e72a3ab859a742982bea960a87"}}, OutputType::P2SH_SEGWIT); // Versions with BIP32 derivations - Check("combo([01234567]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([01234567]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", SIGNABLE, {{"2102d2b36900396c9282fa14628566582f206a5dd0bcc8d5e892611806cafb0301f0ac","76a91431a507b815593dfc51ffc7245ae7e5aee304246e88ac","001431a507b815593dfc51ffc7245ae7e5aee304246e","a9142aafb926eb247cb18240a7f4c07983ad1f37922687"}}, nullopt); - Check("pk(xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)", "pk(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)", DEFAULT, {{"210379e45b3cf75f9c5f9befd8e9506fb962f6a9d185ac87001ec44a8d3df8d4a9e3ac"}}, nullopt, {{0}}); - Check("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0)", HARDENED, {{"76a914ebdc90806a9c4356c1c88e42216611e1cb4c1c1788ac"}}, OutputType::LEGACY, {{0xFFFFFFFFUL,0}}); - Check("wpkh([ffffffff/13']xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*)", "wpkh([ffffffff/13']xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)", RANGE, {{"0014326b2249e3a25d5dc60935f044ee835d090ba859"},{"0014af0bd98abc2f2cae66e36896a39ffe2d32984fb7"},{"00141fa798efd1cbf95cebf912c031b8a4a6e9fb9f27"}}, OutputType::BECH32, {{0x8000000DUL, 1, 2, 0}, {0x8000000DUL, 1, 2, 1}, {0x8000000DUL, 1, 2, 2}}); - Check("sh(wpkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "sh(wpkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", RANGE | HARDENED | DERIVE_HARDENED, {{"a9149a4d9901d6af519b2a23d4a2f51650fcba87ce7b87"},{"a914bed59fc0024fae941d6e20a3b44a109ae740129287"},{"a9148483aa1116eb9c05c482a72bada4b1db24af654387"}}, OutputType::P2SH_SEGWIT, {{10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}}); - 
Check("combo(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*)", "combo(xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*)", RANGE, {{"2102df12b7035bdac8e3bab862a3a83d06ea6b17b6753d52edecba9be46f5d09e076ac","76a914f90e3178ca25f2c808dc76624032d352fdbdfaf288ac","0014f90e3178ca25f2c808dc76624032d352fdbdfaf2","a91408f3ea8c68d4a7585bf9e8bda226723f70e445f087"},{"21032869a233c9adff9a994e4966e5b821fd5bac066da6c3112488dc52383b4a98ecac","76a914a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b788ac","0014a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b7","a91473e39884cb71ae4e5ac9739e9225026c99763e6687"}}, nullopt, {{0}, {1}}); + Check("combo([01234567]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([01234567]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", "combo([01234567]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([01234567]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", SIGNABLE, {{"2102d2b36900396c9282fa14628566582f206a5dd0bcc8d5e892611806cafb0301f0ac","76a91431a507b815593dfc51ffc7245ae7e5aee304246e88ac","001431a507b815593dfc51ffc7245ae7e5aee304246e","a9142aafb926eb247cb18240a7f4c07983ad1f37922687"}}, nullopt); + Check("pk(xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)", "pk(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)", "pk(xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)", "pk(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)", DEFAULT, {{"210379e45b3cf75f9c5f9befd8e9506fb962f6a9d185ac87001ec44a8d3df8d4a9e3ac"}}, nullopt, {{0}}); + Check("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0)", "pkh([bd16bee5/2147483647']xprv9vHkqa6XAPwKqSKSEJMcAB3yoCZhaSVsGZbSkFY5L3Lfjjk8sjZucbsbvEw5o3QrSA69nPfZDCgFnNnLhQ2ohpZuwummndnPasDw2Qr6dC2/0)", "pkh([bd16bee5/2147483647']xpub69H7F5dQzmVd3vPuLKtcXJziMEQByuDidnX3YdwgtNsecY5HRGtAAQC5mXTt4dsv9RzyjgDjAQs9VGVV6ydYCHnprc9vvaA5YtqWyL6hyds/0)", HARDENED, {{"76a914ebdc90806a9c4356c1c88e42216611e1cb4c1c1788ac"}}, OutputType::LEGACY, {{0xFFFFFFFFUL,0}}); + Check("wpkh([ffffffff/13']xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*)", "wpkh([ffffffff/13']xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)", "wpkh([ffffffff/13']xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*)", "wpkh([ffffffff/13']xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)", RANGE, {{"0014326b2249e3a25d5dc60935f044ee835d090ba859"},{"0014af0bd98abc2f2cae66e36896a39ffe2d32984fb7"},{"00141fa798efd1cbf95cebf912c031b8a4a6e9fb9f27"}}, OutputType::BECH32, {{0x8000000DUL, 1, 
2, 0}, {0x8000000DUL, 1, 2, 1}, {0x8000000DUL, 1, 2, 2}}); + Check("sh(wpkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "sh(wpkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", "sh(wpkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "sh(wpkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", RANGE | HARDENED | DERIVE_HARDENED, {{"a9149a4d9901d6af519b2a23d4a2f51650fcba87ce7b87"},{"a914bed59fc0024fae941d6e20a3b44a109ae740129287"},{"a9148483aa1116eb9c05c482a72bada4b1db24af654387"}}, OutputType::P2SH_SEGWIT, {{10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}}); + Check("combo(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*)", "combo(xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*)", "combo(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*)", "combo(xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*)", RANGE, {{"2102df12b7035bdac8e3bab862a3a83d06ea6b17b6753d52edecba9be46f5d09e076ac","76a914f90e3178ca25f2c808dc76624032d352fdbdfaf288ac","0014f90e3178ca25f2c808dc76624032d352fdbdfaf2","a91408f3ea8c68d4a7585bf9e8bda226723f70e445f087"},{"21032869a233c9adff9a994e4966e5b821fd5bac066da6c3112488dc52383b4a98ecac","76a914a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b788ac","0014a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b7","a91473e39884cb71ae4e5ac9739e9225026c99763e6687"}}, nullopt, {{0}, {1}}); CheckUnparsable("combo([012345678]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([012345678]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", "Fingerprint is not 4 bytes (9 characters instead of 8 characters)"); // Too long key fingerprint CheckUnparsable("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483648)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483648)", "Key path value 2147483648 is out of range"); // BIP 32 path element overflow CheckUnparsable("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/1aa)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/1aa)", "Key path value '1aa' is not a valid uint32"); // Path is not valid uint + Check("pkh([01234567/10/20]xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0)", "pkh([01234567/10/20]xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0)", "pkh([01234567/10/20/2147483647']xprv9vHkqa6XAPwKqSKSEJMcAB3yoCZhaSVsGZbSkFY5L3Lfjjk8sjZucbsbvEw5o3QrSA69nPfZDCgFnNnLhQ2ohpZuwummndnPasDw2Qr6dC2/0)", 
"pkh([01234567/10/20/2147483647']xpub69H7F5dQzmVd3vPuLKtcXJziMEQByuDidnX3YdwgtNsecY5HRGtAAQC5mXTt4dsv9RzyjgDjAQs9VGVV6ydYCHnprc9vvaA5YtqWyL6hyds/0)", HARDENED, {{"76a914ebdc90806a9c4356c1c88e42216611e1cb4c1c1788ac"}}, OutputType::LEGACY, {{10, 20, 0xFFFFFFFFUL, 0}}); // Multisig constructions - Check("multi(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "multi(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt); - Check("sortedmulti(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "sortedmulti(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt); - Check("sortedmulti(1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "sortedmulti(1,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt); - Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}}); - Check("sortedmulti(2,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/*,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0/0/*)", "sortedmulti(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0/0/*)", RANGE, {{"5221025d5fc65ebb8d44a5274b53bac21ff8307fec2334a32df05553459f8b1f7fe1b62102fbd47cc8034098f0e6a94c6aeee8528abf0a2153a5d8e46d325b7284c046784652ae"}, {"52210264fd4d1f5dea8ded94c61e9641309349b62f27fbffe807291f664e286bfbe6472103f4ece6dfccfa37b211eb3d0af4d0c61dba9ef698622dc17eecdf764beeb005a652ae"}, 
{"5221022ccabda84c30bad578b13c89eb3b9544ce149787e5b538175b1d1ba259cbb83321024d902e1a2fc7a8755ab5b694c575fce742c48d9ff192e63df5193e4c7afe1f9c52ae"}}, nullopt, {{0}, {1}, {2}, {0, 0, 0}, {0, 0, 1}, {0, 0, 2}}); - Check("wsh(multi(2,xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", HARDENED | RANGE | DERIVE_HARDENED, {{"0020b92623201f3bb7c3771d45b2ad1d0351ea8fbf8cfe0a0e570264e1075fa1948f"},{"002036a08bbe4923af41cf4316817c93b8d37e2f635dd25cfff06bd50df6ae7ea203"},{"0020a96e7ab4607ca6b261bfe3245ffda9c746b28d3f59e83d34820ec0e2b36c139c"}}, OutputType::BECH32, {{0xFFFFFFFFUL,0}, {1,2,0}, {1,2,1}, {1,2,2}, {10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}}); - Check("sh(wsh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9)))","sh(wsh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232)))", SIGNABLE, {{"a9147fc63e13dc25e8a95a3cee3d9a714ac3afd96f1e87"}}, OutputType::P2SH_SEGWIT); + 
Check("multi(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "multi(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "multi(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "multi(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt); + Check("sortedmulti(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "sortedmulti(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "sortedmulti(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "sortedmulti(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt); + Check("sortedmulti(1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "sortedmulti(1,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "sortedmulti(1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "sortedmulti(1,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt); + Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", "sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", 
"sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}}); + Check("sortedmulti(2,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/*,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0/0/*)", "sortedmulti(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0/0/*)", "sortedmulti(2,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/*,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0/0/*)", "sortedmulti(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0/0/*)", RANGE, {{"5221025d5fc65ebb8d44a5274b53bac21ff8307fec2334a32df05553459f8b1f7fe1b62102fbd47cc8034098f0e6a94c6aeee8528abf0a2153a5d8e46d325b7284c046784652ae"}, {"52210264fd4d1f5dea8ded94c61e9641309349b62f27fbffe807291f664e286bfbe6472103f4ece6dfccfa37b211eb3d0af4d0c61dba9ef698622dc17eecdf764beeb005a652ae"}, {"5221022ccabda84c30bad578b13c89eb3b9544ce149787e5b538175b1d1ba259cbb83321024d902e1a2fc7a8755ab5b694c575fce742c48d9ff192e63df5193e4c7afe1f9c52ae"}}, nullopt, {{0}, {1}, {2}, {0, 0, 0}, {0, 0, 1}, {0, 0, 2}}); + Check("wsh(multi(2,xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", "wsh(multi(2,[bd16bee5/2147483647']xprv9vHkqa6XAPwKqSKSEJMcAB3yoCZhaSVsGZbSkFY5L3Lfjjk8sjZucbsbvEw5o3QrSA69nPfZDCgFnNnLhQ2ohpZuwummndnPasDw2Qr6dC2/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,[bd16bee5/2147483647']xpub69H7F5dQzmVd3vPuLKtcXJziMEQByuDidnX3YdwgtNsecY5HRGtAAQC5mXTt4dsv9RzyjgDjAQs9VGVV6ydYCHnprc9vvaA5YtqWyL6hyds/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", HARDENED | RANGE | DERIVE_HARDENED, 
{{"0020b92623201f3bb7c3771d45b2ad1d0351ea8fbf8cfe0a0e570264e1075fa1948f"},{"002036a08bbe4923af41cf4316817c93b8d37e2f635dd25cfff06bd50df6ae7ea203"},{"0020a96e7ab4607ca6b261bfe3245ffda9c746b28d3f59e83d34820ec0e2b36c139c"}}, OutputType::BECH32, {{0xFFFFFFFFUL,0}, {1,2,0}, {1,2,1}, {1,2,2}, {10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}}); + Check("sh(wsh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9)))","sh(wsh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232)))", 
"sh(wsh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9)))","sh(wsh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232)))", SIGNABLE, {{"a9147fc63e13dc25e8a95a3cee3d9a714ac3afd96f1e87"}}, OutputType::P2SH_SEGWIT); 
CheckUnparsable("sh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9))","sh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232))", "P2SH script is too large, 547 bytes is larger than 520 bytes"); // P2SH does not fit 16 compressed pubkeys in a redeemscript CheckUnparsable("wsh(multi(2,[aaaaaaaa][aaaaaaaa]xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,[aaaaaaaa][aaaaaaaa]xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", "Multiple ']' characters found for a single pubkey"); // Double key origin descriptor CheckUnparsable("wsh(multi(2,[aaaagaaa]xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", 
"wsh(multi(2,[aaagaaaa]xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", "Fingerprint 'aaagaaaa' is not hex"); // Non hex fingerprint @@ -350,8 +362,8 @@ BOOST_AUTO_TEST_CASE(descriptor_test) CheckUnparsable("wsh(wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "wsh(wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", "Cannot have wsh within wsh"); // Cannot embed P2WSH inside P2WSH // Checksums - Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxfy", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5t", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}}); - Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}}); + Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxfy", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5t", "sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxfy", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5t", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}}); + Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", 
"sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", "sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}}); CheckUnparsable("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#", "Expected 8 character checksum, not 0 characters"); // Empty checksum CheckUnparsable("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxfyq", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5tq", "Expected 8 character checksum, not 9 characters"); // Too long checksum CheckUnparsable("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxf", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5", "Expected 8 character checksum, not 7 characters"); // Too short checksum diff --git a/src/test/fs_tests.cpp b/src/test/fs_tests.cpp index ec487aa3ff..e52cd5230c 100644 --- a/src/test/fs_tests.cpp +++ b/src/test/fs_tests.cpp @@ -5,6 +5,7 @@ #include <fs.h> #include <test/util/setup_common.h> #include <util/system.h> +#include <util/getuniquepath.h> #include <boost/test/unit_test.hpp> @@ -69,6 +70,21 @@ BOOST_AUTO_TEST_CASE(fsbridge_fstream) BOOST_CHECK_EQUAL(tmpfile1, fsbridge::AbsPathJoin(tmpfile1, "")); BOOST_CHECK_EQUAL(tmpfile1, fsbridge::AbsPathJoin(tmpfile1, {})); } + { + fs::path p1 = GetUniquePath(tmpfolder); + fs::path p2 = GetUniquePath(tmpfolder); + fs::path p3 = GetUniquePath(tmpfolder); + + // Ensure that the parent path is always the same. 
+        BOOST_CHECK_EQUAL(tmpfolder, p1.parent_path());
+        BOOST_CHECK_EQUAL(tmpfolder, p2.parent_path());
+        BOOST_CHECK_EQUAL(tmpfolder, p3.parent_path());
+
+        // Ensure that generated paths are actually different.
+        BOOST_CHECK(p1 != p2);
+        BOOST_CHECK(p2 != p3);
+        BOOST_CHECK(p1 != p3);
+    }
 }
-BOOST_AUTO_TEST_SUITE_END()
+BOOST_AUTO_TEST_SUITE_END()
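The fs_tests hunk above pins down the contract of GetUniquePath(tmpfolder): every result lands inside the requested parent directory and repeated calls never collide. A minimal standalone illustration of that same contract using std::filesystem follows; MakeUniquePathStandIn is a stand-in invented for the sketch, not the util/getuniquepath.h implementation.

#include <cassert>
#include <filesystem>
#include <random>
#include <string>

std::filesystem::path MakeUniquePathStandIn(const std::filesystem::path& parent)
{
    static std::mt19937_64 rng{std::random_device{}()};
    static const char hex[] = "0123456789abcdef";
    std::string name(16, '0');
    for (char& c : name) c = hex[rng() % 16]; // 16 random hex characters
    return parent / name;
}

int main()
{
    const auto parent = std::filesystem::temp_directory_path();
    const auto p1 = MakeUniquePathStandIn(parent);
    const auto p2 = MakeUniquePathStandIn(parent);
    assert(p1.parent_path() == parent && p2.parent_path() == parent);
    assert(p1 != p2);
}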
\ No newline at end of file diff --git a/src/test/fuzz/crypto.cpp b/src/test/fuzz/crypto.cpp index c2bb3a1a4e..17ac48fca7 100644 --- a/src/test/fuzz/crypto.cpp +++ b/src/test/fuzz/crypto.cpp @@ -4,7 +4,6 @@ #include <crypto/hmac_sha256.h> #include <crypto/hmac_sha512.h> -#include <crypto/muhash.h> #include <crypto/ripemd160.h> #include <crypto/sha1.h> #include <crypto/sha256.h> @@ -36,7 +35,6 @@ FUZZ_TARGET(crypto) CSHA512 sha512; SHA3_256 sha3; CSipHasher sip_hasher{fuzzed_data_provider.ConsumeIntegral<uint64_t>(), fuzzed_data_provider.ConsumeIntegral<uint64_t>()}; - MuHash3072 muhash; while (fuzzed_data_provider.ConsumeBool()) { CallOneOf( @@ -63,12 +61,6 @@ FUZZ_TARGET(crypto) (void)Hash(data); (void)Hash160(data); (void)sha512.Size(); - - if (fuzzed_data_provider.ConsumeBool()) { - muhash *= MuHash3072(data); - } else { - muhash /= MuHash3072(data); - } }, [&] { (void)hash160.Reset(); @@ -78,7 +70,6 @@ FUZZ_TARGET(crypto) (void)sha256.Reset(); (void)sha3.Reset(); (void)sha512.Reset(); - muhash = MuHash3072(); }, [&] { CallOneOf( @@ -122,10 +113,6 @@ FUZZ_TARGET(crypto) [&] { data.resize(SHA3_256::OUTPUT_SIZE); sha3.Finalize(data); - }, - [&] { - uint256 out; - muhash.Finalize(out); }); }); } diff --git a/src/test/fuzz/deserialize.cpp b/src/test/fuzz/deserialize.cpp index 74dec6475e..ba5f0c1a75 100644 --- a/src/test/fuzz/deserialize.cpp +++ b/src/test/fuzz/deserialize.cpp @@ -30,8 +30,6 @@ #include <stdint.h> #include <unistd.h> -#include <vector> - #include <test/fuzz/fuzz.h> void initialize_deserialize() @@ -71,7 +69,7 @@ T Deserialize(CDataStream ds) } template <typename T> -void DeserializeFromFuzzingInput(const std::vector<uint8_t>& buffer, T& obj, const Optional<int> protocol_version = nullopt) +void DeserializeFromFuzzingInput(FuzzBufferType buffer, T& obj, const Optional<int> protocol_version = nullopt) { CDataStream ds(buffer, SER_NETWORK, INIT_PROTO_VERSION); if (protocol_version) { diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp index fd87667755..edb270d437 100644 --- a/src/test/fuzz/fuzz.cpp +++ b/src/test/fuzz/fuzz.cpp @@ -13,15 +13,15 @@ const std::function<void(const std::string&)> G_TEST_LOG_FUN{}; -std::map<std::string_view, std::tuple<TypeTestOneInput, TypeInitialize>>& FuzzTargets() +std::map<std::string_view, std::tuple<TypeTestOneInput, TypeInitialize, TypeHidden>>& FuzzTargets() { - static std::map<std::string_view, std::tuple<TypeTestOneInput, TypeInitialize>> g_fuzz_targets; + static std::map<std::string_view, std::tuple<TypeTestOneInput, TypeInitialize, TypeHidden>> g_fuzz_targets; return g_fuzz_targets; } -void FuzzFrameworkRegisterTarget(std::string_view name, TypeTestOneInput target, TypeInitialize init) +void FuzzFrameworkRegisterTarget(std::string_view name, TypeTestOneInput target, TypeInitialize init, TypeHidden hidden) { - const auto it_ins = FuzzTargets().try_emplace(name, std::move(target), std::move(init)); + const auto it_ins = FuzzTargets().try_emplace(name, std::move(target), std::move(init), hidden); Assert(it_ins.second); } @@ -31,6 +31,7 @@ void initialize() { if (std::getenv("PRINT_ALL_FUZZ_TARGETS_AND_ABORT")) { for (const auto& t : FuzzTargets()) { + if (std::get<2>(t.second)) continue; std::cout << t.first << std::endl; } Assert(false); @@ -59,8 +60,7 @@ static bool read_stdin(std::vector<uint8_t>& data) extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { static const auto& test_one_input = *Assert(g_test_one_input); - const std::vector<uint8_t> input(data, data + size); - test_one_input(input); + 
test_one_input({data, size}); return 0; } @@ -72,7 +72,7 @@ extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv) } #if defined(PROVIDE_MAIN_FUNCTION) -__attribute__((weak)) int main(int argc, char** argv) +int main(int argc, char** argv) { initialize(); static const auto& test_one_input = *Assert(g_test_one_input); diff --git a/src/test/fuzz/fuzz.h b/src/test/fuzz/fuzz.h index 52841e069a..4abc52c15a 100644 --- a/src/test/fuzz/fuzz.h +++ b/src/test/fuzz/fuzz.h @@ -5,29 +5,36 @@ #ifndef BITCOIN_TEST_FUZZ_FUZZ_H #define BITCOIN_TEST_FUZZ_FUZZ_H +#include <span.h> + #include <cstdint> #include <functional> #include <string_view> -#include <vector> -using TypeTestOneInput = std::function<void(const std::vector<uint8_t>&)>; +using FuzzBufferType = Span<const uint8_t>; + +using TypeTestOneInput = std::function<void(FuzzBufferType)>; using TypeInitialize = std::function<void()>; +using TypeHidden = bool; -void FuzzFrameworkRegisterTarget(std::string_view name, TypeTestOneInput target, TypeInitialize init); +void FuzzFrameworkRegisterTarget(std::string_view name, TypeTestOneInput target, TypeInitialize init, TypeHidden hidden); -inline void FuzzFrameworkEmptyFun() {} +inline void FuzzFrameworkEmptyInitFun() {} #define FUZZ_TARGET(name) \ - FUZZ_TARGET_INIT(name, FuzzFrameworkEmptyFun) - -#define FUZZ_TARGET_INIT(name, init_fun) \ - void name##_fuzz_target(const std::vector<uint8_t>&); \ - struct name##_Before_Main { \ - name##_Before_Main() \ - { \ - FuzzFrameworkRegisterTarget(#name, name##_fuzz_target, init_fun); \ - } \ - } const static g_##name##_before_main; \ - void name##_fuzz_target(const std::vector<uint8_t>& buffer) + FUZZ_TARGET_INIT(name, FuzzFrameworkEmptyInitFun) + +#define FUZZ_TARGET_INIT(name, init_fun) \ + FUZZ_TARGET_INIT_HIDDEN(name, init_fun, false) + +#define FUZZ_TARGET_INIT_HIDDEN(name, init_fun, hidden) \ + void name##_fuzz_target(FuzzBufferType); \ + struct name##_Before_Main { \ + name##_Before_Main() \ + { \ + FuzzFrameworkRegisterTarget(#name, name##_fuzz_target, init_fun, hidden); \ + } \ + } const static g_##name##_before_main; \ + void name##_fuzz_target(FuzzBufferType buffer) #endif // BITCOIN_TEST_FUZZ_FUZZ_H diff --git a/src/test/fuzz/load_external_block_file.cpp b/src/test/fuzz/load_external_block_file.cpp index 207ee586bc..95597bf082 100644 --- a/src/test/fuzz/load_external_block_file.cpp +++ b/src/test/fuzz/load_external_block_file.cpp @@ -27,5 +27,5 @@ FUZZ_TARGET_INIT(load_external_block_file, initialize_load_external_block_file) return; } FlatFilePos flat_file_pos; - LoadExternalBlockFile(Params(), fuzzed_block_file, fuzzed_data_provider.ConsumeBool() ? &flat_file_pos : nullptr); + ::ChainstateActive().LoadExternalBlockFile(Params(), fuzzed_block_file, fuzzed_data_provider.ConsumeBool() ? 
&flat_file_pos : nullptr); } diff --git a/src/test/fuzz/muhash.cpp b/src/test/fuzz/muhash.cpp index 8f843ca773..2d761cef15 100644 --- a/src/test/fuzz/muhash.cpp +++ b/src/test/fuzz/muhash.cpp @@ -41,6 +41,11 @@ FUZZ_TARGET(muhash) muhash.Finalize(out2); assert(out == out2); + MuHash3072 muhash3; + muhash3 *= muhash; + uint256 out3; + muhash3.Finalize(out3); + assert(out == out3); // Test that removing all added elements brings the object back to it's initial state muhash /= muhash; @@ -50,4 +55,9 @@ FUZZ_TARGET(muhash) muhash2.Finalize(out2); assert(out == out2); + + muhash3.Remove(data); + muhash3.Remove(data2); + muhash3.Finalize(out3); + assert(out == out3); } diff --git a/src/test/fuzz/netaddress.cpp b/src/test/fuzz/netaddress.cpp index 6e9bb47ff6..a42080eb66 100644 --- a/src/test/fuzz/netaddress.cpp +++ b/src/test/fuzz/netaddress.cpp @@ -9,7 +9,6 @@ #include <cassert> #include <cstdint> -#include <netinet/in.h> #include <vector> FUZZ_TARGET(netaddress) diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp index e7cc0f5297..442e32d4ca 100644 --- a/src/test/fuzz/process_message.cpp +++ b/src/test/fuzz/process_message.cpp @@ -30,14 +30,32 @@ #include <iostream> #include <memory> #include <string> -#include <vector> namespace { const TestingSetup* g_setup; } // namespace +size_t& GetNumMsgTypes() +{ + static size_t g_num_msg_types{0}; + return g_num_msg_types; +} +#define FUZZ_TARGET_MSG(msg_type) \ + struct msg_type##_Count_Before_Main { \ + msg_type##_Count_Before_Main() \ + { \ + ++GetNumMsgTypes(); \ + } \ + } const static g_##msg_type##_count_before_main; \ + FUZZ_TARGET_INIT(process_message_##msg_type, initialize_process_message) \ + { \ + fuzz_target(buffer, #msg_type); \ + } + void initialize_process_message() { + Assert(GetNumMsgTypes() == getAllNetMessageTypes().size()); // If this fails, add or remove the message type below + static const auto testing_setup = MakeFuzzingContext<const TestingSetup>(); g_setup = testing_setup.get(); for (int i = 0; i < 2 * COINBASE_MATURITY; i++) { @@ -46,7 +64,7 @@ void initialize_process_message() SyncWithValidationInterfaceQueue(); } -void fuzz_target(const std::vector<uint8_t>& buffer, const std::string& LIMIT_TO_MESSAGE_TYPE) +void fuzz_target(FuzzBufferType buffer, const std::string& LIMIT_TO_MESSAGE_TYPE) { FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); @@ -61,7 +79,7 @@ void fuzz_target(const std::vector<uint8_t>& buffer, const std::string& LIMIT_TO } CNode& p2p_node = *ConsumeNodeAsUniquePtr(fuzzed_data_provider).release(); - const bool successfully_connected{true}; + const bool successfully_connected{fuzzed_data_provider.ConsumeBool()}; p2p_node.fSuccessfullyConnected = successfully_connected; connman.AddTestNode(p2p_node); g_setup->m_node.peerman->InitializeNode(&p2p_node); @@ -87,27 +105,37 @@ void fuzz_target(const std::vector<uint8_t>& buffer, const std::string& LIMIT_TO } FUZZ_TARGET_INIT(process_message, initialize_process_message) { fuzz_target(buffer, ""); } -FUZZ_TARGET_INIT(process_message_addr, initialize_process_message) { fuzz_target(buffer, "addr"); } -FUZZ_TARGET_INIT(process_message_block, initialize_process_message) { fuzz_target(buffer, "block"); } -FUZZ_TARGET_INIT(process_message_blocktxn, initialize_process_message) { fuzz_target(buffer, "blocktxn"); } -FUZZ_TARGET_INIT(process_message_cmpctblock, initialize_process_message) { fuzz_target(buffer, "cmpctblock"); } -FUZZ_TARGET_INIT(process_message_feefilter, initialize_process_message) { fuzz_target(buffer, 
"feefilter"); } -FUZZ_TARGET_INIT(process_message_filteradd, initialize_process_message) { fuzz_target(buffer, "filteradd"); } -FUZZ_TARGET_INIT(process_message_filterclear, initialize_process_message) { fuzz_target(buffer, "filterclear"); } -FUZZ_TARGET_INIT(process_message_filterload, initialize_process_message) { fuzz_target(buffer, "filterload"); } -FUZZ_TARGET_INIT(process_message_getaddr, initialize_process_message) { fuzz_target(buffer, "getaddr"); } -FUZZ_TARGET_INIT(process_message_getblocks, initialize_process_message) { fuzz_target(buffer, "getblocks"); } -FUZZ_TARGET_INIT(process_message_getblocktxn, initialize_process_message) { fuzz_target(buffer, "getblocktxn"); } -FUZZ_TARGET_INIT(process_message_getdata, initialize_process_message) { fuzz_target(buffer, "getdata"); } -FUZZ_TARGET_INIT(process_message_getheaders, initialize_process_message) { fuzz_target(buffer, "getheaders"); } -FUZZ_TARGET_INIT(process_message_headers, initialize_process_message) { fuzz_target(buffer, "headers"); } -FUZZ_TARGET_INIT(process_message_inv, initialize_process_message) { fuzz_target(buffer, "inv"); } -FUZZ_TARGET_INIT(process_message_mempool, initialize_process_message) { fuzz_target(buffer, "mempool"); } -FUZZ_TARGET_INIT(process_message_notfound, initialize_process_message) { fuzz_target(buffer, "notfound"); } -FUZZ_TARGET_INIT(process_message_ping, initialize_process_message) { fuzz_target(buffer, "ping"); } -FUZZ_TARGET_INIT(process_message_pong, initialize_process_message) { fuzz_target(buffer, "pong"); } -FUZZ_TARGET_INIT(process_message_sendcmpct, initialize_process_message) { fuzz_target(buffer, "sendcmpct"); } -FUZZ_TARGET_INIT(process_message_sendheaders, initialize_process_message) { fuzz_target(buffer, "sendheaders"); } -FUZZ_TARGET_INIT(process_message_tx, initialize_process_message) { fuzz_target(buffer, "tx"); } -FUZZ_TARGET_INIT(process_message_verack, initialize_process_message) { fuzz_target(buffer, "verack"); } -FUZZ_TARGET_INIT(process_message_version, initialize_process_message) { fuzz_target(buffer, "version"); } +FUZZ_TARGET_MSG(addr); +FUZZ_TARGET_MSG(addrv2); +FUZZ_TARGET_MSG(block); +FUZZ_TARGET_MSG(blocktxn); +FUZZ_TARGET_MSG(cfcheckpt); +FUZZ_TARGET_MSG(cfheaders); +FUZZ_TARGET_MSG(cfilter); +FUZZ_TARGET_MSG(cmpctblock); +FUZZ_TARGET_MSG(feefilter); +FUZZ_TARGET_MSG(filteradd); +FUZZ_TARGET_MSG(filterclear); +FUZZ_TARGET_MSG(filterload); +FUZZ_TARGET_MSG(getaddr); +FUZZ_TARGET_MSG(getblocks); +FUZZ_TARGET_MSG(getblocktxn); +FUZZ_TARGET_MSG(getcfcheckpt); +FUZZ_TARGET_MSG(getcfheaders); +FUZZ_TARGET_MSG(getcfilters); +FUZZ_TARGET_MSG(getdata); +FUZZ_TARGET_MSG(getheaders); +FUZZ_TARGET_MSG(headers); +FUZZ_TARGET_MSG(inv); +FUZZ_TARGET_MSG(mempool); +FUZZ_TARGET_MSG(merkleblock); +FUZZ_TARGET_MSG(notfound); +FUZZ_TARGET_MSG(ping); +FUZZ_TARGET_MSG(pong); +FUZZ_TARGET_MSG(sendaddrv2); +FUZZ_TARGET_MSG(sendcmpct); +FUZZ_TARGET_MSG(sendheaders); +FUZZ_TARGET_MSG(tx); +FUZZ_TARGET_MSG(verack); +FUZZ_TARGET_MSG(version); +FUZZ_TARGET_MSG(wtxidrelay); diff --git a/src/test/fuzz/process_messages.cpp b/src/test/fuzz/process_messages.cpp index 810f0aac92..ef45196671 100644 --- a/src/test/fuzz/process_messages.cpp +++ b/src/test/fuzz/process_messages.cpp @@ -46,7 +46,7 @@ FUZZ_TARGET_INIT(process_messages, initialize_process_messages) peers.push_back(ConsumeNodeAsUniquePtr(fuzzed_data_provider, i).release()); CNode& p2p_node = *peers.back(); - const bool successfully_connected{true}; + const bool successfully_connected{fuzzed_data_provider.ConsumeBool()}; 
p2p_node.fSuccessfullyConnected = successfully_connected; p2p_node.fPauseSend = false; g_setup->m_node.peerman->InitializeNode(&p2p_node); diff --git a/src/test/fuzz/script_assets_test_minimizer.cpp b/src/test/fuzz/script_assets_test_minimizer.cpp index 2091ad5d91..8d9a939dab 100644 --- a/src/test/fuzz/script_assets_test_minimizer.cpp +++ b/src/test/fuzz/script_assets_test_minimizer.cpp @@ -28,12 +28,12 @@ // // (normal build) // $ mkdir dump -// $ for N in $(seq 1 10); do TEST_DUMP_DIR=dump test/functional/feature_taproot --dumptests; done +// $ for N in $(seq 1 10); do TEST_DUMP_DIR=dump test/functional/feature_taproot.py --dumptests; done // $ ... // -// (fuzz test build) +// (libFuzzer build) // $ mkdir dump-min -// $ ./src/test/fuzz/script_assets_test_minimizer -merge=1 dump-min/ dump/ +// $ FUZZ=script_assets_test_minimizer ./src/test/fuzz/fuzz -merge=1 -use_value_profile=1 dump-min/ dump/ // $ (echo -en '[\n'; cat dump-min/* | head -c -2; echo -en '\n]') >script_assets_test.json namespace { @@ -190,7 +190,7 @@ ECCVerifyHandle handle; } // namespace -FUZZ_TARGET(script_assets_test_minimizer) +FUZZ_TARGET_INIT_HIDDEN(script_assets_test_minimizer, FuzzFrameworkEmptyInitFun, /* hidden */ true) { if (buffer.size() < 2 || buffer.back() != '\n' || buffer[buffer.size() - 2] != ',') return; const std::string str((const char*)buffer.data(), buffer.size() - 2); diff --git a/src/test/fuzz/string.cpp b/src/test/fuzz/string.cpp index 282a2cd8ca..ec8a3b23db 100644 --- a/src/test/fuzz/string.cpp +++ b/src/test/fuzz/string.cpp @@ -67,6 +67,7 @@ FUZZ_TARGET(string) } OutputType output_type; (void)ParseOutputType(random_string_1, output_type); + (void)RemovePrefix(random_string_1, random_string_2); (void)ResolveErrMsg(random_string_1, random_string_2); try { (void)RPCConvertNamedValues(random_string_1, random_string_vector); @@ -78,7 +79,9 @@ FUZZ_TARGET(string) } (void)SanitizeString(random_string_1); (void)SanitizeString(random_string_1, fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 3)); +#ifndef WIN32 (void)ShellEscape(random_string_1); +#endif // WIN32 int port_out; std::string host_out; SplitHostPort(random_string_1, port_out, host_out); diff --git a/src/test/fuzz/system.cpp b/src/test/fuzz/system.cpp index 47b38b6d23..3621702e45 100644 --- a/src/test/fuzz/system.cpp +++ b/src/test/fuzz/system.cpp @@ -54,7 +54,7 @@ FUZZ_TARGET(system) if (args_manager.GetArgFlags(argument_name) != nullopt) { return; } - args_manager.AddArg(argument_name, fuzzed_data_provider.ConsumeRandomLengthString(16), fuzzed_data_provider.ConsumeIntegral<unsigned int>(), options_category); + args_manager.AddArg(argument_name, fuzzed_data_provider.ConsumeRandomLengthString(16), fuzzed_data_provider.ConsumeIntegral<unsigned int>() & ~ArgsManager::COMMAND, options_category); }, [&] { // Avoid hitting: diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h index e48771efa2..7a2dcfe84a 100644 --- a/src/test/fuzz/util.h +++ b/src/test/fuzz/util.h @@ -61,7 +61,7 @@ void CallOneOf(FuzzedDataProvider& fuzzed_data_provider, Callables... 
callables) [[nodiscard]] inline CDataStream ConsumeDataStream(FuzzedDataProvider& fuzzed_data_provider, const size_t max_length = 4096) noexcept { - return {ConsumeRandomLengthByteVector(fuzzed_data_provider, max_length), SER_NETWORK, INIT_PROTO_VERSION}; + return CDataStream{ConsumeRandomLengthByteVector(fuzzed_data_provider, max_length), SER_NETWORK, INIT_PROTO_VERSION}; } [[nodiscard]] inline std::vector<std::string> ConsumeRandomLengthStringVector(FuzzedDataProvider& fuzzed_data_provider, const size_t max_vector_size = 16, const size_t max_string_length = 16) noexcept diff --git a/src/test/hash_tests.cpp b/src/test/hash_tests.cpp index 87f6470afa..41a626c0ea 100644 --- a/src/test/hash_tests.cpp +++ b/src/test/hash_tests.cpp @@ -107,14 +107,14 @@ BOOST_AUTO_TEST_CASE(siphash) // Check test vectors from spec, one byte at a time CSipHasher hasher2(0x0706050403020100ULL, 0x0F0E0D0C0B0A0908ULL); - for (uint8_t x=0; x<ARRAYLEN(siphash_4_2_testvec); ++x) + for (uint8_t x=0; x<std::size(siphash_4_2_testvec); ++x) { BOOST_CHECK_EQUAL(hasher2.Finalize(), siphash_4_2_testvec[x]); hasher2.Write(&x, 1); } // Check test vectors from spec, eight bytes at a time CSipHasher hasher3(0x0706050403020100ULL, 0x0F0E0D0C0B0A0908ULL); - for (uint8_t x=0; x<ARRAYLEN(siphash_4_2_testvec); x+=8) + for (uint8_t x=0; x<std::size(siphash_4_2_testvec); x+=8) { BOOST_CHECK_EQUAL(hasher3.Finalize(), siphash_4_2_testvec[x]); hasher3.Write(uint64_t(x)|(uint64_t(x+1)<<8)|(uint64_t(x+2)<<16)|(uint64_t(x+3)<<24)| diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp index e967273636..aa628371e6 100644 --- a/src/test/miner_tests.cpp +++ b/src/test/miner_tests.cpp @@ -28,7 +28,7 @@ struct MinerTestingSetup : public TestingSetup { void TestPackageSelection(const CChainParams& chainparams, const CScript& scriptPubKey, const std::vector<CTransactionRef>& txFirst) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_node.mempool->cs); bool TestSequenceLocks(const CTransaction& tx, int flags) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_node.mempool->cs) { - return CheckSequenceLocks(*m_node.mempool, tx, flags); + return CheckSequenceLocks(::ChainstateActive(), *m_node.mempool, tx, flags); } BlockAssembler AssemblerForTest(const CChainParams& params); }; @@ -123,6 +123,7 @@ void MinerTestingSetup::TestPackageSelection(const CChainParams& chainparams, co m_node.mempool->addUnchecked(entry.Fee(50000).Time(GetTime()).SpendsCoinbase(false).FromTx(tx)); std::unique_ptr<CBlockTemplate> pblocktemplate = AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey); + BOOST_REQUIRE_EQUAL(pblocktemplate->block.vtx.size(), 4U); BOOST_CHECK(pblocktemplate->block.vtx[1]->GetHash() == hashParentTx); BOOST_CHECK(pblocktemplate->block.vtx[2]->GetHash() == hashHighFeeTx); BOOST_CHECK(pblocktemplate->block.vtx[3]->GetHash() == hashMediumFeeTx); @@ -157,6 +158,7 @@ void MinerTestingSetup::TestPackageSelection(const CChainParams& chainparams, co hashLowFeeTx = tx.GetHash(); m_node.mempool->addUnchecked(entry.Fee(feeToUse+2).FromTx(tx)); pblocktemplate = AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey); + BOOST_REQUIRE_EQUAL(pblocktemplate->block.vtx.size(), 6U); BOOST_CHECK(pblocktemplate->block.vtx[4]->GetHash() == hashFreeTx); BOOST_CHECK(pblocktemplate->block.vtx[5]->GetHash() == hashLowFeeTx); @@ -191,6 +193,7 @@ void MinerTestingSetup::TestPackageSelection(const CChainParams& chainparams, co tx.vout[0].nValue = 100000000 - 10000; // 10k satoshi fee m_node.mempool->addUnchecked(entry.Fee(10000).FromTx(tx)); pblocktemplate = 
AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey); + BOOST_REQUIRE_EQUAL(pblocktemplate->block.vtx.size(), 9U); BOOST_CHECK(pblocktemplate->block.vtx[8]->GetHash() == hashLowFeeTx2); } @@ -216,11 +219,10 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) // We can't make transactions until we have inputs // Therefore, load 110 blocks :) - static_assert(sizeof(blockinfo) / sizeof(*blockinfo) == 110, "Should have 110 blocks to import"); + static_assert(std::size(blockinfo) == 110, "Should have 110 blocks to import"); int baseheight = 0; std::vector<CTransactionRef> txFirst; - for (unsigned int i = 0; i < sizeof(blockinfo)/sizeof(*blockinfo); ++i) - { + for (const auto& bi : blockinfo) { CBlock *pblock = &pblocktemplate->block; // pointer for convenience { LOCK(cs_main); @@ -229,7 +231,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) CMutableTransaction txCoinbase(*pblock->vtx[0]); txCoinbase.nVersion = 1; txCoinbase.vin[0].scriptSig = CScript(); - txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce); + txCoinbase.vin[0].scriptSig.push_back(bi.extranonce); txCoinbase.vin[0].scriptSig.push_back(::ChainActive().Height()); txCoinbase.vout.resize(1); // Ignore the (optional) segwit commitment added by CreateNewBlock (as the hardcoded nonces don't account for this) txCoinbase.vout[0].scriptPubKey = CScript(); @@ -239,7 +241,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) if (txFirst.size() < 4) txFirst.push_back(pblock->vtx[0]); pblock->hashMerkleRoot = BlockMerkleRoot(*pblock); - pblock->nNonce = blockinfo[i].nonce; + pblock->nNonce = bi.nonce; } std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock); BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlock(chainparams, shared_pblock, true, nullptr)); @@ -435,7 +437,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) tx.nLockTime = 0; hash = tx.GetHash(); m_node.mempool->addUnchecked(entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); - BOOST_CHECK(CheckFinalTx(CTransaction(tx), flags)); // Locktime passes + BOOST_CHECK(CheckFinalTx(::ChainActive().Tip(), CTransaction(tx), flags)); // Locktime passes BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks fail BOOST_CHECK(SequenceLocks(CTransaction(tx), flags, prevheights, CreateBlockIndex(::ChainActive().Tip()->nHeight + 2))); // Sequence locks pass on 2nd block @@ -445,7 +447,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) prevheights[0] = baseheight + 2; hash = tx.GetHash(); m_node.mempool->addUnchecked(entry.Time(GetTime()).FromTx(tx)); - BOOST_CHECK(CheckFinalTx(CTransaction(tx), flags)); // Locktime passes + BOOST_CHECK(CheckFinalTx(::ChainActive().Tip(), CTransaction(tx), flags)); // Locktime passes BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks fail for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) @@ -461,7 +463,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) tx.nLockTime = ::ChainActive().Tip()->nHeight + 1; hash = tx.GetHash(); m_node.mempool->addUnchecked(entry.Time(GetTime()).FromTx(tx)); - BOOST_CHECK(!CheckFinalTx(CTransaction(tx), flags)); // Locktime fails + BOOST_CHECK(!CheckFinalTx(::ChainActive().Tip(), CTransaction(tx), flags)); // Locktime fails BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks pass BOOST_CHECK(IsFinalTx(CTransaction(tx), ::ChainActive().Tip()->nHeight + 2, ::ChainActive().Tip()->GetMedianTimePast())); // Locktime passes on 2nd block @@ -472,7 +474,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) 
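Several hunks in this patch (the hash_tests and miner_tests changes above, and the scriptnum and sighash test changes further down) replace the hand-rolled sizeof(a)/sizeof(a[0]) and ARRAYLEN idioms with C++17 std::size, and the blockinfo index loop with a range-for. A minimal, self-contained illustration of the idiom, with a made-up array name:

    #include <iterator> // std::size (C++17)

    static const int test_values[] = {0, 1, -2, 127, -255};

    // Old: sizeof(test_values) / sizeof(test_values[0])
    // New: std::size(test_values) -- constexpr here, and also usable with std::array and std::vector.
    static_assert(std::size(test_values) == 5, "unexpected number of test values");

Unlike the sizeof division, std::size does not compile when handed a pointer that has decayed from an array, which is what makes it the safer spelling.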
prevheights[0] = baseheight + 4; hash = tx.GetHash(); m_node.mempool->addUnchecked(entry.Time(GetTime()).FromTx(tx)); - BOOST_CHECK(!CheckFinalTx(CTransaction(tx), flags)); // Locktime fails + BOOST_CHECK(!CheckFinalTx(::ChainActive().Tip(), CTransaction(tx), flags)); // Locktime fails BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks pass BOOST_CHECK(IsFinalTx(CTransaction(tx), ::ChainActive().Tip()->nHeight + 2, ::ChainActive().Tip()->GetMedianTimePast() + 1)); // Locktime passes 1 second later @@ -481,7 +483,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) prevheights[0] = ::ChainActive().Tip()->nHeight + 1; tx.nLockTime = 0; tx.vin[0].nSequence = 0; - BOOST_CHECK(CheckFinalTx(CTransaction(tx), flags)); // Locktime passes + BOOST_CHECK(CheckFinalTx(::ChainActive().Tip(), CTransaction(tx), flags)); // Locktime passes BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks pass tx.vin[0].nSequence = 1; BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks fail diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index a1b41e17ed..1c7c35528e 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -192,14 +192,15 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) id++, NODE_NETWORK, hSocket, addr, /* nKeyedNetGroupIn = */ 0, /* nLocalHostNonceIn = */ 0, - CAddress(), pszDest, ConnectionType::OUTBOUND_FULL_RELAY); + CAddress(), pszDest, ConnectionType::OUTBOUND_FULL_RELAY, + /* inbound_onion = */ false); BOOST_CHECK(pnode1->IsFullOutboundConn() == true); BOOST_CHECK(pnode1->IsManualConn() == false); BOOST_CHECK(pnode1->IsBlockOnlyConn() == false); BOOST_CHECK(pnode1->IsFeelerConn() == false); BOOST_CHECK(pnode1->IsAddrFetchConn() == false); BOOST_CHECK(pnode1->IsInboundConn() == false); - BOOST_CHECK(pnode1->IsInboundOnion() == false); + BOOST_CHECK(pnode1->m_inbound_onion == false); BOOST_CHECK_EQUAL(pnode1->ConnectedThroughNetwork(), Network::NET_IPV4); std::unique_ptr<CNode> pnode2 = MakeUnique<CNode>( @@ -214,7 +215,7 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) BOOST_CHECK(pnode2->IsFeelerConn() == false); BOOST_CHECK(pnode2->IsAddrFetchConn() == false); BOOST_CHECK(pnode2->IsInboundConn() == true); - BOOST_CHECK(pnode2->IsInboundOnion() == false); + BOOST_CHECK(pnode2->m_inbound_onion == false); BOOST_CHECK_EQUAL(pnode2->ConnectedThroughNetwork(), Network::NET_IPV4); std::unique_ptr<CNode> pnode3 = MakeUnique<CNode>( @@ -229,7 +230,7 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) BOOST_CHECK(pnode3->IsFeelerConn() == false); BOOST_CHECK(pnode3->IsAddrFetchConn() == false); BOOST_CHECK(pnode3->IsInboundConn() == false); - BOOST_CHECK(pnode3->IsInboundOnion() == false); + BOOST_CHECK(pnode3->m_inbound_onion == false); BOOST_CHECK_EQUAL(pnode3->ConnectedThroughNetwork(), Network::NET_IPV4); std::unique_ptr<CNode> pnode4 = MakeUnique<CNode>( @@ -244,7 +245,7 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) BOOST_CHECK(pnode4->IsFeelerConn() == false); BOOST_CHECK(pnode4->IsAddrFetchConn() == false); BOOST_CHECK(pnode4->IsInboundConn() == true); - BOOST_CHECK(pnode4->IsInboundOnion() == true); + BOOST_CHECK(pnode4->m_inbound_onion == true); BOOST_CHECK_EQUAL(pnode4->ConnectedThroughNetwork(), Network::NET_ONION); } @@ -679,7 +680,7 @@ BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test) in_addr ipv4AddrPeer; ipv4AddrPeer.s_addr = 0xa0b0c001; CAddress addr = CAddress(CService(ipv4AddrPeer, 7777), NODE_NETWORK); - std::unique_ptr<CNode> pnode = MakeUnique<CNode>(0, NODE_NETWORK, INVALID_SOCKET, addr, 0, 0, CAddress{}, 
std::string{}, ConnectionType::OUTBOUND_FULL_RELAY); + std::unique_ptr<CNode> pnode = MakeUnique<CNode>(0, NODE_NETWORK, INVALID_SOCKET, addr, /* nKeyedNetGroupIn */ 0, /* nLocalHostNonceIn */ 0, CAddress{}, /* pszDest */ std::string{}, ConnectionType::OUTBOUND_FULL_RELAY, /* inbound_onion */ false); pnode->fSuccessfullyConnected.store(true); // the peer claims to be reaching us via IPv6 @@ -690,7 +691,7 @@ BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test) pnode->SetAddrLocal(addrLocal); // before patch, this causes undefined behavior detectable with clang's -fsanitize=memory - AdvertiseLocal(&*pnode); + GetLocalAddrForPeer(&*pnode); // suppress no-checks-run warning; if this test fails, it's by triggering a sanitizer BOOST_CHECK(1); @@ -793,7 +794,7 @@ std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(const int n_c candidates.push_back({ /* id */ id, /* nTimeConnected */ static_cast<int64_t>(random_context.randrange(100)), - /* nMinPingUsecTime */ static_cast<int64_t>(random_context.randrange(100)), + /* m_min_ping_time */ static_cast<int64_t>(random_context.randrange(100)), /* nLastBlockTime */ static_cast<int64_t>(random_context.randrange(100)), /* nLastTXTime */ static_cast<int64_t>(random_context.randrange(100)), /* fRelevantServices */ random_context.randbool(), @@ -853,7 +854,7 @@ BOOST_AUTO_TEST_CASE(node_eviction_test) // from eviction. BOOST_CHECK(!IsEvicted( number_of_nodes, [](NodeEvictionCandidate& candidate) { - candidate.nMinPingUsecTime = candidate.id; + candidate.m_min_ping_time = candidate.id; }, {0, 1, 2, 3, 4, 5, 6, 7}, random_context)); @@ -900,7 +901,7 @@ BOOST_AUTO_TEST_CASE(node_eviction_test) BOOST_CHECK(!IsEvicted( number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { candidate.nKeyedNetGroup = number_of_nodes - candidate.id; // 4 protected - candidate.nMinPingUsecTime = candidate.id; // 8 protected + candidate.m_min_ping_time = candidate.id; // 8 protected candidate.nLastTXTime = number_of_nodes - candidate.id; // 4 protected candidate.nLastBlockTime = number_of_nodes - candidate.id; // 4 protected }, diff --git a/src/test/sanity_tests.cpp b/src/test/sanity_tests.cpp index 740b2c72db..3e4b963fe3 100644 --- a/src/test/sanity_tests.cpp +++ b/src/test/sanity_tests.cpp @@ -5,6 +5,7 @@ #include <compat/sanity.h> #include <key.h> #include <test/util/setup_common.h> +#include <util/time.h> #include <boost/test/unit_test.hpp> @@ -15,6 +16,7 @@ BOOST_AUTO_TEST_CASE(basic_sanity) BOOST_CHECK_MESSAGE(glibc_sanity_test() == true, "libc sanity test"); BOOST_CHECK_MESSAGE(glibcxx_sanity_test() == true, "stdlib sanity test"); BOOST_CHECK_MESSAGE(ECC_InitSanityCheck() == true, "secp256k1 sanity test"); + BOOST_CHECK_MESSAGE(ChronoSanityCheck() == true, "chrono epoch test"); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/scheduler_tests.cpp b/src/test/scheduler_tests.cpp index 21162356b8..d57c000b92 100644 --- a/src/test/scheduler_tests.cpp +++ b/src/test/scheduler_tests.cpp @@ -7,10 +7,11 @@ #include <util/time.h> #include <boost/test/unit_test.hpp> -#include <boost/thread/thread.hpp> #include <functional> #include <mutex> +#include <thread> +#include <vector> BOOST_AUTO_TEST_SUITE(scheduler_tests) @@ -69,16 +70,16 @@ BOOST_AUTO_TEST_CASE(manythreads) BOOST_CHECK(last > now); // As soon as these are created they will start running and servicing the queue - boost::thread_group microThreads; + std::vector<std::thread> microThreads; for (int i = 0; i < 5; i++) - microThreads.create_thread(std::bind(&CScheduler::serviceQueue, &microTasks)); + 
microThreads.emplace_back(std::bind(&CScheduler::serviceQueue, &microTasks)); UninterruptibleSleep(std::chrono::microseconds{600}); now = std::chrono::system_clock::now(); // More threads and more tasks: for (int i = 0; i < 5; i++) - microThreads.create_thread(std::bind(&CScheduler::serviceQueue, &microTasks)); + microThreads.emplace_back(std::bind(&CScheduler::serviceQueue, &microTasks)); for (int i = 0; i < 100; i++) { std::chrono::system_clock::time_point t = now + std::chrono::microseconds(randomMsec(rng)); std::chrono::system_clock::time_point tReschedule = now + std::chrono::microseconds(500 + randomMsec(rng)); @@ -91,7 +92,10 @@ BOOST_AUTO_TEST_CASE(manythreads) // Drain the task queue then exit threads microTasks.StopWhenDrained(); - microThreads.join_all(); // ... wait until all the threads are done + // wait until all the threads are done + for (auto& thread: microThreads) { + if (thread.joinable()) thread.join(); + } int counterSum = 0; for (int i = 0; i < 10; i++) { @@ -131,9 +135,9 @@ BOOST_AUTO_TEST_CASE(singlethreadedscheduler_ordered) // if the queues only permit execution of one task at once then // the extra threads should effectively be doing nothing // if they don't we'll get out of order behaviour - boost::thread_group threads; + std::vector<std::thread> threads; for (int i = 0; i < 5; ++i) { - threads.create_thread(std::bind(&CScheduler::serviceQueue, &scheduler)); + threads.emplace_back(std::bind(&CScheduler::serviceQueue, &scheduler)); } // these are not atomic, if SinglethreadedSchedulerClient prevents @@ -157,7 +161,9 @@ BOOST_AUTO_TEST_CASE(singlethreadedscheduler_ordered) // finish up scheduler.StopWhenDrained(); - threads.join_all(); + for (auto& thread: threads) { + if (thread.joinable()) thread.join(); + } BOOST_CHECK_EQUAL(counter1, 100); BOOST_CHECK_EQUAL(counter2, 100); diff --git a/src/test/script_standard_tests.cpp b/src/test/script_standard_tests.cpp index 366385b619..4dc0dd5f51 100644 --- a/src/test/script_standard_tests.cpp +++ b/src/test/script_standard_tests.cpp @@ -107,6 +107,22 @@ BOOST_AUTO_TEST_CASE(script_standard_Solver_success) BOOST_CHECK_EQUAL(solutions.size(), 1U); BOOST_CHECK(solutions[0] == ToByteVector(scriptHash)); + // TxoutType::WITNESS_V1_TAPROOT + s.clear(); + s << OP_1 << ToByteVector(uint256::ZERO); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::WITNESS_V1_TAPROOT); + BOOST_CHECK_EQUAL(solutions.size(), 2U); + BOOST_CHECK(solutions[0] == std::vector<unsigned char>{1}); + BOOST_CHECK(solutions[1] == ToByteVector(uint256::ZERO)); + + // TxoutType::WITNESS_UNKNOWN + s.clear(); + s << OP_16 << ToByteVector(uint256::ONE); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::WITNESS_UNKNOWN); + BOOST_CHECK_EQUAL(solutions.size(), 2U); + BOOST_CHECK(solutions[0] == std::vector<unsigned char>{16}); + BOOST_CHECK(solutions[1] == ToByteVector(uint256::ONE)); + // TxoutType::NONSTANDARD s.clear(); s << OP_9 << OP_ADD << OP_11 << OP_EQUAL; diff --git a/src/test/scriptnum_tests.cpp b/src/test/scriptnum_tests.cpp index 281018be9f..746d4d3c6b 100644 --- a/src/test/scriptnum_tests.cpp +++ b/src/test/scriptnum_tests.cpp @@ -164,9 +164,9 @@ static void RunOperators(const int64_t& num1, const int64_t& num2) BOOST_AUTO_TEST_CASE(creation) { - for(size_t i = 0; i < sizeof(values) / sizeof(values[0]); ++i) + for(size_t i = 0; i < std::size(values); ++i) { - for(size_t j = 0; j < sizeof(offsets) / sizeof(offsets[0]); ++j) + for(size_t j = 0; j < std::size(offsets); ++j) { RunCreate(values[i]); RunCreate(values[i] + offsets[j]); @@ -177,9 +177,9 @@ 
BOOST_AUTO_TEST_CASE(creation) BOOST_AUTO_TEST_CASE(operators) { - for(size_t i = 0; i < sizeof(values) / sizeof(values[0]); ++i) + for(size_t i = 0; i < std::size(values); ++i) { - for(size_t j = 0; j < sizeof(offsets) / sizeof(offsets[0]); ++j) + for(size_t j = 0; j < std::size(offsets); ++j) { RunOperators(values[i], values[i]); RunOperators(values[i], -values[i]); diff --git a/src/test/serialize_tests.cpp b/src/test/serialize_tests.cpp index f625b67c2a..f77cda7ba2 100644 --- a/src/test/serialize_tests.cpp +++ b/src/test/serialize_tests.cpp @@ -320,7 +320,7 @@ BOOST_AUTO_TEST_CASE(insert_delete) ss.insert(ss.end(), c); BOOST_CHECK_EQUAL(ss.size(), 6U); - BOOST_CHECK_EQUAL(ss[4], (char)0xff); + BOOST_CHECK_EQUAL(ss[4], 0xff); BOOST_CHECK_EQUAL(ss[5], c); ss.insert(ss.begin()+2, c); @@ -334,19 +334,14 @@ BOOST_AUTO_TEST_CASE(insert_delete) ss.erase(ss.begin()+ss.size()-1); BOOST_CHECK_EQUAL(ss.size(), 5U); - BOOST_CHECK_EQUAL(ss[4], (char)0xff); + BOOST_CHECK_EQUAL(ss[4], 0xff); ss.erase(ss.begin()+1); BOOST_CHECK_EQUAL(ss.size(), 4U); BOOST_CHECK_EQUAL(ss[0], 0); BOOST_CHECK_EQUAL(ss[1], 1); BOOST_CHECK_EQUAL(ss[2], 2); - BOOST_CHECK_EQUAL(ss[3], (char)0xff); - - // Make sure GetAndClear does the right thing: - CSerializeData d; - ss.GetAndClear(d); - BOOST_CHECK_EQUAL(ss.size(), 0U); + BOOST_CHECK_EQUAL(ss[3], 0xff); } BOOST_AUTO_TEST_CASE(class_methods) diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp index bc862de78a..2eb980e8cd 100644 --- a/src/test/sighash_tests.cpp +++ b/src/test/sighash_tests.cpp @@ -88,7 +88,7 @@ void static RandomScript(CScript &script) { script = CScript(); int ops = (InsecureRandRange(10)); for (int i=0; i<ops; i++) - script << oplist[InsecureRandRange(sizeof(oplist)/sizeof(oplist[0]))]; + script << oplist[InsecureRandRange(std::size(oplist))]; } void static RandomTransaction(CMutableTransaction &tx, bool fSingle) { diff --git a/src/test/sock_tests.cpp b/src/test/sock_tests.cpp new file mode 100644 index 0000000000..ed9780dfb5 --- /dev/null +++ b/src/test/sock_tests.cpp @@ -0,0 +1,149 @@ +// Copyright (c) 2021-2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <compat.h> +#include <test/util/setup_common.h> +#include <util/sock.h> +#include <util/system.h> + +#include <boost/test/unit_test.hpp> + +#include <thread> + +using namespace std::chrono_literals; + +BOOST_FIXTURE_TEST_SUITE(sock_tests, BasicTestingSetup) + +static bool SocketIsClosed(const SOCKET& s) +{ + // Notice that if another thread is running and creates its own socket after `s` has been + // closed, it may be assigned the same file descriptor number. In this case, our test will + // wrongly pretend that the socket is not closed. 
+ int type; + socklen_t len = sizeof(type); + return getsockopt(s, SOL_SOCKET, SO_TYPE, (sockopt_arg_type)&type, &len) == SOCKET_ERROR; +} + +static SOCKET CreateSocket() +{ + const SOCKET s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); + BOOST_REQUIRE(s != static_cast<SOCKET>(SOCKET_ERROR)); + return s; +} + +BOOST_AUTO_TEST_CASE(constructor_and_destructor) +{ + const SOCKET s = CreateSocket(); + Sock* sock = new Sock(s); + BOOST_CHECK_EQUAL(sock->Get(), s); + BOOST_CHECK(!SocketIsClosed(s)); + delete sock; + BOOST_CHECK(SocketIsClosed(s)); +} + +BOOST_AUTO_TEST_CASE(move_constructor) +{ + const SOCKET s = CreateSocket(); + Sock* sock1 = new Sock(s); + Sock* sock2 = new Sock(std::move(*sock1)); + delete sock1; + BOOST_CHECK(!SocketIsClosed(s)); + BOOST_CHECK_EQUAL(sock2->Get(), s); + delete sock2; + BOOST_CHECK(SocketIsClosed(s)); +} + +BOOST_AUTO_TEST_CASE(move_assignment) +{ + const SOCKET s = CreateSocket(); + Sock* sock1 = new Sock(s); + Sock* sock2 = new Sock(); + *sock2 = std::move(*sock1); + delete sock1; + BOOST_CHECK(!SocketIsClosed(s)); + BOOST_CHECK_EQUAL(sock2->Get(), s); + delete sock2; + BOOST_CHECK(SocketIsClosed(s)); +} + +BOOST_AUTO_TEST_CASE(release) +{ + SOCKET s = CreateSocket(); + Sock* sock = new Sock(s); + BOOST_CHECK_EQUAL(sock->Release(), s); + delete sock; + BOOST_CHECK(!SocketIsClosed(s)); + BOOST_REQUIRE(CloseSocket(s)); +} + +BOOST_AUTO_TEST_CASE(reset) +{ + const SOCKET s = CreateSocket(); + Sock sock(s); + sock.Reset(); + BOOST_CHECK(SocketIsClosed(s)); +} + +#ifndef WIN32 // Windows does not have socketpair(2). + +static void CreateSocketPair(int s[2]) +{ + BOOST_REQUIRE_EQUAL(socketpair(AF_UNIX, SOCK_STREAM, 0, s), 0); +} + +static void SendAndRecvMessage(const Sock& sender, const Sock& receiver) +{ + const char* msg = "abcd"; + constexpr ssize_t msg_len = 4; + char recv_buf[10]; + + BOOST_CHECK_EQUAL(sender.Send(msg, msg_len, 0), msg_len); + BOOST_CHECK_EQUAL(receiver.Recv(recv_buf, sizeof(recv_buf), 0), msg_len); + BOOST_CHECK_EQUAL(strncmp(msg, recv_buf, msg_len), 0); +} + +BOOST_AUTO_TEST_CASE(send_and_receive) +{ + int s[2]; + CreateSocketPair(s); + + Sock* sock0 = new Sock(s[0]); + Sock* sock1 = new Sock(s[1]); + + SendAndRecvMessage(*sock0, *sock1); + + Sock* sock0moved = new Sock(std::move(*sock0)); + Sock* sock1moved = new Sock(); + *sock1moved = std::move(*sock1); + + delete sock0; + delete sock1; + + SendAndRecvMessage(*sock1moved, *sock0moved); + + delete sock0moved; + delete sock1moved; + + BOOST_CHECK(SocketIsClosed(s[0])); + BOOST_CHECK(SocketIsClosed(s[1])); +} + +BOOST_AUTO_TEST_CASE(wait) +{ + int s[2]; + CreateSocketPair(s); + + Sock sock0(s[0]); + Sock sock1(s[1]); + + std::thread waiter([&sock0]() { sock0.Wait(24h, Sock::RECV); }); + + BOOST_REQUIRE_EQUAL(sock1.Send("a", 1, 0), 1); + + waiter.join(); +} + +#endif /* WIN32 */ + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/streams_tests.cpp b/src/test/streams_tests.cpp index 8f5ee1e8e2..3079c9ff29 100644 --- a/src/test/streams_tests.cpp +++ b/src/test/streams_tests.cpp @@ -149,7 +149,7 @@ BOOST_AUTO_TEST_CASE(bitstream_reader_writer) BOOST_AUTO_TEST_CASE(streams_serializedata_xor) { - std::vector<char> in; + std::vector<uint8_t> in; std::vector<char> expected_xor; std::vector<unsigned char> key; CDataStream ds(in, 0, 0); diff --git a/src/test/txvalidation_tests.cpp b/src/test/txvalidation_tests.cpp index 7e6246d68f..8d14071297 100644 --- a/src/test/txvalidation_tests.cpp +++ b/src/test/txvalidation_tests.cpp @@ -30,25 +30,21 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_reject_coinbase, 
TestChain100Setup) BOOST_CHECK(CTransaction(coinbaseTx).IsCoinBase()); - TxValidationState state; - LOCK(cs_main); unsigned int initialPoolSize = m_node.mempool->size(); + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *m_node.mempool, MakeTransactionRef(coinbaseTx), + true /* bypass_limits */); - BOOST_CHECK_EQUAL( - false, - AcceptToMemoryPool(*m_node.mempool, state, MakeTransactionRef(coinbaseTx), - nullptr /* plTxnReplaced */, - true /* bypass_limits */)); + BOOST_CHECK(result.m_result_type == MempoolAcceptResult::ResultType::INVALID); // Check that the transaction hasn't been added to mempool. BOOST_CHECK_EQUAL(m_node.mempool->size(), initialPoolSize); // Check that the validation state reflects the unsuccessful attempt. - BOOST_CHECK(state.IsInvalid()); - BOOST_CHECK_EQUAL(state.GetRejectReason(), "coinbase"); - BOOST_CHECK(state.GetResult() == TxValidationResult::TX_CONSENSUS); + BOOST_CHECK(result.m_state.IsInvalid()); + BOOST_CHECK_EQUAL(result.m_state.GetRejectReason(), "coinbase"); + BOOST_CHECK(result.m_state.GetResult() == TxValidationResult::TX_CONSENSUS); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp index bed2ba3608..288f807abd 100644 --- a/src/test/txvalidationcache_tests.cpp +++ b/src/test/txvalidationcache_tests.cpp @@ -28,9 +28,9 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup) const auto ToMemPool = [this](const CMutableTransaction& tx) { LOCK(cs_main); - TxValidationState state; - return AcceptToMemoryPool(*m_node.mempool, state, MakeTransactionRef(tx), - nullptr /* plTxnReplaced */, true /* bypass_limits */); + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *m_node.mempool, MakeTransactionRef(tx), + true /* bypass_limits */); + return result.m_result_type == MempoolAcceptResult::ResultType::VALID; }; // Create a double-spend of mature coinbase txn: diff --git a/src/test/util/script.h b/src/test/util/script.h new file mode 100644 index 0000000000..abd14c2067 --- /dev/null +++ b/src/test/util/script.h @@ -0,0 +1,21 @@ +// Copyright (c) 2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_TEST_UTIL_SCRIPT_H +#define BITCOIN_TEST_UTIL_SCRIPT_H + +#include <crypto/sha256.h> +#include <script/script.h> + +static const std::vector<uint8_t> WITNESS_STACK_ELEM_OP_TRUE{uint8_t{OP_TRUE}}; +static const CScript P2WSH_OP_TRUE{ + CScript{} + << OP_0 + << ToByteVector([] { + uint256 hash; + CSHA256().Write(WITNESS_STACK_ELEM_OP_TRUE.data(), WITNESS_STACK_ELEM_OP_TRUE.size()).Finalize(hash.begin()); + return hash; + }())}; + +#endif // BITCOIN_TEST_UTIL_SCRIPT_H diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index 8a9694f55b..1ffe435531 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -77,6 +77,7 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve { "dummy", "-printtoconsole=0", + "-logsourcelocations", "-logtimemicros", "-logthreadnames", "-debug", @@ -131,7 +132,7 @@ ChainTestingSetup::ChainTestingSetup(const std::string& chainName, const std::ve // We have to run a scheduler thread to prevent ActivateBestChain // from blocking due to queue overrun. 
m_node.scheduler = MakeUnique<CScheduler>(); - threadGroup.create_thread([&] { TraceThread("scheduler", [&] { m_node.scheduler->serviceQueue(); }); }); + m_node.scheduler->m_service_thread = std::thread([&] { TraceThread("scheduler", [&] { m_node.scheduler->serviceQueue(); }); }); GetMainSignals().RegisterBackgroundSignalScheduler(*m_node.scheduler); pblocktree.reset(new CBlockTreeDB(1 << 20, true)); @@ -150,8 +151,6 @@ ChainTestingSetup::ChainTestingSetup(const std::string& chainName, const std::ve ChainTestingSetup::~ChainTestingSetup() { if (m_node.scheduler) m_node.scheduler->stop(); - threadGroup.interrupt_all(); - threadGroup.join_all(); StopScriptCheckWorkerThreads(); GetMainSignals().FlushBackgroundCallbacks(); GetMainSignals().UnregisterBackgroundSignalScheduler(); @@ -185,7 +184,7 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const } BlockValidationState state; - if (!ActivateBestChain(state, chainparams)) { + if (!::ChainstateActive().ActivateBestChain(state, chainparams)) { throw std::runtime_error(strprintf("ActivateBestChain failed. (%s)", state.ToString())); } @@ -201,14 +200,43 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const } } -TestChain100Setup::TestChain100Setup() +TestChain100Setup::TestChain100Setup(bool deterministic) { + m_deterministic = deterministic; + + if (m_deterministic) { + SetMockTime(1598887952); + constexpr std::array<unsigned char, 32> vchKey = { + { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 + } + }; + coinbaseKey.Set(vchKey.begin(), vchKey.end(), false); + } else { + coinbaseKey.MakeNewKey(true); + } + // Generate a 100-block chain: - coinbaseKey.MakeNewKey(true); + this->mineBlocks(COINBASE_MATURITY); + + if (m_deterministic) { + LOCK(::cs_main); + assert( + m_node.chainman->ActiveChain().Tip()->GetBlockHash().ToString() == + "49c95db1e470fed04496d801c9d8fbb78155d2c7f855232c918823d2c17d0cf6"); + } +} + +void TestChain100Setup::mineBlocks(int num_blocks) +{ CScript scriptPubKey = CScript() << ToByteVector(coinbaseKey.GetPubKey()) << OP_CHECKSIG; - for (int i = 0; i < COINBASE_MATURITY; i++) { + for (int i = 0; i < num_blocks; i++) + { std::vector<CMutableTransaction> noTxns; CBlock b = CreateAndProcessBlock(noTxns, scriptPubKey); + if (m_deterministic) { + SetMockTime(GetTime() + 1); + } m_coinbase_txns.push_back(b.vtx[0]); } } @@ -233,9 +261,61 @@ CBlock TestChain100Setup::CreateAndProcessBlock(const std::vector<CMutableTransa return block; } + +CMutableTransaction TestChain100Setup::CreateValidMempoolTransaction(CTransactionRef input_transaction, + int input_vout, + int input_height, + CKey input_signing_key, + CScript output_destination, + CAmount output_amount) +{ + // Transaction we will submit to the mempool + CMutableTransaction mempool_txn; + + // Create an input + COutPoint outpoint_to_spend(input_transaction->GetHash(), input_vout); + CTxIn input(outpoint_to_spend); + mempool_txn.vin.push_back(input); + + // Create an output + CTxOut output(output_amount, output_destination); + mempool_txn.vout.push_back(output); + + // Sign the transaction + // - Add the signing key to a keystore + FillableSigningProvider keystore; + keystore.AddKey(input_signing_key); + // - Populate a CoinsViewCache with the unspent output + CCoinsView coins_view; + CCoinsViewCache coins_cache(&coins_view); + AddCoins(coins_cache, *input_transaction.get(), input_height); + // - Use GetCoin to properly populate utxo_to_spend, + Coin utxo_to_spend; + 
assert(coins_cache.GetCoin(outpoint_to_spend, utxo_to_spend)); + // - Then add it to a map to pass in to SignTransaction + std::map<COutPoint, Coin> input_coins; + input_coins.insert({outpoint_to_spend, utxo_to_spend}); + // - Default signature hashing type + int nHashType = SIGHASH_ALL; + std::map<int, std::string> input_errors; + assert(SignTransaction(mempool_txn, &keystore, input_coins, nHashType, input_errors)); + + // Add transaction to the mempool + { + LOCK(cs_main); + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *m_node.mempool.get(), MakeTransactionRef(mempool_txn), /* bypass_limits */ false); + assert(result.m_result_type == MempoolAcceptResult::ResultType::VALID); + } + + return mempool_txn; +} + TestChain100Setup::~TestChain100Setup() { gArgs.ForceSetArg("-segwitheight", "0"); + if (m_deterministic) { + SetMockTime(0); + } } CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(const CMutableTransaction& tx) const diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h index dff8cf825e..33f24e7c44 100644 --- a/src/test/util/setup_common.h +++ b/src/test/util/setup_common.h @@ -17,8 +17,7 @@ #include <util/string.h> #include <type_traits> - -#include <boost/thread/thread.hpp> +#include <vector> /** This is connected to the logger. Can be used to redirect logs to any other log */ extern const std::function<void(const std::string&)> G_TEST_LOG_FUN; @@ -79,7 +78,6 @@ struct BasicTestingSetup { explicit BasicTestingSetup(const std::string& chainName = CBaseChainParams::MAIN, const std::vector<const char*>& extra_args = {}); ~BasicTestingSetup(); -private: const fs::path m_path_root; }; @@ -88,7 +86,6 @@ private: * initialization behaviour. */ struct ChainTestingSetup : public BasicTestingSetup { - boost::thread_group threadGroup; explicit ChainTestingSetup(const std::string& chainName = CBaseChainParams::MAIN, const std::vector<const char*>& extra_args = {}); ~ChainTestingSetup(); @@ -114,7 +111,7 @@ class CScript; * Testing fixture that pre-creates a 100-block REGTEST-mode block chain */ struct TestChain100Setup : public RegTestingSetup { - TestChain100Setup(); + TestChain100Setup(bool deterministic = false); /** * Create a new block with just given transactions, coinbase paying to @@ -123,12 +120,38 @@ struct TestChain100Setup : public RegTestingSetup { CBlock CreateAndProcessBlock(const std::vector<CMutableTransaction>& txns, const CScript& scriptPubKey); + //! Mine a series of new blocks on the active chain. + void mineBlocks(int num_blocks); + + /** + * Create a transaction and submit to the mempool. 
+ * + * @param input_transaction The transaction to spend + * @param input_vout The vout to spend from the input_transaction + * @param input_height The height of the block that included the input_transaction + * @param input_signing_key The key to spend the input_transaction + * @param output_destination Where to send the output + * @param output_amount How much to send + */ + CMutableTransaction CreateValidMempoolTransaction(CTransactionRef input_transaction, + int input_vout, + int input_height, + CKey input_signing_key, + CScript output_destination, + CAmount output_amount = CAmount(1 * COIN)); + ~TestChain100Setup(); + bool m_deterministic; std::vector<CTransactionRef> m_coinbase_txns; // For convenience, coinbase transactions CKey coinbaseKey; // private/public key needed to spend coinbase transactions }; + +struct TestChain100DeterministicSetup : public TestChain100Setup { + TestChain100DeterministicSetup() : TestChain100Setup(true) { } +}; + class CTxMemPoolEntry; struct TestMemPoolEntryHelper diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index 4133f2623b..845854bd4b 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -13,6 +13,7 @@ #include <test/util/setup_common.h> #include <test/util/str.h> #include <uint256.h> +#include <util/getuniquepath.h> #include <util/message.h> // For MessageSign(), MessageVerify(), MESSAGE_MAGIC #include <util/moneystr.h> #include <util/spanparsing.h> @@ -1816,7 +1817,7 @@ BOOST_AUTO_TEST_CASE(test_DirIsWritable) BOOST_CHECK_EQUAL(DirIsWritable(tmpdirname), true); // Should not be able to write to a non-existent dir. - tmpdirname = tmpdirname / fs::unique_path(); + tmpdirname = GetUniquePath(tmpdirname); BOOST_CHECK_EQUAL(DirIsWritable(tmpdirname), false); fs::create_directory(tmpdirname); @@ -2200,4 +2201,17 @@ BOOST_AUTO_TEST_CASE(message_hash) BOOST_CHECK_NE(message_hash1, signature_hash); } +BOOST_AUTO_TEST_CASE(remove_prefix) +{ + BOOST_CHECK_EQUAL(RemovePrefix("./util/system.h", "./"), "util/system.h"); + BOOST_CHECK_EQUAL(RemovePrefix("foo", "foo"), ""); + BOOST_CHECK_EQUAL(RemovePrefix("foo", "fo"), "o"); + BOOST_CHECK_EQUAL(RemovePrefix("foo", "f"), "oo"); + BOOST_CHECK_EQUAL(RemovePrefix("foo", ""), "foo"); + BOOST_CHECK_EQUAL(RemovePrefix("fo", "foo"), "fo"); + BOOST_CHECK_EQUAL(RemovePrefix("f", "foo"), "f"); + BOOST_CHECK_EQUAL(RemovePrefix("", "foo"), ""); + BOOST_CHECK_EQUAL(RemovePrefix("", ""), ""); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp index ea17cb50f1..0c87c4d360 100644 --- a/src/test/validation_block_tests.cpp +++ b/src/test/validation_block_tests.cpp @@ -11,6 +11,7 @@ #include <pow.h> #include <random.h> #include <script/standard.h> +#include <test/util/script.h> #include <test/util/setup_common.h> #include <util/time.h> #include <validation.h> @@ -18,8 +19,6 @@ #include <thread> -static const std::vector<unsigned char> V_OP_TRUE{OP_TRUE}; - namespace validation_block_tests { struct MinerTestingSetup : public RegTestingSetup { std::shared_ptr<CBlock> Block(const uint256& prev_hash); @@ -64,27 +63,17 @@ std::shared_ptr<CBlock> MinerTestingSetup::Block(const uint256& prev_hash) static int i = 0; static uint64_t time = Params().GenesisBlock().nTime; - CScript pubKey; - pubKey << i++ << OP_TRUE; - - auto ptemplate = BlockAssembler(*m_node.mempool, Params()).CreateNewBlock(pubKey); + auto ptemplate = BlockAssembler(*m_node.mempool, Params()).CreateNewBlock(CScript{} << i++ << OP_TRUE); auto pblock = 
std::make_shared<CBlock>(ptemplate->block); pblock->hashPrevBlock = prev_hash; pblock->nTime = ++time; - pubKey.clear(); - { - WitnessV0ScriptHash witness_program; - CSHA256().Write(&V_OP_TRUE[0], V_OP_TRUE.size()).Finalize(witness_program.begin()); - pubKey << OP_0 << ToByteVector(witness_program); - } - // Make the coinbase transaction with two outputs: // One zero-value one that has a unique pubkey to make sure that blocks at the same height can have a different hash // Another one that has the coinbase reward in a P2WSH with OP_TRUE as witness program to make it easy to spend CMutableTransaction txCoinbase(*pblock->vtx[0]); txCoinbase.vout.resize(2); - txCoinbase.vout[1].scriptPubKey = pubKey; + txCoinbase.vout[1].scriptPubKey = P2WSH_OP_TRUE; txCoinbase.vout[1].nValue = txCoinbase.vout[0].nValue; txCoinbase.vout[0].nValue = 0; txCoinbase.vin[0].scriptWitness.SetNull(); @@ -95,8 +84,8 @@ std::shared_ptr<CBlock> MinerTestingSetup::Block(const uint256& prev_hash) std::shared_ptr<CBlock> MinerTestingSetup::FinalizeBlock(std::shared_ptr<CBlock> pblock) { - LOCK(cs_main); // For LookupBlockIndex - GenerateCoinbaseCommitment(*pblock, LookupBlockIndex(pblock->hashPrevBlock), Params().GetConsensus()); + LOCK(cs_main); // For g_chainman.m_blockman.LookupBlockIndex + GenerateCoinbaseCommitment(*pblock, g_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock), Params().GetConsensus()); pblock->hashMerkleRoot = BlockMerkleRoot(*pblock); @@ -254,7 +243,7 @@ BOOST_AUTO_TEST_CASE(mempool_locks_reorg) for (int num_txs = 22; num_txs > 0; --num_txs) { CMutableTransaction mtx; mtx.vin.push_back(CTxIn{COutPoint{last_mined->vtx[0]->GetHash(), 1}, CScript{}}); - mtx.vin[0].scriptWitness.stack.push_back(V_OP_TRUE); + mtx.vin[0].scriptWitness.stack.push_back(WITNESS_STACK_ELEM_OP_TRUE); mtx.vout.push_back(last_mined->vtx[0]->vout[1]); mtx.vout[0].nValue -= 1000; txs.push_back(MakeTransactionRef(mtx)); @@ -283,15 +272,9 @@ BOOST_AUTO_TEST_CASE(mempool_locks_reorg) // Add the txs to the tx pool { LOCK(cs_main); - TxValidationState state; - std::list<CTransactionRef> plTxnReplaced; for (const auto& tx : txs) { - BOOST_REQUIRE(AcceptToMemoryPool( - *m_node.mempool, - state, - tx, - &plTxnReplaced, - /* bypass_limits */ false)); + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *m_node.mempool, tx, false /* bypass_limits */); + BOOST_REQUIRE(result.m_result_type == MempoolAcceptResult::ResultType::VALID); } } diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp index 3d8570e27c..94d4277019 100644 --- a/src/test/validation_chainstatemanager_tests.cpp +++ b/src/test/validation_chainstatemanager_tests.cpp @@ -4,13 +4,18 @@ // #include <chainparams.h> #include <consensus/validation.h> +#include <node/utxo_snapshot.h> #include <random.h> +#include <rpc/blockchain.h> #include <sync.h> #include <test/util/setup_common.h> #include <uint256.h> #include <validation.h> #include <validationinterface.h> +#include <tinyformat.h> +#include <univalue.h> + #include <vector> #include <boost/test/unit_test.hpp> @@ -28,6 +33,8 @@ BOOST_AUTO_TEST_CASE(chainstatemanager) std::vector<CChainState*> chainstates; const CChainParams& chainparams = Params(); + BOOST_CHECK(!manager.SnapshotBlockhash().has_value()); + // Create a legacy (IBD) chainstate. 
// CChainState& c1 = WITH_LOCK(::cs_main, return manager.InitializeChainstate(mempool)); @@ -54,10 +61,17 @@ BOOST_AUTO_TEST_CASE(chainstatemanager) auto& validated_cs = manager.ValidatedChainstate(); BOOST_CHECK_EQUAL(&validated_cs, &c1); + BOOST_CHECK(!manager.SnapshotBlockhash().has_value()); + // Create a snapshot-based chainstate. // - CChainState& c2 = WITH_LOCK(::cs_main, return manager.InitializeChainstate(mempool, GetRandHash())); + const uint256 snapshot_blockhash = GetRandHash(); + CChainState& c2 = WITH_LOCK(::cs_main, return manager.InitializeChainstate( + mempool, snapshot_blockhash)); chainstates.push_back(&c2); + + BOOST_CHECK_EQUAL(manager.SnapshotBlockhash().value(), snapshot_blockhash); + c2.InitCoinsDB( /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false); WITH_LOCK(::cs_main, c2.InitCoinsCache(1 << 23)); @@ -155,4 +169,175 @@ BOOST_AUTO_TEST_CASE(chainstatemanager_rebalance_caches) BOOST_CHECK_CLOSE(c2.m_coinsdb_cache_size_bytes, max_cache * 0.95, 1); } +auto NoMalleation = [](CAutoFile& file, SnapshotMetadata& meta){}; + +template<typename F = decltype(NoMalleation)> +static bool +CreateAndActivateUTXOSnapshot(NodeContext& node, const fs::path root, F malleation = NoMalleation) +{ + // Write out a snapshot to the test's tempdir. + // + int height; + WITH_LOCK(::cs_main, height = node.chainman->ActiveHeight()); + fs::path snapshot_path = root / tfm::format("test_snapshot.%d.dat", height); + FILE* outfile{fsbridge::fopen(snapshot_path, "wb")}; + CAutoFile auto_outfile{outfile, SER_DISK, CLIENT_VERSION}; + + UniValue result = CreateUTXOSnapshot(node, node.chainman->ActiveChainstate(), auto_outfile); + BOOST_TEST_MESSAGE( + "Wrote UTXO snapshot to " << snapshot_path.make_preferred().string() << ": " << result.write()); + + // Read the written snapshot in and then activate it. + // + FILE* infile{fsbridge::fopen(snapshot_path, "rb")}; + CAutoFile auto_infile{infile, SER_DISK, CLIENT_VERSION}; + SnapshotMetadata metadata; + auto_infile >> metadata; + + malleation(auto_infile, metadata); + + return node.chainman->ActivateSnapshot(auto_infile, metadata, /*in_memory*/ true); +} + +//! Test basic snapshot activation. +BOOST_FIXTURE_TEST_CASE(chainstatemanager_activate_snapshot, TestChain100DeterministicSetup) +{ + ChainstateManager& chainman = *Assert(m_node.chainman); + + size_t initial_size; + size_t initial_total_coins{100}; + + // Make some initial assertions about the contents of the chainstate. + { + LOCK(::cs_main); + CCoinsViewCache& ibd_coinscache = chainman.ActiveChainstate().CoinsTip(); + initial_size = ibd_coinscache.GetCacheSize(); + size_t total_coins{0}; + + for (CTransactionRef& txn : m_coinbase_txns) { + COutPoint op{txn->GetHash(), 0}; + BOOST_CHECK(ibd_coinscache.HaveCoin(op)); + total_coins++; + } + + BOOST_CHECK_EQUAL(total_coins, initial_total_coins); + BOOST_CHECK_EQUAL(initial_size, initial_total_coins); + } + + // Snapshot should refuse to load at this height. + BOOST_REQUIRE(!CreateAndActivateUTXOSnapshot(m_node, m_path_root)); + BOOST_CHECK(chainman.ActiveChainstate().m_from_snapshot_blockhash.IsNull()); + BOOST_CHECK_EQUAL( + chainman.ActiveChainstate().m_from_snapshot_blockhash, + chainman.SnapshotBlockhash().value_or(uint256())); + + // Mine 10 more blocks, putting at us height 110 where a valid assumeutxo value can + // be found. 
+ mineBlocks(10); + initial_size += 10; + initial_total_coins += 10; + + // Should not load malleated snapshots + BOOST_REQUIRE(!CreateAndActivateUTXOSnapshot( + m_node, m_path_root, [](CAutoFile& auto_infile, SnapshotMetadata& metadata) { + // A UTXO is missing but count is correct + metadata.m_coins_count -= 1; + + COutPoint outpoint; + Coin coin; + + auto_infile >> outpoint; + auto_infile >> coin; + })); + BOOST_REQUIRE(!CreateAndActivateUTXOSnapshot( + m_node, m_path_root, [](CAutoFile& auto_infile, SnapshotMetadata& metadata) { + // Coins count is larger than coins in file + metadata.m_coins_count += 1; + })); + BOOST_REQUIRE(!CreateAndActivateUTXOSnapshot( + m_node, m_path_root, [](CAutoFile& auto_infile, SnapshotMetadata& metadata) { + // Coins count is smaller than coins in file + metadata.m_coins_count -= 1; + })); + + BOOST_REQUIRE(CreateAndActivateUTXOSnapshot(m_node, m_path_root)); + + // Ensure our active chain is the snapshot chainstate. + BOOST_CHECK(!chainman.ActiveChainstate().m_from_snapshot_blockhash.IsNull()); + BOOST_CHECK_EQUAL( + chainman.ActiveChainstate().m_from_snapshot_blockhash, + *chainman.SnapshotBlockhash()); + + // To be checked against later when we try loading a subsequent snapshot. + uint256 loaded_snapshot_blockhash{*chainman.SnapshotBlockhash()}; + + // Make some assertions about the both chainstates. These checks ensure the + // legacy chainstate hasn't changed and that the newly created chainstate + // reflects the expected content. + { + LOCK(::cs_main); + int chains_tested{0}; + + for (CChainState* chainstate : chainman.GetAll()) { + BOOST_TEST_MESSAGE("Checking coins in " << chainstate->ToString()); + CCoinsViewCache& coinscache = chainstate->CoinsTip(); + + // Both caches will be empty initially. + BOOST_CHECK_EQUAL((unsigned int)0, coinscache.GetCacheSize()); + + size_t total_coins{0}; + + for (CTransactionRef& txn : m_coinbase_txns) { + COutPoint op{txn->GetHash(), 0}; + BOOST_CHECK(coinscache.HaveCoin(op)); + total_coins++; + } + + BOOST_CHECK_EQUAL(initial_size , coinscache.GetCacheSize()); + BOOST_CHECK_EQUAL(total_coins, initial_total_coins); + chains_tested++; + } + + BOOST_CHECK_EQUAL(chains_tested, 2); + } + + // Mine some new blocks on top of the activated snapshot chainstate. + constexpr size_t new_coins{100}; + mineBlocks(new_coins); // Defined in TestChain100Setup. + + { + LOCK(::cs_main); + size_t coins_in_active{0}; + size_t coins_in_ibd{0}; + size_t coins_missing_ibd{0}; + + for (CChainState* chainstate : chainman.GetAll()) { + BOOST_TEST_MESSAGE("Checking coins in " << chainstate->ToString()); + CCoinsViewCache& coinscache = chainstate->CoinsTip(); + bool is_ibd = chainman.IsBackgroundIBD(chainstate); + + for (CTransactionRef& txn : m_coinbase_txns) { + COutPoint op{txn->GetHash(), 0}; + if (coinscache.HaveCoin(op)) { + (is_ibd ? coins_in_ibd : coins_in_active)++; + } else if (is_ibd) { + coins_missing_ibd++; + } + } + } + + BOOST_CHECK_EQUAL(coins_in_active, initial_total_coins + new_coins); + BOOST_CHECK_EQUAL(coins_in_ibd, initial_total_coins); + BOOST_CHECK_EQUAL(coins_missing_ibd, new_coins); + } + + // Snapshot should refuse to load after one has already loaded. + BOOST_REQUIRE(!CreateAndActivateUTXOSnapshot(m_node, m_path_root)); + + // Snapshot blockhash should be unchanged. 
+ BOOST_CHECK_EQUAL( + chainman.ActiveChainstate().m_from_snapshot_blockhash, + loaded_snapshot_blockhash); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/validation_tests.cpp b/src/test/validation_tests.cpp index 9e37f14921..ecf9453094 100644 --- a/src/test/validation_tests.cpp +++ b/src/test/validation_tests.cpp @@ -5,6 +5,7 @@ #include <chainparams.h> #include <net.h> #include <signet.h> +#include <uint256.h> #include <validation.h> #include <test/util/setup_common.h> @@ -119,4 +120,27 @@ BOOST_AUTO_TEST_CASE(signet_parse_tests) BOOST_CHECK(!CheckSignetBlockSolution(block, signet_params->GetConsensus())); } +//! Test retrieval of valid assumeutxo values. +BOOST_AUTO_TEST_CASE(test_assumeutxo) +{ + const auto params = CreateChainParams(*m_node.args, CBaseChainParams::REGTEST); + + // These heights don't have assumeutxo configurations associated, per the contents + // of chainparams.cpp. + std::vector<int> bad_heights{0, 100, 111, 115, 209, 211}; + + for (auto empty : bad_heights) { + const auto out = ExpectedAssumeutxo(empty, *params); + BOOST_CHECK(!out); + } + + const auto out110 = *ExpectedAssumeutxo(110, *params); + BOOST_CHECK_EQUAL(out110.hash_serialized, uint256S("76fd7334ac7c1baf57ddc0c626f073a655a35d98a4258cd1382c8cc2b8392e10")); + BOOST_CHECK_EQUAL(out110.nChainTx, (unsigned int)110); + + const auto out210 = *ExpectedAssumeutxo(210, *params); + BOOST_CHECK_EQUAL(out210.hash_serialized, uint256S("9c5ed99ef98544b34f8920b6d1802f72ac28ae6e2bd2bd4c316ff10c230df3f2")); + BOOST_CHECK_EQUAL(out210.nChainTx, (unsigned int)210); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp index 908ad35e1b..605c77fc3a 100644 --- a/src/torcontrol.cpp +++ b/src/torcontrol.cpp @@ -14,6 +14,7 @@ #include <netbase.h> #include <util/strencodings.h> #include <util/system.h> +#include <util/time.h> #include <deque> #include <functional> diff --git a/src/txdb.cpp b/src/txdb.cpp index 72460e7c69..4b4766e1ba 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -47,11 +47,15 @@ CCoinsViewDB::CCoinsViewDB(fs::path ldb_path, size_t nCacheSize, bool fMemory, b void CCoinsViewDB::ResizeCache(size_t new_cache_size) { - // Have to do a reset first to get the original `m_db` state to release its - // filesystem lock. - m_db.reset(); - m_db = MakeUnique<CDBWrapper>( - m_ldb_path, new_cache_size, m_is_memory, /*fWipe*/ false, /*obfuscate*/ true); + // We can't do this operation with an in-memory DB since we'll lose all the coins upon + // reset. + if (!m_is_memory) { + // Have to do a reset first to get the original `m_db` state to release its + // filesystem lock. 
+ m_db.reset(); + m_db = MakeUnique<CDBWrapper>( + m_ldb_path, new_cache_size, m_is_memory, /*fWipe*/ false, /*obfuscate*/ true); + } } bool CCoinsViewDB::GetCoin(const COutPoint &outpoint, Coin &coin) const { diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 470e665844..899835019a 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -9,14 +9,14 @@ #include <consensus/tx_verify.h> #include <consensus/validation.h> #include <optional.h> -#include <validation.h> -#include <policy/policy.h> #include <policy/fees.h> +#include <policy/policy.h> #include <policy/settings.h> #include <reverse_iterator.h> -#include <util/system.h> #include <util/moneystr.h> +#include <util/system.h> #include <util/time.h> +#include <validation.h> #include <validationinterface.h> CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef& _tx, const CAmount& _nFee, @@ -396,7 +396,10 @@ void CTxMemPool::addUnchecked(const CTxMemPoolEntry &entry, setEntries &setAnces nTransactionsUpdated++; totalTxSize += entry.GetTxSize(); - if (minerPolicyEstimator) {minerPolicyEstimator->processTransaction(entry, validFeeEstimate);} + m_total_fee += entry.GetFee(); + if (minerPolicyEstimator) { + minerPolicyEstimator->processTransaction(entry, validFeeEstimate); + } vTxHashes.emplace_back(tx.GetWitnessHash(), newit); newit->vTxHashesIdx = vTxHashes.size() - 1; @@ -432,6 +435,7 @@ void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason) vTxHashes.clear(); totalTxSize -= it->GetTxSize(); + m_total_fee -= it->GetFee(); cachedInnerUsage -= it->DynamicMemoryUsage(); cachedInnerUsage -= memusage::DynamicUsage(it->GetMemPoolParentsConst()) + memusage::DynamicUsage(it->GetMemPoolChildrenConst()); mapTx.erase(it); @@ -499,7 +503,7 @@ void CTxMemPool::removeRecursive(const CTransaction &origTx, MemPoolRemovalReaso RemoveStaged(setAllRemoves, false, reason); } -void CTxMemPool::removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags) +void CTxMemPool::removeForReorg(CChainState& active_chainstate, int flags) { // Remove transactions spending a coinbase which are now immature and no-longer-final transactions AssertLockHeld(cs); @@ -507,8 +511,9 @@ void CTxMemPool::removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMem for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { const CTransaction& tx = it->GetTx(); LockPoints lp = it->GetLockPoints(); - bool validLP = TestLockPointValidity(&lp); - if (!CheckFinalTx(tx, flags) || !CheckSequenceLocks(*this, tx, flags, &lp, validLP)) { + assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); + bool validLP = TestLockPointValidity(active_chainstate.m_chain, &lp); + if (!CheckFinalTx(active_chainstate.m_chain.Tip(), tx, flags) || !CheckSequenceLocks(active_chainstate, *this, tx, flags, &lp, validLP)) { // Note if CheckSequenceLocks fails the LockPoints may still be invalid // So it's critical that we remove the tx and not depend on the LockPoints. 
txToRemove.insert(it); @@ -517,8 +522,9 @@ void CTxMemPool::removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMem indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.hash); if (it2 != mapTx.end()) continue; - const Coin &coin = pcoins->AccessCoin(txin.prevout); + const Coin &coin = active_chainstate.CoinsTip().AccessCoin(txin.prevout); if (m_check_ratio != 0) assert(!coin.IsSpent()); + unsigned int nMemPoolHeight = active_chainstate.m_chain.Tip()->nHeight + 1; if (coin.IsSpent() || (coin.IsCoinBase() && ((signed long)nMemPoolHeight) - coin.nHeight < COINBASE_MATURITY)) { txToRemove.insert(it); break; @@ -590,6 +596,7 @@ void CTxMemPool::_clear() mapTx.clear(); mapNextTx.clear(); totalTxSize = 0; + m_total_fee = 0; cachedInnerUsage = 0; lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = false; @@ -623,15 +630,17 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const LogPrint(BCLog::MEMPOOL, "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size()); uint64_t checkTotal = 0; + CAmount check_total_fee{0}; uint64_t innerUsage = 0; CCoinsViewCache mempoolDuplicate(const_cast<CCoinsViewCache*>(pcoins)); - const int64_t spendheight = GetSpendHeight(mempoolDuplicate); + const int64_t spendheight = g_chainman.m_blockman.GetSpendHeight(mempoolDuplicate); std::list<const CTxMemPoolEntry*> waitingOnDependants; for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { unsigned int i = 0; checkTotal += it->GetTxSize(); + check_total_fee += it->GetFee(); innerUsage += it->DynamicMemoryUsage(); const CTransaction& tx = it->GetTx(); innerUsage += memusage::DynamicUsage(it->GetMemPoolParentsConst()) + memusage::DynamicUsage(it->GetMemPoolChildrenConst()); @@ -726,6 +735,7 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const } assert(totalTxSize == checkTotal); + assert(m_total_fee == check_total_fee); assert(innerUsage == cachedInnerUsage); } diff --git a/src/txmempool.h b/src/txmempool.h index 0a9cd81ff5..b8de326737 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -19,8 +19,8 @@ #include <optional.h> #include <policy/feerate.h> #include <primitives/transaction.h> -#include <sync.h> #include <random.h> +#include <sync.h> #include <util/hasher.h> #include <boost/multi_index_container.hpp> @@ -29,6 +29,7 @@ #include <boost/multi_index/sequenced_index.hpp> class CBlockIndex; +class CChainState; extern RecursiveMutex cs_main; /** Fake height value used in Coin to signify they are only in the memory pool (since 0.8) */ @@ -478,8 +479,9 @@ private: std::atomic<unsigned int> nTransactionsUpdated{0}; //!< Used by getblocktemplate to trigger CreateNewBlock() invocation CBlockPolicyEstimator* minerPolicyEstimator; - uint64_t totalTxSize; //!< sum of all mempool tx's virtual sizes. Differs from serialized tx size since witness data is discounted. Defined in BIP 141. - uint64_t cachedInnerUsage; //!< sum of dynamic memory usage of all the map elements (NOT the maps themselves) + uint64_t totalTxSize GUARDED_BY(cs); //!< sum of all mempool tx's virtual sizes. Differs from serialized tx size since witness data is discounted. Defined in BIP 141. 
+ CAmount m_total_fee GUARDED_BY(cs); //!< sum of all mempool tx's fees (NOT modified fee) + uint64_t cachedInnerUsage GUARDED_BY(cs); //!< sum of dynamic memory usage of all the map elements (NOT the maps themselves) mutable int64_t lastRollingFeeUpdate; mutable bool blockSinceLastRollingFeeBump; @@ -615,7 +617,7 @@ public: void addUnchecked(const CTxMemPoolEntry& entry, setEntries& setAncestors, bool validFeeEstimate = true) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main); void removeRecursive(const CTransaction& tx, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs); - void removeForReorg(const CCoinsViewCache* pcoins, unsigned int nMemPoolHeight, int flags) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main); + void removeForReorg(CChainState& active_chainstate, int flags) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main); void removeConflicts(const CTransaction& tx) EXCLUSIVE_LOCKS_REQUIRED(cs); void removeForBlock(const std::vector<CTransactionRef>& vtx, unsigned int nBlockHeight) EXCLUSIVE_LOCKS_REQUIRED(cs); @@ -724,6 +726,12 @@ public: return totalTxSize; } + CAmount GetTotalFee() const EXCLUSIVE_LOCKS_REQUIRED(cs) + { + AssertLockHeld(cs); + return m_total_fee; + } + bool exists(const GenTxid& gtxid) const { LOCK(cs); diff --git a/src/util/getuniquepath.cpp b/src/util/getuniquepath.cpp new file mode 100644 index 0000000000..9839d2f624 --- /dev/null +++ b/src/util/getuniquepath.cpp @@ -0,0 +1,10 @@ +#include <random.h> +#include <fs.h> +#include <util/strencodings.h> + +fs::path GetUniquePath(const fs::path& base) +{ + FastRandomContext rnd; + fs::path tmpFile = base / HexStr(rnd.randbytes(8)); + return tmpFile; +}
\ No newline at end of file diff --git a/src/util/getuniquepath.h b/src/util/getuniquepath.h new file mode 100644 index 0000000000..e0c6147876 --- /dev/null +++ b/src/util/getuniquepath.h @@ -0,0 +1,19 @@ +// Copyright (c) 2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_UTIL_GETUNIQUEPATH_H +#define BITCOIN_UTIL_GETUNIQUEPATH_H + +#include <fs.h> + +/** + * Helper function for getting a unique path + * + * @param[in] base Base path + * @returns base joined with a random 8-character long string. + * @post Returned path is unique with high probability. + */ +fs::path GetUniquePath(const fs::path& base); + +#endif // BITCOIN_UTIL_GETUNIQUEPATH_H
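The two files above introduce GetUniquePath(), which later in this diff replaces fs::unique_path() in DirIsWritable(). A minimal standalone sketch of the same idea, with std::filesystem and std::random standing in for the project's fs wrapper and FastRandomContext (an illustration, not project code):

#include <filesystem>
#include <random>
#include <string>

// Eight random bytes rendered as 16 lowercase hex characters.
static std::string RandomHex8Bytes()
{
    std::random_device rd;
    std::uniform_int_distribution<int> dist(0, 15);
    std::string hex;
    for (int i = 0; i < 16; ++i) hex += "0123456789abcdef"[dist(rd)];
    return hex;
}

// Join the base path with a random suffix; with 2^64 possible suffixes a
// collision is negligible, which is the property GetUniquePath() relies on.
std::filesystem::path GetUniquePathSketch(const std::filesystem::path& base)
{
    return base / RandomHex8Bytes();
}

int main()
{
    return GetUniquePathSketch(std::filesystem::temp_directory_path()).empty() ? 1 : 0;
}

DirIsWritable() (further down in this diff) uses the real helper to pick a throwaway file name when probing whether a data directory is writable.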
\ No newline at end of file diff --git a/src/util/macros.h b/src/util/macros.h index 36ea87c0fe..0887c80fd7 100644 --- a/src/util/macros.h +++ b/src/util/macros.h @@ -8,4 +8,11 @@ #define PASTE(x, y) x ## y #define PASTE2(x, y) PASTE(x, y) +/** + * Converts the parameter X to a string after macro replacement on X has been performed. + * Don't merge these into one macro! + */ +#define STRINGIZE(X) DO_STRINGIZE(X) +#define DO_STRINGIZE(X) #X + #endif // BITCOIN_UTIL_MACROS_H diff --git a/src/util/sock.cpp b/src/util/sock.cpp new file mode 100644 index 0000000000..4c65b5b680 --- /dev/null +++ b/src/util/sock.cpp @@ -0,0 +1,149 @@ +// Copyright (c) 2020-2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <compat.h> +#include <logging.h> +#include <tinyformat.h> +#include <util/sock.h> +#include <util/system.h> +#include <util/time.h> + +#include <codecvt> +#include <cwchar> +#include <locale> +#include <string> + +#ifdef USE_POLL +#include <poll.h> +#endif + +Sock::Sock() : m_socket(INVALID_SOCKET) {} + +Sock::Sock(SOCKET s) : m_socket(s) {} + +Sock::Sock(Sock&& other) +{ + m_socket = other.m_socket; + other.m_socket = INVALID_SOCKET; +} + +Sock::~Sock() { Reset(); } + +Sock& Sock::operator=(Sock&& other) +{ + Reset(); + m_socket = other.m_socket; + other.m_socket = INVALID_SOCKET; + return *this; +} + +SOCKET Sock::Get() const { return m_socket; } + +SOCKET Sock::Release() +{ + const SOCKET s = m_socket; + m_socket = INVALID_SOCKET; + return s; +} + +void Sock::Reset() { CloseSocket(m_socket); } + +ssize_t Sock::Send(const void* data, size_t len, int flags) const +{ + return send(m_socket, static_cast<const char*>(data), len, flags); +} + +ssize_t Sock::Recv(void* buf, size_t len, int flags) const +{ + return recv(m_socket, static_cast<char*>(buf), len, flags); +} + +bool Sock::Wait(std::chrono::milliseconds timeout, Event requested) const +{ +#ifdef USE_POLL + pollfd fd; + fd.fd = m_socket; + fd.events = 0; + if (requested & RECV) { + fd.events |= POLLIN; + } + if (requested & SEND) { + fd.events |= POLLOUT; + } + + return poll(&fd, 1, count_milliseconds(timeout)) != SOCKET_ERROR; +#else + if (!IsSelectableSocket(m_socket)) { + return false; + } + + fd_set fdset_recv; + fd_set fdset_send; + FD_ZERO(&fdset_recv); + FD_ZERO(&fdset_send); + + if (requested & RECV) { + FD_SET(m_socket, &fdset_recv); + } + + if (requested & SEND) { + FD_SET(m_socket, &fdset_send); + } + + timeval timeout_struct = MillisToTimeval(timeout); + + return select(m_socket + 1, &fdset_recv, &fdset_send, nullptr, &timeout_struct) != SOCKET_ERROR; +#endif /* USE_POLL */ +} + +#ifdef WIN32 +std::string NetworkErrorString(int err) +{ + wchar_t buf[256]; + buf[0] = 0; + if(FormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_MAX_WIDTH_MASK, + nullptr, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + buf, ARRAYSIZE(buf), nullptr)) + { + return strprintf("%s (%d)", std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>,wchar_t>().to_bytes(buf), err); + } + else + { + return strprintf("Unknown error (%d)", err); + } +} +#else +std::string NetworkErrorString(int err) +{ + char buf[256]; + buf[0] = 0; + /* Too bad there are two incompatible implementations of the + * thread-safe strerror. 
*/ + const char *s; +#ifdef STRERROR_R_CHAR_P /* GNU variant can return a pointer outside the passed buffer */ + s = strerror_r(err, buf, sizeof(buf)); +#else /* POSIX variant always returns message in buffer */ + s = buf; + if (strerror_r(err, buf, sizeof(buf))) + buf[0] = 0; +#endif + return strprintf("%s (%d)", s, err); +} +#endif + +bool CloseSocket(SOCKET& hSocket) +{ + if (hSocket == INVALID_SOCKET) + return false; +#ifdef WIN32 + int ret = closesocket(hSocket); +#else + int ret = close(hSocket); +#endif + if (ret) { + LogPrintf("Socket close failed: %d. Error: %s\n", hSocket, NetworkErrorString(WSAGetLastError())); + } + hSocket = INVALID_SOCKET; + return ret != SOCKET_ERROR; +} diff --git a/src/util/sock.h b/src/util/sock.h new file mode 100644 index 0000000000..26fe60f18f --- /dev/null +++ b/src/util/sock.h @@ -0,0 +1,118 @@ +// Copyright (c) 2020-2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_UTIL_SOCK_H +#define BITCOIN_UTIL_SOCK_H + +#include <compat.h> + +#include <chrono> +#include <string> + +/** + * RAII helper class that manages a socket. Mimics `std::unique_ptr`, but instead of a pointer it + * contains a socket and closes it automatically when it goes out of scope. + */ +class Sock +{ +public: + /** + * Default constructor, creates an empty object that does nothing when destroyed. + */ + Sock(); + + /** + * Take ownership of an existent socket. + */ + explicit Sock(SOCKET s); + + /** + * Copy constructor, disabled because closing the same socket twice is undesirable. + */ + Sock(const Sock&) = delete; + + /** + * Move constructor, grab the socket from another object and close ours (if set). + */ + Sock(Sock&& other); + + /** + * Destructor, close the socket or do nothing if empty. + */ + virtual ~Sock(); + + /** + * Copy assignment operator, disabled because closing the same socket twice is undesirable. + */ + Sock& operator=(const Sock&) = delete; + + /** + * Move assignment operator, grab the socket from another object and close ours (if set). + */ + virtual Sock& operator=(Sock&& other); + + /** + * Get the value of the contained socket. + * @return socket or INVALID_SOCKET if empty + */ + virtual SOCKET Get() const; + + /** + * Get the value of the contained socket and drop ownership. It will not be closed by the + * destructor after this call. + * @return socket or INVALID_SOCKET if empty + */ + virtual SOCKET Release(); + + /** + * Close if non-empty. + */ + virtual void Reset(); + + /** + * send(2) wrapper. Equivalent to `send(this->Get(), data, len, flags);`. Code that uses this + * wrapper can be unit-tested if this method is overridden by a mock Sock implementation. + */ + virtual ssize_t Send(const void* data, size_t len, int flags) const; + + /** + * recv(2) wrapper. Equivalent to `recv(this->Get(), buf, len, flags);`. Code that uses this + * wrapper can be unit-tested if this method is overridden by a mock Sock implementation. + */ + virtual ssize_t Recv(void* buf, size_t len, int flags) const; + + using Event = uint8_t; + + /** + * If passed to `Wait()`, then it will wait for readiness to read from the socket. + */ + static constexpr Event RECV = 0b01; + + /** + * If passed to `Wait()`, then it will wait for readiness to send to the socket. + */ + static constexpr Event SEND = 0b10; + + /** + * Wait for readiness for input (recv) or output (send). 
+ * @param[in] timeout Wait this much for at least one of the requested events to occur. + * @param[in] requested Wait for those events, bitwise-or of `RECV` and `SEND`. + * @return true on success and false otherwise + */ + virtual bool Wait(std::chrono::milliseconds timeout, Event requested) const; + +private: + /** + * Contained socket. `INVALID_SOCKET` designates the object is empty. + */ + SOCKET m_socket; +}; + +/** Return readable error string for a network error code */ +std::string NetworkErrorString(int err); + +/** Close socket and set hSocket to INVALID_SOCKET */ +bool CloseSocket(SOCKET& hSocket); + +#endif // BITCOIN_UTIL_SOCK_H diff --git a/src/util/strencodings.h b/src/util/strencodings.h index b4a61202ef..98379e9138 100644 --- a/src/util/strencodings.h +++ b/src/util/strencodings.h @@ -17,8 +17,6 @@ #include <string> #include <vector> -#define ARRAYLEN(array) (sizeof(array)/sizeof((array)[0])) - /** Used by SanitizeString() */ enum SafeChars { diff --git a/src/util/string.h b/src/util/string.h index 5ffdc80d88..b26facc502 100644 --- a/src/util/string.h +++ b/src/util/string.h @@ -25,6 +25,14 @@ return str.substr(front, end - front + 1); } +[[nodiscard]] inline std::string RemovePrefix(const std::string& str, const std::string& prefix) +{ + if (str.substr(0, prefix.size()) == prefix) { + return str.substr(prefix.size()); + } + return str; +} + /** * Join a list of items * diff --git a/src/util/system.cpp b/src/util/system.cpp index d1fb921642..9a2e719bbc 100644 --- a/src/util/system.cpp +++ b/src/util/system.cpp @@ -3,7 +3,6 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#include <sync.h> #include <util/system.h> #ifdef HAVE_BOOST_PROCESS @@ -11,6 +10,9 @@ #endif // HAVE_BOOST_PROCESS #include <chainparamsbase.h> +#include <sync.h> +#include <util/check.h> +#include <util/getuniquepath.h> #include <util/strencodings.h> #include <util/string.h> #include <util/translation.h> @@ -123,7 +125,7 @@ void ReleaseDirectoryLocks() bool DirIsWritable(const fs::path& directory) { - fs::path tmpFile = directory / fs::unique_path(); + fs::path tmpFile = GetUniquePath(directory); FILE* file = fsbridge::fopen(tmpFile, "a"); if (!file) return false; @@ -310,8 +312,22 @@ bool ArgsManager::ParseParameters(int argc, const char* const argv[], std::strin key[0] = '-'; #endif - if (key[0] != '-') + if (key[0] != '-') { + if (!m_accept_any_command && m_command.empty()) { + // The first non-dash arg is a registered command + Optional<unsigned int> flags = GetArgFlags(key); + if (!flags || !(*flags & ArgsManager::COMMAND)) { + error = strprintf("Invalid command '%s'", argv[i]); + return false; + } + } + m_command.push_back(key); + while (++i < argc) { + // The remaining args are command args + m_command.push_back(argv[i]); + } break; + } // Transform --foo to -foo if (key.length() > 1 && key[1] == '-') @@ -359,6 +375,26 @@ Optional<unsigned int> ArgsManager::GetArgFlags(const std::string& name) const return nullopt; } +std::optional<const ArgsManager::Command> ArgsManager::GetCommand() const +{ + Command ret; + LOCK(cs_args); + auto it = m_command.begin(); + if (it == m_command.end()) { + // No command was passed + return std::nullopt; + } + if (!m_accept_any_command) { + // The registered command + ret.command = *(it++); + } + while (it != m_command.end()) { + // The unregistered command and args (if any) + ret.args.push_back(*(it++)); + } + return ret; +} + std::vector<std::string> 
ArgsManager::GetArgs(const std::string& strArg) const { std::vector<std::string> result; @@ -504,8 +540,22 @@ void ArgsManager::ForceSetArg(const std::string& strArg, const std::string& strV m_settings.forced_settings[SettingName(strArg)] = strValue; } +void ArgsManager::AddCommand(const std::string& cmd, const std::string& help, const OptionsCategory& cat) +{ + Assert(cmd.find('=') == std::string::npos); + Assert(cmd.at(0) != '-'); + + LOCK(cs_args); + m_accept_any_command = false; // latch to false + std::map<std::string, Arg>& arg_map = m_available_args[cat]; + auto ret = arg_map.emplace(cmd, Arg{"", help, ArgsManager::COMMAND}); + Assert(ret.second); // Fail on duplicate commands +} + void ArgsManager::AddArg(const std::string& name, const std::string& help, unsigned int flags, const OptionsCategory& cat) { + Assert((flags & ArgsManager::COMMAND) == 0); // use AddCommand + // Split arg name from its help param size_t eq_index = name.find('='); if (eq_index == std::string::npos) { diff --git a/src/util/system.h b/src/util/system.h index 010fc5b49f..5959bc4196 100644 --- a/src/util/system.h +++ b/src/util/system.h @@ -35,8 +35,6 @@ #include <utility> #include <vector> -#include <boost/thread/condition_variable.hpp> // for boost::thread_interrupted - class UniValue; // Application startup time (used for uptime calculation) @@ -168,7 +166,7 @@ struct SectionInfo class ArgsManager { public: - enum Flags { + enum Flags : uint32_t { // Boolean options can accept negation syntax -noOPTION or -noOPTION=1 ALLOW_BOOL = 0x01, ALLOW_INT = 0x02, @@ -183,6 +181,7 @@ public: NETWORK_ONLY = 0x200, // This argument's value is sensitive (such as a password). SENSITIVE = 0x400, + COMMAND = 0x800, }; protected: @@ -195,9 +194,11 @@ protected: mutable RecursiveMutex cs_args; util::Settings m_settings GUARDED_BY(cs_args); + std::vector<std::string> m_command GUARDED_BY(cs_args); std::string m_network GUARDED_BY(cs_args); std::set<std::string> m_network_only_args GUARDED_BY(cs_args); std::map<OptionsCategory, std::map<std::string, Arg>> m_available_args GUARDED_BY(cs_args); + bool m_accept_any_command GUARDED_BY(cs_args){true}; std::list<SectionInfo> m_config_sections GUARDED_BY(cs_args); [[nodiscard]] bool ReadConfigStream(std::istream& stream, const std::string& filepath, std::string& error, bool ignore_invalid_keys = false); @@ -248,6 +249,20 @@ public: */ const std::list<SectionInfo> GetUnrecognizedSections() const; + struct Command { + /** The command (if one has been registered with AddCommand), or empty */ + std::string command; + /** + * If command is non-empty: Any args that followed it + * If command is empty: The unregistered command and any args that followed it + */ + std::vector<std::string> args; + }; + /** + * Get the command and command args (returns std::nullopt if no command provided) + */ + std::optional<const Command> GetCommand() const; + /** * Return a vector of strings of the given argument * @@ -334,6 +349,11 @@ public: void AddArg(const std::string& name, const std::string& help, unsigned int flags, const OptionsCategory& cat); /** + * Add subcommand + */ + void AddCommand(const std::string& cmd, const std::string& help, const OptionsCategory& cat); + + /** * Add many hidden arguments */ void AddHiddenArgs(const std::vector<std::string>& args); @@ -450,11 +470,6 @@ template <typename Callable> void TraceThread(const char* name, Callable func) func(); LogPrintf("%s thread exit\n", name); } - catch (const boost::thread_interrupted&) - { - LogPrintf("%s thread interrupt\n", name); - 
throw; - } catch (const std::exception& e) { PrintExceptionContinue(&e, name); throw; diff --git a/src/util/time.cpp b/src/util/time.cpp index e96972fe12..e6f0986a39 100644 --- a/src/util/time.cpp +++ b/src/util/time.cpp @@ -7,8 +7,11 @@ #include <config/bitcoin-config.h> #endif +#include <compat.h> #include <util/time.h> +#include <util/check.h> + #include <atomic> #include <boost/date_time/posix_time/posix_time.hpp> #include <ctime> @@ -18,7 +21,7 @@ void UninterruptibleSleep(const std::chrono::microseconds& n) { std::this_thread::sleep_for(n); } -static std::atomic<int64_t> nMockTime(0); //!< For unit testing +static std::atomic<int64_t> nMockTime(0); //!< For testing int64_t GetTime() { @@ -30,6 +33,49 @@ int64_t GetTime() return now; } +bool ChronoSanityCheck() +{ + // std::chrono::system_clock.time_since_epoch and time_t(0) are not guaranteed + // to use the Unix epoch timestamp, prior to C++20, but in practice they almost + // certainly will. Any differing behavior will be assumed to be an error, unless + // certain platforms prove to consistently deviate, at which point we'll cope + // with it by adding offsets. + + // Create a new clock from time_t(0) and make sure that it represents 0 + // seconds from the system_clock's time_since_epoch. Then convert that back + // to a time_t and verify that it's the same as before. + const time_t time_t_epoch{}; + auto clock = std::chrono::system_clock::from_time_t(time_t_epoch); + if (std::chrono::duration_cast<std::chrono::seconds>(clock.time_since_epoch()).count() != 0) { + return false; + } + + time_t time_val = std::chrono::system_clock::to_time_t(clock); + if (time_val != time_t_epoch) { + return false; + } + + // Check that the above zero time is actually equal to the known unix timestamp. + struct tm epoch; +#ifdef HAVE_GMTIME_R + if (gmtime_r(&time_val, &epoch) == nullptr) { +#else + if (gmtime_s(&epoch, &time_val) != 0) { +#endif + return false; + } + + if ((epoch.tm_sec != 0) || + (epoch.tm_min != 0) || + (epoch.tm_hour != 0) || + (epoch.tm_mday != 1) || + (epoch.tm_mon != 0) || + (epoch.tm_year != 70)) { + return false; + } + return true; +} + template <typename T> T GetTime() { @@ -44,35 +90,43 @@ template std::chrono::seconds GetTime(); template std::chrono::milliseconds GetTime(); template std::chrono::microseconds GetTime(); +template <typename T> +static T GetSystemTime() +{ + const auto now = std::chrono::duration_cast<T>(std::chrono::system_clock::now().time_since_epoch()); + assert(now.count() > 0); + return now; +} + void SetMockTime(int64_t nMockTimeIn) { + Assert(nMockTimeIn >= 0); nMockTime.store(nMockTimeIn, std::memory_order_relaxed); } -int64_t GetMockTime() +void SetMockTime(std::chrono::seconds mock_time_in) +{ + nMockTime.store(mock_time_in.count(), std::memory_order_relaxed); +} + +std::chrono::seconds GetMockTime() { - return nMockTime.load(std::memory_order_relaxed); + return std::chrono::seconds(nMockTime.load(std::memory_order_relaxed)); } int64_t GetTimeMillis() { - int64_t now = (boost::posix_time::microsec_clock::universal_time() - - boost::posix_time::ptime(boost::gregorian::date(1970,1,1))).total_milliseconds(); - assert(now > 0); - return now; + return int64_t{GetSystemTime<std::chrono::milliseconds>().count()}; } int64_t GetTimeMicros() { - int64_t now = (boost::posix_time::microsec_clock::universal_time() - - boost::posix_time::ptime(boost::gregorian::date(1970,1,1))).total_microseconds(); - assert(now > 0); - return now; + return int64_t{GetSystemTime<std::chrono::microseconds>().count()}; } int64_t 
GetSystemTimeInSeconds() { - return GetTimeMicros()/1000000; + return int64_t{GetSystemTime<std::chrono::seconds>().count()}; } std::string FormatISO8601DateTime(int64_t nTime) { @@ -114,3 +168,16 @@ int64_t ParseISO8601DateTime(const std::string& str) return 0; return (ptime - epoch).total_seconds(); } + +struct timeval MillisToTimeval(int64_t nTimeout) +{ + struct timeval timeout; + timeout.tv_sec = nTimeout / 1000; + timeout.tv_usec = (nTimeout % 1000) * 1000; + return timeout; +} + +struct timeval MillisToTimeval(std::chrono::milliseconds ms) +{ + return MillisToTimeval(count_milliseconds(ms)); +} diff --git a/src/util/time.h b/src/util/time.h index c69f604dc6..56131ce0fe 100644 --- a/src/util/time.h +++ b/src/util/time.h @@ -6,6 +6,8 @@ #ifndef BITCOIN_UTIL_TIME_H #define BITCOIN_UTIL_TIME_H +#include <compat.h> + #include <chrono> #include <stdint.h> #include <string> @@ -25,6 +27,7 @@ void UninterruptibleSleep(const std::chrono::microseconds& n); * interface that doesn't support std::chrono (e.g. RPC, debug log, or the GUI) */ inline int64_t count_seconds(std::chrono::seconds t) { return t.count(); } +inline int64_t count_milliseconds(std::chrono::milliseconds t) { return t.count(); } inline int64_t count_microseconds(std::chrono::microseconds t) { return t.count(); } /** @@ -40,10 +43,19 @@ int64_t GetTimeMicros(); /** Returns the system time (not mockable) */ int64_t GetSystemTimeInSeconds(); // Like GetTime(), but not mockable -/** For testing. Set e.g. with the setmocktime rpc, or -mocktime argument */ +/** + * DEPRECATED + * Use SetMockTime with chrono type + * + * @param[in] nMockTimeIn Time in seconds. + */ void SetMockTime(int64_t nMockTimeIn); + +/** For testing. Set e.g. with the setmocktime rpc, or -mocktime argument */ +void SetMockTime(std::chrono::seconds mock_time_in); + /** For testing */ -int64_t GetMockTime(); +std::chrono::seconds GetMockTime(); /** Return system time (or mocked time, if set) */ template <typename T> @@ -57,4 +69,17 @@ std::string FormatISO8601DateTime(int64_t nTime); std::string FormatISO8601Date(int64_t nTime); int64_t ParseISO8601DateTime(const std::string& str); +/** + * Convert milliseconds to a struct timeval for e.g. select. + */ +struct timeval MillisToTimeval(int64_t nTimeout); + +/** + * Convert milliseconds to a struct timeval for e.g. select. + */ +struct timeval MillisToTimeval(std::chrono::milliseconds ms); + +/** Sanity check epoch match normal Unix epoch */ +bool ChronoSanityCheck(); + #endif // BITCOIN_UTIL_TIME_H diff --git a/src/validation.cpp b/src/validation.cpp index eeb99f0f0a..1b8fb1fd45 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -17,9 +17,11 @@ #include <cuckoocache.h> #include <flatfile.h> #include <hash.h> +#include <index/blockfilterindex.h> #include <index/txindex.h> #include <logging.h> #include <logging/timer.h> +#include <node/coinstats.h> #include <node/ui_interface.h> #include <optional.h> #include <policy/policy.h> @@ -167,17 +169,19 @@ namespace { std::set<int> setDirtyFileInfo; } // anon namespace -CBlockIndex* LookupBlockIndex(const uint256& hash) +CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) { AssertLockHeld(cs_main); - BlockMap::const_iterator it = g_chainman.BlockIndex().find(hash); - return it == g_chainman.BlockIndex().end() ? nullptr : it->second; + assert(std::addressof(g_chainman.BlockIndex()) == std::addressof(m_block_index)); + BlockMap::const_iterator it = m_block_index.find(hash); + return it == m_block_index.end() ? 
nullptr : it->second; } -CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator) +CBlockIndex* BlockManager::FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator) { AssertLockHeld(cs_main); + assert(std::addressof(g_chainman.m_blockman) == std::addressof(*this)); // Find the latest block common to locator and chain - we expect that // locator.vHave is sorted descending by height. for (const uint256& hash : locator.vHave) { @@ -200,9 +204,10 @@ static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false); static FlatFileSeq BlockFileSeq(); static FlatFileSeq UndoFileSeq(); -bool CheckFinalTx(const CTransaction &tx, int flags) +bool CheckFinalTx(const CBlockIndex* active_chain_tip, const CTransaction &tx, int flags) { AssertLockHeld(cs_main); + assert(std::addressof(*::ChainActive().Tip()) == std::addressof(*active_chain_tip)); // By convention a negative value for flags indicates that the // current network-enforced consensus rules should be used. In @@ -218,7 +223,7 @@ bool CheckFinalTx(const CTransaction &tx, int flags) // evaluated is what is used. Thus if we want to know if a // transaction can be part of the *next* block, we need to call // IsFinalTx() with one more than ::ChainActive().Height(). - const int nBlockHeight = ::ChainActive().Height() + 1; + const int nBlockHeight = active_chain_tip->nHeight + 1; // BIP113 requires that time-locked transactions have nLockTime set to // less than the median time of the previous block they're contained in. @@ -226,13 +231,13 @@ bool CheckFinalTx(const CTransaction &tx, int flags) // chain tip, so we use that to calculate the median time passed to // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set. const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST) - ? ::ChainActive().Tip()->GetMedianTimePast() + ? active_chain_tip->GetMedianTimePast() : GetAdjustedTime(); return IsFinalTx(tx, nBlockHeight, nBlockTime); } -bool TestLockPointValidity(const LockPoints* lp) +bool TestLockPointValidity(CChain& active_chain, const LockPoints* lp) { AssertLockHeld(cs_main); assert(lp); @@ -241,7 +246,8 @@ bool TestLockPointValidity(const LockPoints* lp) if (lp->maxInputBlock) { // Check whether ::ChainActive() is an extension of the block at which the LockPoints // calculation was valid. If not LockPoints are no longer valid - if (!::ChainActive().Contains(lp->maxInputBlock)) { + assert(std::addressof(::ChainActive()) == std::addressof(active_chain)); + if (!active_chain.Contains(lp->maxInputBlock)) { return false; } } @@ -250,22 +256,28 @@ bool TestLockPointValidity(const LockPoints* lp) return true; } -bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flags, LockPoints* lp, bool useExistingLockPoints) +bool CheckSequenceLocks(CChainState& active_chainstate, + const CTxMemPool& pool, + const CTransaction& tx, + int flags, + LockPoints* lp, + bool useExistingLockPoints) { AssertLockHeld(cs_main); AssertLockHeld(pool.cs); + assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); - CBlockIndex* tip = ::ChainActive().Tip(); + CBlockIndex* tip = active_chainstate.m_chain.Tip(); assert(tip != nullptr); CBlockIndex index; index.pprev = tip; - // CheckSequenceLocks() uses ::ChainActive().Height()+1 to evaluate + // CheckSequenceLocks() uses active_chainstate.m_chain.Height()+1 to evaluate // height based locks because when SequenceLocks() is called within // ConnectBlock(), the height of the block *being* // evaluated is what is used. 
// Thus if we want to know if a transaction can be part of the - // *next* block, we need to use one more than ::ChainActive().Height() + // *next* block, we need to use one more than active_chainstate.m_chain.Height() index.nHeight = tip->nHeight + 1; std::pair<int, int64_t> lockPair; @@ -275,8 +287,8 @@ bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flag lockPair.second = lp->time; } else { - // CoinsTip() contains the UTXO set for ::ChainActive().Tip() - CCoinsViewMemPool viewMemPool(&::ChainstateActive().CoinsTip(), pool); + // CoinsTip() contains the UTXO set for active_chainstate.m_chain.Tip() + CCoinsViewMemPool viewMemPool(&active_chainstate.CoinsTip(), pool); std::vector<int> prevheights; prevheights.resize(tx.vin.size()); for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) { @@ -325,7 +337,7 @@ bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flag // Returns the script flags which should be checked for a given block static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams); -static void LimitMempoolSize(CTxMemPool& pool, size_t limit, std::chrono::seconds age) +static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache, size_t limit, std::chrono::seconds age) EXCLUSIVE_LOCKS_REQUIRED(pool.cs, ::cs_main) { int expired = pool.Expire(GetTime<std::chrono::seconds>() - age); @@ -335,18 +347,20 @@ static void LimitMempoolSize(CTxMemPool& pool, size_t limit, std::chrono::second std::vector<COutPoint> vNoSpendsRemaining; pool.TrimToSize(limit, &vNoSpendsRemaining); + assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(coins_cache)); for (const COutPoint& removed : vNoSpendsRemaining) - ::ChainstateActive().CoinsTip().Uncache(removed); + coins_cache.Uncache(removed); } -static bool IsCurrentForFeeEstimation() EXCLUSIVE_LOCKS_REQUIRED(cs_main) +static bool IsCurrentForFeeEstimation(CChainState& active_chainstate) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { AssertLockHeld(cs_main); - if (::ChainstateActive().IsInitialBlockDownload()) + assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); + if (active_chainstate.IsInitialBlockDownload()) return false; - if (::ChainActive().Tip()->GetBlockTime() < count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE)) + if (active_chainstate.m_chain.Tip()->GetBlockTime() < count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE)) return false; - if (::ChainActive().Height() < pindexBestHeader->nHeight - 1) + if (active_chainstate.m_chain.Height() < pindexBestHeader->nHeight - 1) return false; return true; } @@ -364,10 +378,11 @@ static bool IsCurrentForFeeEstimation() EXCLUSIVE_LOCKS_REQUIRED(cs_main) * and instead just erase from the mempool as needed. 
*/ -static void UpdateMempoolForReorg(CTxMemPool& mempool, DisconnectedBlockTransactions& disconnectpool, bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, mempool.cs) +static void UpdateMempoolForReorg(CChainState& active_chainstate, CTxMemPool& mempool, DisconnectedBlockTransactions& disconnectpool, bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, mempool.cs) { AssertLockHeld(cs_main); AssertLockHeld(mempool.cs); + assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); std::vector<uint256> vHashUpdate; // disconnectpool's insertion_order index sorts the entries from // oldest to newest, but the oldest entry will be the last tx from the @@ -378,10 +393,8 @@ static void UpdateMempoolForReorg(CTxMemPool& mempool, DisconnectedBlockTransact auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin(); while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) { // ignore validation errors in resurrected transactions - TxValidationState stateDummy; if (!fAddToMempool || (*it)->IsCoinBase() || - !AcceptToMemoryPool(mempool, stateDummy, *it, - nullptr /* plTxnReplaced */, true /* bypass_limits */)) { + AcceptToMemoryPool(active_chainstate, mempool, *it, true /* bypass_limits */).m_result_type != MempoolAcceptResult::ResultType::VALID) { // If the transaction doesn't make it in to the mempool, remove any // transactions that depend on it (which would now be orphans). mempool.removeRecursive(**it, MemPoolRemovalReason::REORG); @@ -399,9 +412,9 @@ static void UpdateMempoolForReorg(CTxMemPool& mempool, DisconnectedBlockTransact mempool.UpdateTransactionsFromBlock(vHashUpdate); // We also need to remove any now-immature transactions - mempool.removeForReorg(&::ChainstateActive().CoinsTip(), ::ChainActive().Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); + mempool.removeForReorg(active_chainstate, STANDARD_LOCKTIME_VERIFY_FLAGS); // Re-limit mempool size, in case we added any transactions - LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)}); + LimitMempoolSize(mempool, active_chainstate.CoinsTip(), gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)}); } /** @@ -411,7 +424,7 @@ static void UpdateMempoolForReorg(CTxMemPool& mempool, DisconnectedBlockTransact * */ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& view, const CTxMemPool& pool, - unsigned int flags, PrecomputedTransactionData& txdata) + unsigned int flags, PrecomputedTransactionData& txdata, CCoinsViewCache& coins_tip) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) { AssertLockHeld(cs_main); @@ -436,7 +449,8 @@ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationS assert(txFrom->vout.size() > txin.prevout.n); assert(txFrom->vout[txin.prevout.n] == coin.out); } else { - const Coin& coinFromUTXOSet = ::ChainstateActive().CoinsTip().AccessCoin(txin.prevout); + assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(coins_tip)); + const Coin& coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout); assert(!coinFromUTXOSet.IsSpent()); assert(coinFromUTXOSet.out == coin.out); } @@ -451,19 +465,19 @@ namespace { class MemPoolAccept { public: - explicit MemPoolAccept(CTxMemPool& mempool) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&::ChainstateActive().CoinsTip(), m_pool), + explicit 
MemPoolAccept(CTxMemPool& mempool, CChainState& active_chainstate) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&active_chainstate.CoinsTip(), m_pool), m_active_chainstate(active_chainstate), m_limit_ancestors(gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)), m_limit_ancestor_size(gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000), m_limit_descendants(gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)), - m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) {} + m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) { + assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate)); + } // We put the arguments we're handed into a struct, so we can pass them // around easier. struct ATMPArgs { const CChainParams& m_chainparams; - TxValidationState &m_state; const int64_t m_accept_time; - std::list<CTransactionRef>* m_replaced_transactions; const bool m_bypass_limits; /* * Return any outpoints which were not previously present in the coins @@ -474,11 +488,10 @@ public: */ std::vector<COutPoint>& m_coins_to_uncache; const bool m_test_accept; - CAmount* m_fee_out; }; // Single transaction acceptance - bool AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + MempoolAcceptResult AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main); private: // All the intermediate state that gets passed between the various levels @@ -489,14 +502,17 @@ private: CTxMemPool::setEntries m_all_conflicting; CTxMemPool::setEntries m_ancestors; std::unique_ptr<CTxMemPoolEntry> m_entry; + std::list<CTransactionRef> m_replaced_transactions; bool m_replacement_transaction; + CAmount m_base_fees; CAmount m_modified_fees; CAmount m_conflicting_fees; size_t m_conflicting_size; const CTransactionRef& m_ptx; const uint256& m_hash; + TxValidationState m_state; }; // Run the policy checks on a given transaction, excluding any script checks. @@ -507,18 +523,18 @@ private: // Run the script checks using our policy flags. As this can be slow, we should // only invoke this on transactions that have otherwise passed policy checks. - bool PolicyScriptChecks(ATMPArgs& args, const Workspace& ws, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); + bool PolicyScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); // Re-run the script checks, using consensus flags, and try to cache the // result in the scriptcache. This should be done after // PolicyScriptChecks(). This requires that all inputs either be in our // utxo set or in the mempool. - bool ConsensusScriptChecks(ATMPArgs& args, const Workspace& ws, PrecomputedTransactionData &txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); + bool ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData &txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); // Try to add the transaction to the mempool, removing any conflicts first. // Returns true if the transaction is in the mempool after any size // limiting is performed, false otherwise. - bool Finalize(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); + bool Finalize(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); // Compare a package's feerate against minimum allowed. 
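CheckFeeRate()'s body is not part of this hunk, but the comparison its comment describes is plain feerate arithmetic: the fee a package pays must cover the minimum feerate for its size. A rough standalone illustration (the names, units, and rounding here are mine, not the project's):

#include <cstddef>
#include <cstdint>

using Amount = std::int64_t; // satoshis

// Fee demanded by a feerate of `feerate_per_kvb` sat/kvB for `size` vbytes (rounded up).
Amount FeeForSize(Amount feerate_per_kvb, std::size_t size)
{
    return (feerate_per_kvb * static_cast<Amount>(size) + 999) / 1000;
}

// Accept only if the package pays at least what the minimum feerate demands.
bool CheckFeeRateSketch(std::size_t package_size, Amount package_fee, Amount min_feerate_per_kvb)
{
    return package_fee >= FeeForSize(min_feerate_per_kvb, package_size);
}

int main()
{
    // 250 vbytes at a 1000 sat/kvB floor needs 250 sat; 300 sat clears it, 200 does not.
    return (CheckFeeRateSketch(250, 300, 1000) && !CheckFeeRateSketch(250, 200, 1000)) ? 0 : 1;
}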
bool CheckFeeRate(size_t package_size, CAmount package_fee, TxValidationState& state) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs) @@ -540,6 +556,8 @@ private: CCoinsViewMemPool m_viewmempool; CCoinsView m_dummy; + CChainState& m_active_chainstate; + // The package limits in effect at the time of invocation. const size_t m_limit_ancestors; const size_t m_limit_ancestor_size; @@ -556,12 +574,12 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) const uint256& hash = ws.m_hash; // Copy/alias what we need out of args - TxValidationState &state = args.m_state; const int64_t nAcceptTime = args.m_accept_time; const bool bypass_limits = args.m_bypass_limits; std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache; // Alias what we need out of ws + TxValidationState& state = ws.m_state; std::set<uint256>& setConflicts = ws.m_conflicts; CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting; CTxMemPool::setEntries& setAncestors = ws.m_ancestors; @@ -594,7 +612,8 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // Only accept nLockTime-using transactions that can be mined in the next // block; we don't want our mempool filled up with transactions that can't // be mined yet. - if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS)) + assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); + if (!CheckFinalTx(m_active_chainstate.m_chain.Tip(), tx, STANDARD_LOCKTIME_VERIFY_FLAGS)) return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final"); // is it already in the memory pool? @@ -642,7 +661,8 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) LockPoints lp; m_view.SetBackend(m_viewmempool); - const CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip(); + assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip())); + const CCoinsViewCache& coins_cache = m_active_chainstate.CoinsTip(); // do all inputs exist? for (const CTxIn& txin : tx.vin) { if (!coins_cache.HaveCoinInCache(txin.prevout)) { @@ -678,22 +698,19 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // be mined yet. 
// Must keep pool.cs for this unless we change CheckSequenceLocks to take a // CoinsViewCache instead of create its own - if (!CheckSequenceLocks(m_pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) + assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate)); + if (!CheckSequenceLocks(m_active_chainstate, m_pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final"); - CAmount nFees = 0; - if (!Consensus::CheckTxInputs(tx, state, m_view, GetSpendHeight(m_view), nFees)) { + assert(std::addressof(g_chainman.m_blockman) == std::addressof(m_active_chainstate.m_blockman)); + if (!Consensus::CheckTxInputs(tx, state, m_view, m_active_chainstate.m_blockman.GetSpendHeight(m_view), ws.m_base_fees)) { return false; // state filled in by CheckTxInputs } - // If fee_out is passed, return the fee to the caller - if (args.m_fee_out) { - *args.m_fee_out = nFees; - } - // Check for non-standard pay-to-script-hash in inputs const auto& params = args.m_chainparams.GetConsensus(); - auto taproot_state = VersionBitsState(::ChainActive().Tip(), params, Consensus::DEPLOYMENT_TAPROOT, versionbitscache); + assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); + auto taproot_state = VersionBitsState(m_active_chainstate.m_chain.Tip(), params, Consensus::DEPLOYMENT_TAPROOT, versionbitscache); if (fRequireStandard && !AreInputsStandard(tx, m_view, taproot_state == ThresholdState::ACTIVE)) { return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs"); } @@ -705,7 +722,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS); // nModifiedFees includes any fee deltas from PrioritiseTransaction - nModifiedFees = nFees; + nModifiedFees = ws.m_base_fees; m_pool.ApplyDelta(hash, nModifiedFees); // Keep track of transactions that spend a coinbase, which we re-scan @@ -719,7 +736,8 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) } } - entry.reset(new CTxMemPoolEntry(ptx, nFees, nAcceptTime, ::ChainActive().Height(), + assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); + entry.reset(new CTxMemPoolEntry(ptx, ws.m_base_fees, nAcceptTime, m_active_chainstate.m_chain.Height(), fSpendsCoinbase, nSigOpsCost, lp)); unsigned int nSize = entry->GetTxSize(); @@ -923,11 +941,10 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) return true; } -bool MemPoolAccept::PolicyScriptChecks(ATMPArgs& args, const Workspace& ws, PrecomputedTransactionData& txdata) +bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) { const CTransaction& tx = *ws.m_ptx; - - TxValidationState &state = args.m_state; + TxValidationState& state = ws.m_state; constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS; @@ -950,12 +967,11 @@ bool MemPoolAccept::PolicyScriptChecks(ATMPArgs& args, const Workspace& ws, Prec return true; } -bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs& args, const Workspace& ws, PrecomputedTransactionData& txdata) +bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) { const CTransaction& tx = *ws.m_ptx; const uint256& hash = ws.m_hash; - - TxValidationState &state = args.m_state; + TxValidationState& state = ws.m_state; const CChainParams& chainparams = args.m_chainparams; // Check 
again against the current block tip's script verification @@ -973,8 +989,10 @@ bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs& args, const Workspace& ws, P // There is a similar check in CreateNewBlock() to prevent creating // invalid blocks (using TestBlockValidity), however allowing such // transactions into the mempool can be exploited as a DoS attack. - unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(::ChainActive().Tip(), chainparams.GetConsensus()); - if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, txdata)) { + assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); + unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(m_active_chainstate.m_chain.Tip(), chainparams.GetConsensus()); + assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip())); + if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, txdata, m_active_chainstate.CoinsTip())) { return error("%s: BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s", __func__, hash.ToString(), state.ToString()); } @@ -982,11 +1000,11 @@ bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs& args, const Workspace& ws, P return true; } -bool MemPoolAccept::Finalize(ATMPArgs& args, Workspace& ws) +bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws) { const CTransaction& tx = *ws.m_ptx; const uint256& hash = ws.m_hash; - TxValidationState &state = args.m_state; + TxValidationState& state = ws.m_state; const bool bypass_limits = args.m_bypass_limits; CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting; @@ -1005,8 +1023,7 @@ bool MemPoolAccept::Finalize(ATMPArgs& args, Workspace& ws) hash.ToString(), FormatMoney(nModifiedFees - nConflictingFees), (int)entry->GetTxSize() - (int)nConflictingSize); - if (args.m_replaced_transactions) - args.m_replaced_transactions->push_back(it->GetSharedTx()); + ws.m_replaced_transactions.push_back(it->GetSharedTx()); } m_pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED); @@ -1015,28 +1032,30 @@ bool MemPoolAccept::Finalize(ATMPArgs& args, Workspace& ws) // - it's not being re-added during a reorg which bypasses typical mempool fee limits // - the node is not behind // - the transaction is not dependent on any other transactions in the mempool - bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && m_pool.HasNoInputsOf(tx); + assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate)); + bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation(m_active_chainstate) && m_pool.HasNoInputsOf(tx); // Store transaction in memory m_pool.addUnchecked(*entry, setAncestors, validForFeeEstimation); // trim mempool and check if tx was trimmed if (!bypass_limits) { - LimitMempoolSize(m_pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)}); + assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip())); + LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip(), gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)}); if (!m_pool.exists(hash)) return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, 
"mempool full"); } return true; } -bool MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) +MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) { AssertLockHeld(cs_main); LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool()) - Workspace workspace(ptx); + Workspace ws(ptx); - if (!PreChecks(args, workspace)) return false; + if (!PreChecks(args, ws)) return MempoolAcceptResult(ws.m_state); // Only compute the precomputed transaction data if we need to verify // scripts (ie, other policy checks pass). We perform the inexpensive @@ -1044,51 +1063,56 @@ bool MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs // checks pass, to mitigate CPU exhaustion denial-of-service attacks. PrecomputedTransactionData txdata; - if (!PolicyScriptChecks(args, workspace, txdata)) return false; + if (!PolicyScriptChecks(args, ws, txdata)) return MempoolAcceptResult(ws.m_state); - if (!ConsensusScriptChecks(args, workspace, txdata)) return false; + if (!ConsensusScriptChecks(args, ws, txdata)) return MempoolAcceptResult(ws.m_state); // Tx was accepted, but not added - if (args.m_test_accept) return true; + if (args.m_test_accept) { + return MempoolAcceptResult(std::move(ws.m_replaced_transactions), ws.m_base_fees); + } - if (!Finalize(args, workspace)) return false; + if (!Finalize(args, ws)) return MempoolAcceptResult(ws.m_state); GetMainSignals().TransactionAddedToMempool(ptx, m_pool.GetAndIncrementSequence()); - return true; + return MempoolAcceptResult(std::move(ws.m_replaced_transactions), ws.m_base_fees); } } // anon namespace /** (try to) add transaction to memory pool with a specified acceptance time **/ -static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, TxValidationState &state, const CTransactionRef &tx, - int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced, - bool bypass_limits, bool test_accept, CAmount* fee_out=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +static MempoolAcceptResult AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, + CChainState& active_chainstate, + const CTransactionRef &tx, int64_t nAcceptTime, + bool bypass_limits, bool test_accept) + EXCLUSIVE_LOCKS_REQUIRED(cs_main) { std::vector<COutPoint> coins_to_uncache; - MemPoolAccept::ATMPArgs args { chainparams, state, nAcceptTime, plTxnReplaced, bypass_limits, coins_to_uncache, test_accept, fee_out }; - bool res = MemPoolAccept(pool).AcceptSingleTransaction(tx, args); - if (!res) { + MemPoolAccept::ATMPArgs args { chainparams, nAcceptTime, bypass_limits, coins_to_uncache, test_accept }; + + assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); + const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptSingleTransaction(tx, args); + if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) { // Remove coins that were not present in the coins cache before calling ATMPW; // this is to prevent memory DoS in case we receive a large number of // invalid transactions that attempt to overrun the in-memory coins cache // (`CCoinsViewCache::cacheCoins`). 
for (const COutPoint& hashTx : coins_to_uncache) - ::ChainstateActive().CoinsTip().Uncache(hashTx); + active_chainstate.CoinsTip().Uncache(hashTx); } // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits BlockValidationState state_dummy; - ::ChainstateActive().FlushStateToDisk(chainparams, state_dummy, FlushStateMode::PERIODIC); - return res; + active_chainstate.FlushStateToDisk(chainparams, state_dummy, FlushStateMode::PERIODIC); + return result; } -bool AcceptToMemoryPool(CTxMemPool& pool, TxValidationState &state, const CTransactionRef &tx, - std::list<CTransactionRef>* plTxnReplaced, - bool bypass_limits, bool test_accept, CAmount* fee_out) +MempoolAcceptResult AcceptToMemoryPool(CChainState& active_chainstate, CTxMemPool& pool, const CTransactionRef& tx, + bool bypass_limits, bool test_accept) { - const CChainParams& chainparams = Params(); - return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, GetTime(), plTxnReplaced, bypass_limits, test_accept, fee_out); + assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); + return AcceptToMemoryPoolWithTime(Params(), pool, active_chainstate, tx, GetTime(), bypass_limits, test_accept); } CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock) @@ -1262,8 +1286,8 @@ void CoinsViews::InitCache() } CChainState::CChainState(CTxMemPool& mempool, BlockManager& blockman, uint256 from_snapshot_blockhash) - : m_blockman(blockman), - m_mempool(mempool), + : m_mempool(mempool), + m_blockman(blockman), m_from_snapshot_blockhash(from_snapshot_blockhash) {} void CChainState::InitCoinsDB( @@ -1410,9 +1434,10 @@ bool CScriptCheck::operator()() { return VerifyScript(scriptSig, m_tx_out.scriptPubKey, witness, nFlags, CachingTransactionSignatureChecker(ptxTo, nIn, m_tx_out.nValue, cacheStore, *txdata), &error); } -int GetSpendHeight(const CCoinsViewCache& inputs) +int BlockManager::GetSpendHeight(const CCoinsViewCache& inputs) { - LOCK(cs_main); + AssertLockHeld(cs_main); + assert(std::addressof(g_chainman.m_blockman) == std::addressof(*this)); CBlockIndex* pindexPrev = LookupBlockIndex(inputs.GetBestBlock()); return pindexPrev->nHeight + 1; } @@ -2246,17 +2271,25 @@ bool CChainState::FlushStateToDisk( { bool fFlushForPrune = false; bool fDoFullFlush = false; + CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(&m_mempool); LOCK(cs_LastBlockFile); if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) { + // make sure we don't prune above the blockfilterindexes bestblocks + // pruning is height-based + int last_prune = m_chain.Height(); // last height we can prune + ForEachBlockFilterIndex([&](BlockFilterIndex& index) { + last_prune = std::max(1, std::min(last_prune, index.GetSummary().best_block_height)); + }); + if (nManualPruneHeight > 0) { LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH); - m_blockman.FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight, m_chain.Height()); + m_blockman.FindFilesToPruneManual(setFilesToPrune, std::min(last_prune, nManualPruneHeight), m_chain.Height()); } else { LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH); - m_blockman.FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight(), m_chain.Height(), IsInitialBlockDownload()); + m_blockman.FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight(), m_chain.Height(), last_prune, 
IsInitialBlockDownload()); fCheckForPruning = false; } if (!setFilesToPrune.empty()) { @@ -2690,7 +2723,7 @@ bool CChainState::ActivateBestChainStep(BlockValidationState& state, const CChai if (!DisconnectTip(state, chainparams, &disconnectpool)) { // This is likely a fatal error, but keep the mempool consistent, // just in case. Only remove from the mempool in this case. - UpdateMempoolForReorg(m_mempool, disconnectpool, false); + UpdateMempoolForReorg(::ChainstateActive(), m_mempool, disconnectpool, false); // If we're unable to disconnect a block during normal operation, // then that is a failure of our local system -- we should abort @@ -2734,7 +2767,7 @@ bool CChainState::ActivateBestChainStep(BlockValidationState& state, const CChai // A system error occurred (disk space, database error, ...). // Make the mempool consistent with the current tip, just in case // any observers try to use it before shutdown. - UpdateMempoolForReorg(m_mempool, disconnectpool, false); + UpdateMempoolForReorg(::ChainstateActive(), m_mempool, disconnectpool, false); return false; } } else { @@ -2751,7 +2784,7 @@ bool CChainState::ActivateBestChainStep(BlockValidationState& state, const CChai if (fBlocksDisconnected) { // If any blocks were disconnected, disconnectpool may be non empty. Add // any disconnected transactions back to the mempool. - UpdateMempoolForReorg(m_mempool, disconnectpool, true); + UpdateMempoolForReorg(::ChainstateActive(), m_mempool, disconnectpool, true); } m_mempool.check(&CoinsTip()); @@ -2767,7 +2800,7 @@ static SynchronizationState GetSynchronizationState(bool init) return SynchronizationState::INIT_DOWNLOAD; } -static bool NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) { +static bool NotifyHeaderTip(CChainState& chainstate) LOCKS_EXCLUDED(cs_main) { bool fNotify = false; bool fInitialBlockDownload = false; static CBlockIndex* pindexHeaderOld = nullptr; @@ -2778,7 +2811,8 @@ static bool NotifyHeaderTip() LOCKS_EXCLUDED(cs_main) { if (pindexHeader != pindexHeaderOld) { fNotify = true; - fInitialBlockDownload = ::ChainstateActive().IsInitialBlockDownload(); + assert(std::addressof(::ChainstateActive()) == std::addressof(chainstate)); + fInitialBlockDownload = chainstate.IsInitialBlockDownload(); pindexHeaderOld = pindexHeader; } } @@ -2895,10 +2929,6 @@ bool CChainState::ActivateBestChain(BlockValidationState &state, const CChainPar return true; } -bool ActivateBestChain(BlockValidationState &state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock) { - return ::ChainstateActive().ActivateBestChain(state, chainparams, std::move(pblock)); -} - bool CChainState::PreciousBlock(BlockValidationState& state, const CChainParams& params, CBlockIndex *pindex) { { @@ -2991,7 +3021,7 @@ bool CChainState::InvalidateBlock(BlockValidationState& state, const CChainParam // transactions back to the mempool if disconnecting was successful, // and we're not doing a very deep invalidation (in which case // keeping the mempool up to date is probably futile anyway). 
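Back to the FlushStateToDisk() hunk above: pruning is height-based, so the prune target is clamped so that no block at or below any block filter index's best block gets pruned away. A standalone illustration of that clamp (the heights are made-up example values):

#include <algorithm>
#include <cstdio>
#include <vector>

int main()
{
    int last_prune = 700000;                                   // e.g. current chain tip height
    const std::vector<int> index_best_heights{699990, 699500}; // e.g. per-index best block heights
    for (const int h : index_best_heights) {
        last_prune = std::max(1, std::min(last_prune, h));     // same clamp as in the hunk above
    }
    std::printf("can prune up to height %d\n", last_prune);    // prints 699500, the lowest index height
    return 0;
}

In the patch the per-index heights come from ForEachBlockFilterIndex() and index.GetSummary().best_block_height, and the clamped height is what FindFilesToPrune() / FindFilesToPruneManual() are handed.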
- UpdateMempoolForReorg(m_mempool, disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret); + UpdateMempoolForReorg(::ChainstateActive(), m_mempool, disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret); if (!ret) return false; assert(invalid_walk_tip->pprev == m_chain.Tip()); @@ -3203,7 +3233,7 @@ static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize, unsigned int n bool finalize_undo = false; if (!fKnown) { - while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) { + while (vinfoBlockFile[nFile].nSize + nAddSize >= (gArgs.GetBoolArg("-fastprune", false) ? 0x10000 /* 64kb */ : MAX_BLOCKFILE_SIZE)) { // when the undo file is keeping up with the block file, we want to flush it explicitly // when it is lagging behind (more blocks arrive than are being connected), we let the // undo block write case handle it @@ -3219,7 +3249,7 @@ static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize, unsigned int n if ((int)nFile != nLastBlockFile) { if (!fKnown) { - LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString()); + LogPrint(BCLog::VALIDATION, "Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString()); } FlushBlockFile(!fKnown, finalize_undo); nLastBlockFile = nFile; @@ -3398,14 +3428,14 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc return commitment; } -//! Returns last CBlockIndex* that is a checkpoint -static CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data) { const MapCheckpoints& checkpoints = data.mapCheckpoints; for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints)) { const uint256& hash = i.second; + assert(std::addressof(g_chainman.m_blockman) == std::addressof(*this)); CBlockIndex* pindex = LookupBlockIndex(hash); if (pindex) { return pindex; @@ -3423,7 +3453,7 @@ static CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) EXCLUSIVE_LOC * in ConnectBlock(). * Note that -reindex-chainstate skips the validation that happens here! */ -static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidationState& state, BlockManager& blockman, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { assert(pindexPrev != nullptr); const int nHeight = pindexPrev->nHeight + 1; @@ -3438,7 +3468,8 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidatio // Don't accept any forks from the main chain prior to last checkpoint. // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our // BlockIndex(). 
- CBlockIndex* pcheckpoint = GetLastCheckpoint(params.Checkpoints()); + assert(std::addressof(g_chainman.m_blockman) == std::addressof(blockman)); + CBlockIndex* pcheckpoint = blockman.GetLastCheckpoint(params.Checkpoints()); if (pcheckpoint && nHeight < pcheckpoint->nHeight) { LogPrintf("ERROR: %s: forked chain older than last checkpoint (height %d)\n", __func__, nHeight); return state.Invalid(BlockValidationResult::BLOCK_CHECKPOINT, "bad-fork-prior-to-checkpoint"); @@ -3558,11 +3589,10 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS // Check for duplicate uint256 hash = block.GetHash(); BlockMap::iterator miSelf = m_block_index.find(hash); - CBlockIndex *pindex = nullptr; if (hash != chainparams.GetConsensus().hashGenesisBlock) { if (miSelf != m_block_index.end()) { // Block header is already known. - pindex = miSelf->second; + CBlockIndex* pindex = miSelf->second; if (ppindex) *ppindex = pindex; if (pindex->nStatus & BLOCK_FAILED_MASK) { @@ -3589,7 +3619,7 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS LogPrintf("ERROR: %s: prev block invalid\n", __func__); return state.Invalid(BlockValidationResult::BLOCK_INVALID_PREV, "bad-prevblk"); } - if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime())) + if (!ContextualCheckBlockHeader(block, state, *this, chainparams, pindexPrev, GetAdjustedTime())) return error("%s: Consensus::ContextualCheckBlockHeader: %s, %s", __func__, hash.ToString(), state.ToString()); /* Determine if this block descends from any block which has been found @@ -3631,8 +3661,7 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS } } } - if (pindex == nullptr) - pindex = AddToBlockIndex(block); + CBlockIndex* pindex = AddToBlockIndex(block); if (ppindex) *ppindex = pindex; @@ -3643,6 +3672,7 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS // Exposed wrapper for AcceptBlockHeader bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, BlockValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex) { + assert(std::addressof(::ChainstateActive()) == std::addressof(ActiveChainstate())); AssertLockNotHeld(cs_main); { LOCK(cs_main); @@ -3650,7 +3680,7 @@ bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& CBlockIndex *pindex = nullptr; // Use a temp pindex instead of ppindex to avoid a const_cast bool accepted = m_blockman.AcceptBlockHeader( header, state, chainparams, &pindex); - ::ChainstateActive().CheckBlockIndex(chainparams.GetConsensus()); + ActiveChainstate().CheckBlockIndex(chainparams.GetConsensus()); if (!accepted) { return false; @@ -3660,8 +3690,8 @@ bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& } } } - if (NotifyHeaderTip()) { - if (::ChainstateActive().IsInitialBlockDownload() && ppindex && *ppindex) { + if (NotifyHeaderTip(ActiveChainstate())) { + if (ActiveChainstate().IsInitialBlockDownload() && ppindex && *ppindex) { LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", (*ppindex)->nHeight, 100.0/((*ppindex)->nHeight+(GetAdjustedTime() - (*ppindex)->GetBlockTime()) / Params().GetConsensus().nPowTargetSpacing) * (*ppindex)->nHeight); } } @@ -3773,6 +3803,7 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, Block bool ChainstateManager::ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock> pblock, bool 
fForceProcessing, bool* fNewBlock) { AssertLockNotHeld(cs_main); + assert(std::addressof(::ChainstateActive()) == std::addressof(ActiveChainstate())); { CBlockIndex *pindex = nullptr; @@ -3788,7 +3819,7 @@ bool ChainstateManager::ProcessNewBlock(const CChainParams& chainparams, const s bool ret = CheckBlock(*pblock, state, chainparams.GetConsensus()); if (ret) { // Store to disk - ret = ::ChainstateActive().AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock); + ret = ActiveChainstate().AcceptBlock(pblock, state, chainparams, &pindex, fForceProcessing, nullptr, fNewBlock); } if (!ret) { GetMainSignals().BlockChecked(*pblock, state); @@ -3796,20 +3827,27 @@ bool ChainstateManager::ProcessNewBlock(const CChainParams& chainparams, const s } } - NotifyHeaderTip(); + NotifyHeaderTip(ActiveChainstate()); BlockValidationState state; // Only used to report errors, not invalidity - ignore it - if (!::ChainstateActive().ActivateBestChain(state, chainparams, pblock)) + if (!ActiveChainstate().ActivateBestChain(state, chainparams, pblock)) return error("%s: ActivateBestChain failed (%s)", __func__, state.ToString()); return true; } -bool TestBlockValidity(BlockValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW, bool fCheckMerkleRoot) +bool TestBlockValidity(BlockValidationState& state, + const CChainParams& chainparams, + CChainState& chainstate, + const CBlock& block, + CBlockIndex* pindexPrev, + bool fCheckPOW, + bool fCheckMerkleRoot) { AssertLockHeld(cs_main); - assert(pindexPrev && pindexPrev == ::ChainActive().Tip()); - CCoinsViewCache viewNew(&::ChainstateActive().CoinsTip()); + assert(std::addressof(::ChainstateActive()) == std::addressof(chainstate)); + assert(pindexPrev && pindexPrev == chainstate.m_chain.Tip()); + CCoinsViewCache viewNew(&chainstate.CoinsTip()); uint256 block_hash(block.GetHash()); CBlockIndex indexDummy(block); indexDummy.pprev = pindexPrev; @@ -3817,13 +3855,14 @@ bool TestBlockValidity(BlockValidationState& state, const CChainParams& chainpar indexDummy.phashBlock = &block_hash; // NOTE: CheckBlockHeader is called by CheckBlock - if (!ContextualCheckBlockHeader(block, state, chainparams, pindexPrev, GetAdjustedTime())) + assert(std::addressof(g_chainman.m_blockman) == std::addressof(chainstate.m_blockman)); + if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainparams, pindexPrev, GetAdjustedTime())) return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString()); if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot)) return error("%s: Consensus::CheckBlock: %s", __func__, state.ToString()); if (!ContextualCheckBlock(block, state, chainparams.GetConsensus(), pindexPrev)) return error("%s: Consensus::ContextualCheckBlock: %s", __func__, state.ToString()); - if (!::ChainstateActive().ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true)) + if (!chainstate.ConnectBlock(block, state, &indexDummy, viewNew, chainparams, true)) return false; assert(state.IsValid()); @@ -3925,7 +3964,7 @@ void PruneBlockFilesManual(int nManualPruneHeight) } } -void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, bool is_ibd) +void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd) { LOCK2(cs_main, cs_LastBlockFile); if (chain_tip_height < 0 || 
nPruneTarget == 0) { @@ -3935,7 +3974,7 @@ void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPr return; } - unsigned int nLastBlockWeCanPrune = chain_tip_height - MIN_BLOCKS_TO_KEEP; + unsigned int nLastBlockWeCanPrune = std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP)); uint64_t nCurrentUsage = CalculateCurrentUsage(); // We don't check to prune until after we've allocated new space for files // So we should leave a buffer under our target to account for another allocation @@ -3986,7 +4025,7 @@ void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPr static FlatFileSeq BlockFileSeq() { - return FlatFileSeq(GetBlocksDir(), "blk", BLOCKFILE_CHUNK_SIZE); + return FlatFileSeq(GetBlocksDir(), "blk", gArgs.GetBoolArg("-fastprune", false) ? 0x4000 /* 16kb */ : BLOCKFILE_CHUNK_SIZE); } static FlatFileSeq UndoFileSeq() @@ -4152,7 +4191,8 @@ bool static LoadBlockIndexDB(ChainstateManager& chainman, const CChainParams& ch void CChainState::LoadMempool(const ArgsManager& args) { if (args.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { - ::LoadMempool(m_mempool); + assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); + ::LoadMempool(m_mempool, *this); } m_mempool.SetIsLoaded(!ShutdownRequested()); } @@ -4169,7 +4209,7 @@ bool CChainState::LoadChainTip(const CChainParams& chainparams) } // Load pointer to end of best chain - CBlockIndex* pindex = LookupBlockIndex(coins_cache.GetBestBlock()); + CBlockIndex* pindex = m_blockman.LookupBlockIndex(coins_cache.GetBestBlock()); if (!pindex) { return false; } @@ -4599,7 +4639,7 @@ bool LoadGenesisBlock(const CChainParams& chainparams) return ::ChainstateActive().LoadGenesisBlock(chainparams); } -void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFilePos* dbp) +void CChainState::LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFilePos* dbp) { // Map of disk positions for blocks with unknown parent (only used for reindex) static std::multimap<uint256, FlatFilePos> mapBlocksUnknownParent; @@ -4648,7 +4688,8 @@ void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFi { LOCK(cs_main); // detect out of order blocks, and store them for later - if (hash != chainparams.GetConsensus().hashGenesisBlock && !LookupBlockIndex(block.hashPrevBlock)) { + assert(std::addressof(g_chainman.m_blockman) == std::addressof(m_blockman)); + if (hash != chainparams.GetConsensus().hashGenesisBlock && !m_blockman.LookupBlockIndex(block.hashPrevBlock)) { LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(), block.hashPrevBlock.ToString()); if (dbp) @@ -4657,10 +4698,12 @@ void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFi } // process in case the block isn't known yet - CBlockIndex* pindex = LookupBlockIndex(hash); + assert(std::addressof(g_chainman.m_blockman) == std::addressof(m_blockman)); + CBlockIndex* pindex = m_blockman.LookupBlockIndex(hash); if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) { BlockValidationState state; - if (::ChainstateActive().AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) { + assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); + if (AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) { nLoaded++; } if (state.IsError()) { @@ -4674,12 +4717,14 @@ void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFi // Activate 
the genesis block so normal node progress can continue if (hash == chainparams.GetConsensus().hashGenesisBlock) { BlockValidationState state; + assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); if (!ActivateBestChain(state, chainparams, nullptr)) { break; } } - NotifyHeaderTip(); + assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); + NotifyHeaderTip(*this); // Recursively process earlier encountered successors of this block std::deque<uint256> queue; @@ -4697,7 +4742,8 @@ void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFi head.ToString()); LOCK(cs_main); BlockValidationState dummy; - if (::ChainstateActive().AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr)) + assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); + if (AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr)) { nLoaded++; queue.push_back(pblockrecursive->GetHash()); @@ -4705,7 +4751,8 @@ void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFi } range.first++; mapBlocksUnknownParent.erase(it); - NotifyHeaderTip(); + assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); + NotifyHeaderTip(*this); } } } catch (const std::exception& e) { @@ -4975,7 +5022,7 @@ int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::D static const uint64_t MEMPOOL_DUMP_VERSION = 1; -bool LoadMempool(CTxMemPool& pool) +bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate) { const CChainParams& chainparams = Params(); int64_t nExpiryTimeout = gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60; @@ -5013,13 +5060,11 @@ bool LoadMempool(CTxMemPool& pool) if (amountdelta) { pool.PrioritiseTransaction(tx->GetHash(), amountdelta); } - TxValidationState state; if (nTime > nNow - nExpiryTimeout) { LOCK(cs_main); - AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, nTime, - nullptr /* plTxnReplaced */, false /* bypass_limits */, - false /* test_accept */); - if (state.IsValid()) { + assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); + if (AcceptToMemoryPoolWithTime(chainparams, pool, active_chainstate, tx, nTime, false /* bypass_limits */, + false /* test_accept */).m_result_type == MempoolAcceptResult::ResultType::VALID) { ++count; } else { // mempool may contain the transaction already, e.g. from @@ -5143,7 +5188,9 @@ double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pin } Optional<uint256> ChainstateManager::SnapshotBlockhash() const { - if (m_active_chainstate != nullptr) { + LOCK(::cs_main); + if (m_active_chainstate != nullptr && + !m_active_chainstate->m_from_snapshot_blockhash.IsNull()) { // If a snapshot chainstate exists, it will always be our active. 
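// Editor's sketch (illustrative, not part of the patch): with the SnapshotBlockhash()
// change shown just above, the accessor yields a value only when the active chainstate
// was created from a UTXO snapshot, so callers no longer need to null-check a uint256
// themselves. LogSnapshotStatus() is a hypothetical helper, assuming the headers already
// used elsewhere in validation.cpp:

#include <logging.h>
#include <optional.h>   // Optional<T>
#include <uint256.h>
#include <validation.h> // ChainstateManager

static void LogSnapshotStatus(const ChainstateManager& chainman)
{
    if (const Optional<uint256> base = chainman.SnapshotBlockhash()) {
        LogPrintf("chainstate is snapshot-based (base block %s)\n", base->ToString());
    } else {
        LogPrintf("chainstate was built by full validation\n");
    }
}
// (end of editor's sketch)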
return m_active_chainstate->m_from_snapshot_blockhash; } @@ -5152,6 +5199,7 @@ Optional<uint256> ChainstateManager::SnapshotBlockhash() const { std::vector<CChainState*> ChainstateManager::GetAll() { + LOCK(::cs_main); std::vector<CChainState*> out; if (!IsSnapshotValidated() && m_ibd_chainstate) { @@ -5187,19 +5235,311 @@ CChainState& ChainstateManager::InitializeChainstate(CTxMemPool& mempool, const return *to_modify; } +const AssumeutxoData* ExpectedAssumeutxo( + const int height, const CChainParams& chainparams) +{ + const MapAssumeutxo& valid_assumeutxos_map = chainparams.Assumeutxo(); + const auto assumeutxo_found = valid_assumeutxos_map.find(height); + + if (assumeutxo_found != valid_assumeutxos_map.end()) { + return &assumeutxo_found->second; + } + return nullptr; +} + +bool ChainstateManager::ActivateSnapshot( + CAutoFile& coins_file, + const SnapshotMetadata& metadata, + bool in_memory) +{ + uint256 base_blockhash = metadata.m_base_blockhash; + + if (this->SnapshotBlockhash()) { + LogPrintf("[snapshot] can't activate a snapshot-based chainstate more than once\n"); + return false; + } + + int64_t current_coinsdb_cache_size{0}; + int64_t current_coinstip_cache_size{0}; + + // Cache percentages to allocate to each chainstate. + // + // These particular percentages don't matter so much since they will only be + // relevant during snapshot activation; caches are rebalanced at the conclusion of + // this function. We want to give (essentially) all available cache capacity to the + // snapshot to aid the bulk load later in this function. + static constexpr double IBD_CACHE_PERC = 0.01; + static constexpr double SNAPSHOT_CACHE_PERC = 0.99; + + { + LOCK(::cs_main); + // Resize the coins caches to ensure we're not exceeding memory limits. + // + // Allocate the majority of the cache to the incoming snapshot chainstate, since + // (optimistically) getting to its tip will be the top priority. We'll need to call + // `MaybeRebalanceCaches()` once we're done with this function to ensure + // the right allocation (including the possibility that no snapshot was activated + // and that we should restore the active chainstate caches to their original size). + // + current_coinsdb_cache_size = this->ActiveChainstate().m_coinsdb_cache_size_bytes; + current_coinstip_cache_size = this->ActiveChainstate().m_coinstip_cache_size_bytes; + + // Temporarily resize the active coins cache to make room for the newly-created + // snapshot chain. 
+ this->ActiveChainstate().ResizeCoinsCaches( + static_cast<size_t>(current_coinstip_cache_size * IBD_CACHE_PERC), + static_cast<size_t>(current_coinsdb_cache_size * IBD_CACHE_PERC)); + } + + auto snapshot_chainstate = WITH_LOCK(::cs_main, return MakeUnique<CChainState>( + this->ActiveChainstate().m_mempool, m_blockman, base_blockhash)); + + { + LOCK(::cs_main); + snapshot_chainstate->InitCoinsDB( + static_cast<size_t>(current_coinsdb_cache_size * SNAPSHOT_CACHE_PERC), + in_memory, false, "chainstate"); + snapshot_chainstate->InitCoinsCache( + static_cast<size_t>(current_coinstip_cache_size * SNAPSHOT_CACHE_PERC)); + } + + const bool snapshot_ok = this->PopulateAndValidateSnapshot( + *snapshot_chainstate, coins_file, metadata); + + if (!snapshot_ok) { + WITH_LOCK(::cs_main, this->MaybeRebalanceCaches()); + return false; + } + + { + LOCK(::cs_main); + assert(!m_snapshot_chainstate); + m_snapshot_chainstate.swap(snapshot_chainstate); + const bool chaintip_loaded = m_snapshot_chainstate->LoadChainTip(::Params()); + assert(chaintip_loaded); + + m_active_chainstate = m_snapshot_chainstate.get(); + + LogPrintf("[snapshot] successfully activated snapshot %s\n", base_blockhash.ToString()); + LogPrintf("[snapshot] (%.2f MB)\n", + m_snapshot_chainstate->CoinsTip().DynamicMemoryUsage() / (1000 * 1000)); + + this->MaybeRebalanceCaches(); + } + return true; +} + +bool ChainstateManager::PopulateAndValidateSnapshot( + CChainState& snapshot_chainstate, + CAutoFile& coins_file, + const SnapshotMetadata& metadata) +{ + // It's okay to release cs_main before we're done using `coins_cache` because we know + // that nothing else will be referencing the newly created snapshot_chainstate yet. + CCoinsViewCache& coins_cache = *WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsTip()); + + uint256 base_blockhash = metadata.m_base_blockhash; + + COutPoint outpoint; + Coin coin; + const uint64_t coins_count = metadata.m_coins_count; + uint64_t coins_left = metadata.m_coins_count; + + LogPrintf("[snapshot] loading coins from snapshot %s\n", base_blockhash.ToString()); + int64_t flush_now{0}; + int64_t coins_processed{0}; + + while (coins_left > 0) { + try { + coins_file >> outpoint; + } catch (const std::ios_base::failure&) { + LogPrintf("[snapshot] bad snapshot - no coins left after deserializing %d coins\n", + coins_count - coins_left); + return false; + } + coins_file >> coin; + coins_cache.EmplaceCoinInternalDANGER(std::move(outpoint), std::move(coin)); + + --coins_left; + ++coins_processed; + + if (coins_processed % 1000000 == 0) { + LogPrintf("[snapshot] %d coins loaded (%.2f%%, %.2f MB)\n", + coins_processed, + static_cast<float>(coins_processed) * 100 / static_cast<float>(coins_count), + coins_cache.DynamicMemoryUsage() / (1000 * 1000)); + } + + // Batch write and flush (if we need to) every so often. + // + // If our average Coin size is roughly 41 bytes, checking every 120,000 coins + // means <5MB of memory imprecision. + if (coins_processed % 120000 == 0) { + if (ShutdownRequested()) { + return false; + } + + const auto snapshot_cache_state = WITH_LOCK(::cs_main, + return snapshot_chainstate.GetCoinsCacheSizeState(&snapshot_chainstate.m_mempool)); + + if (snapshot_cache_state >= + CoinsCacheSizeState::CRITICAL) { + LogPrintf("[snapshot] flushing coins cache (%.2f MB)... 
", /* Continued */ + coins_cache.DynamicMemoryUsage() / (1000 * 1000)); + flush_now = GetTimeMillis(); + + // This is a hack - we don't know what the actual best block is, but that + // doesn't matter for the purposes of flushing the cache here. We'll set this + // to its correct value (`base_blockhash`) below after the coins are loaded. + coins_cache.SetBestBlock(GetRandHash()); + + coins_cache.Flush(); + LogPrintf("done (%.2fms)\n", GetTimeMillis() - flush_now); + } + } + } + + // Important that we set this. This and the coins_cache accesses above are + // sort of a layer violation, but either we reach into the innards of + // CCoinsViewCache here or we have to invert some of the CChainState to + // embed them in a snapshot-activation-specific CCoinsViewCache bulk load + // method. + coins_cache.SetBestBlock(base_blockhash); + + bool out_of_coins{false}; + try { + coins_file >> outpoint; + } catch (const std::ios_base::failure&) { + // We expect an exception since we should be out of coins. + out_of_coins = true; + } + if (!out_of_coins) { + LogPrintf("[snapshot] bad snapshot - coins left over after deserializing %d coins\n", + coins_count); + return false; + } + + LogPrintf("[snapshot] loaded %d (%.2f MB) coins from snapshot %s\n", + coins_count, + coins_cache.DynamicMemoryUsage() / (1000 * 1000), + base_blockhash.ToString()); + + LogPrintf("[snapshot] flushing snapshot chainstate to disk\n"); + // No need to acquire cs_main since this chainstate isn't being used yet. + coins_cache.Flush(); // TODO: if #17487 is merged, add erase=false here for better performance. + + assert(coins_cache.GetBestBlock() == base_blockhash); + + CCoinsStats stats; + auto breakpoint_fnc = [] { /* TODO insert breakpoint here? */ }; + + // As above, okay to immediately release cs_main here since no other context knows + // about the snapshot_chainstate. + CCoinsViewDB* snapshot_coinsdb = WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsDB()); + + if (!GetUTXOStats(snapshot_coinsdb, stats, CoinStatsHashType::HASH_SERIALIZED, breakpoint_fnc)) { + LogPrintf("[snapshot] failed to generate coins stats\n"); + return false; + } + + // Ensure that the base blockhash appears in the known chain of valid headers. We're willing to + // wait a bit here because the snapshot may have been loaded on startup, before we've + // received headers from the network. + + int max_secs_to_wait_for_headers = 60 * 10; + CBlockIndex* snapshot_start_block = nullptr; + + while (max_secs_to_wait_for_headers > 0) { + snapshot_start_block = WITH_LOCK(::cs_main, + return m_blockman.LookupBlockIndex(base_blockhash)); + --max_secs_to_wait_for_headers; + + if (!snapshot_start_block) { + std::this_thread::sleep_for(std::chrono::seconds(1)); + } else { + break; + } + } + + if (snapshot_start_block == nullptr) { + LogPrintf("[snapshot] timed out waiting for snapshot start blockheader %s\n", + base_blockhash.ToString()); + return false; + } + + // Assert that the deserialized chainstate contents match the expected assumeutxo value. 
+ + int base_height = snapshot_start_block->nHeight; + auto maybe_au_data = ExpectedAssumeutxo(base_height, ::Params()); + + if (!maybe_au_data) { + LogPrintf("[snapshot] assumeutxo height in snapshot metadata not recognized " /* Continued */ + "(%d) - refusing to load snapshot\n", base_height); + return false; + } + + const AssumeutxoData& au_data = *maybe_au_data; + + if (stats.hashSerialized != au_data.hash_serialized) { + LogPrintf("[snapshot] bad snapshot content hash: expected %s, got %s\n", + au_data.hash_serialized.ToString(), stats.hashSerialized.ToString()); + return false; + } + + snapshot_chainstate.m_chain.SetTip(snapshot_start_block); + + // The remainder of this function requires modifying data protected by cs_main. + LOCK(::cs_main); + + // Fake various pieces of CBlockIndex state: + // + // - nChainTx: so that we accurately report IBD-to-tip progress + // - nTx: so that LoadBlockIndex() loads assumed-valid CBlockIndex entries + // (among other things) + // - nStatus & BLOCK_OPT_WITNESS: so that RewindBlockIndex() doesn't zealously + // unwind the assumed-valid chain. + // + CBlockIndex* index = nullptr; + for (int i = 0; i <= snapshot_chainstate.m_chain.Height(); ++i) { + index = snapshot_chainstate.m_chain[i]; + + if (!index->nTx) { + index->nTx = 1; + } + index->nChainTx = index->pprev ? index->pprev->nChainTx + index->nTx : 1; + + // We need to fake this flag so that CChainState::RewindBlockIndex() + // won't try to rewind the entire assumed-valid chain on startup. + if (index->pprev && ::IsWitnessEnabled(index->pprev, ::Params().GetConsensus())) { + index->nStatus |= BLOCK_OPT_WITNESS; + } + } + + assert(index); + index->nChainTx = metadata.m_nchaintx; + snapshot_chainstate.setBlockIndexCandidates.insert(snapshot_start_block); + + LogPrintf("[snapshot] validated snapshot (%.2f MB)\n", + coins_cache.DynamicMemoryUsage() / (1000 * 1000)); + return true; +} + CChainState& ChainstateManager::ActiveChainstate() const { + LOCK(::cs_main); assert(m_active_chainstate); return *m_active_chainstate; } bool ChainstateManager::IsSnapshotActive() const { + LOCK(::cs_main); return m_snapshot_chainstate && m_active_chainstate == m_snapshot_chainstate.get(); } CChainState& ChainstateManager::ValidatedChainstate() const { + LOCK(::cs_main); if (m_snapshot_chainstate && IsSnapshotValidated()) { return *m_snapshot_chainstate.get(); } @@ -5209,6 +5549,7 @@ CChainState& ChainstateManager::ValidatedChainstate() const bool ChainstateManager::IsBackgroundIBD(CChainState* chainstate) const { + LOCK(::cs_main); return (m_snapshot_chainstate && chainstate == m_ibd_chainstate.get()); } @@ -5224,6 +5565,7 @@ void ChainstateManager::Unload() void ChainstateManager::Reset() { + LOCK(::cs_main); m_ibd_chainstate.reset(); m_snapshot_chainstate.reset(); m_active_chainstate = nullptr; diff --git a/src/validation.h b/src/validation.h index d3fbabe1a2..4e4bdbea54 100644 --- a/src/validation.h +++ b/src/validation.h @@ -11,9 +11,12 @@ #endif #include <amount.h> +#include <attributes.h> #include <coins.h> +#include <consensus/validation.h> #include <crypto/common.h> // for ReadLE64 #include <fs.h> +#include <node/utxo_snapshot.h> #include <optional.h> #include <policy/feerate.h> #include <protocol.h> // For CMessageHeader::MessageStartChars @@ -23,6 +26,7 @@ #include <txdb.h> #include <versionbits.h> #include <serialize.h> +#include <util/check.h> #include <util/hasher.h> #include <atomic> @@ -40,17 +44,18 @@ class CBlockIndex; class CBlockTreeDB; class CBlockUndo; class CChainParams; +struct 
CCheckpointData; class CInv; class CConnman; class CScriptCheck; class CTxMemPool; class ChainstateManager; -class TxValidationState; struct ChainTxData; struct DisconnectedBlockTransactions; struct PrecomputedTransactionData; struct LockPoints; +struct AssumeutxoData; /** Default for -minrelaytxfee, minimum relay fee for transactions */ static const unsigned int DEFAULT_MIN_RELAY_TX_FEE = 1000; @@ -143,8 +148,6 @@ extern const std::vector<std::string> CHECKLEVEL_DOC; FILE* OpenBlockFile(const FlatFilePos &pos, bool fReadOnly = false); /** Translation to a filesystem path */ fs::path GetBlockPosFilename(const FlatFilePos &pos); -/** Import blocks from an external file */ -void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFilePos* dbp = nullptr); /** Ensures we have a genesis block in the block tree, possibly writing one to disk. */ bool LoadGenesisBlock(const CChainParams& chainparams); /** Unload database information */ @@ -166,13 +169,6 @@ void StopScriptCheckWorkerThreads(); * @returns The tx if found, otherwise nullptr */ CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock); -/** - * Find the best known block, and make it the tip of the block chain - * - * May not be called with cs_main held. May not be called in a - * validationinterface callback. - */ -bool ActivateBestChain(BlockValidationState& state, const CChainParams& chainparams, std::shared_ptr<const CBlock> pblock = std::shared_ptr<const CBlock>()); CAmount GetBlockSubsidy(int nHeight, const Consensus::Params& consensusParams); /** Guess verification progress (as a fraction between 0.0=genesis and 1.0=current tip). */ @@ -189,12 +185,46 @@ void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune); /** Prune block files up to a given height */ void PruneBlockFilesManual(int nManualPruneHeight); -/** (try to) add transaction to memory pool - * plTxnReplaced will be appended to with all transactions replaced from mempool - * @param[out] fee_out optional argument to return tx fee to the caller **/ -bool AcceptToMemoryPool(CTxMemPool& pool, TxValidationState &state, const CTransactionRef &tx, - std::list<CTransactionRef>* plTxnReplaced, - bool bypass_limits, bool test_accept=false, CAmount* fee_out=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); +/** +* Validation result for a single transaction mempool acceptance. +*/ +struct MempoolAcceptResult { + /** Used to indicate the results of mempool validation, + * including the possibility of unfinished validation. + */ + enum class ResultType { + VALID, //!> Fully validated, valid. + INVALID, //!> Invalid. + }; + ResultType m_result_type; + TxValidationState m_state; + + // The following fields are only present when m_result_type = ResultType::VALID + /** Mempool transactions replaced by the tx per BIP 125 rules. */ + std::optional<std::list<CTransactionRef>> m_replaced_transactions; + /** Raw base fees. 
*/ + std::optional<CAmount> m_base_fees; + + /** Constructor for failure case */ + explicit MempoolAcceptResult(TxValidationState state) + : m_result_type(ResultType::INVALID), + m_state(state), m_replaced_transactions(nullopt), m_base_fees(nullopt) { + Assume(!state.IsValid()); // Can be invalid or error + } + + /** Constructor for success case */ + explicit MempoolAcceptResult(std::list<CTransactionRef>&& replaced_txns, CAmount fees) + : m_result_type(ResultType::VALID), m_state(TxValidationState{}), + m_replaced_transactions(std::move(replaced_txns)), m_base_fees(fees) {} +}; + +/** + * (Try to) add a transaction to the memory pool. + * @param[in] bypass_limits When true, don't enforce mempool fee limits. + * @param[in] test_accept When true, run validation checks but don't submit to mempool. + */ +MempoolAcceptResult AcceptToMemoryPool(CChainState& active_chainstate, CTxMemPool& pool, const CTransactionRef& tx, + bool bypass_limits, bool test_accept=false) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** Get the BIP9 state for a given deployment at the current tip. */ ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos); @@ -218,12 +248,12 @@ void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight); * * See consensus/consensus.h for flag definitions. */ -bool CheckFinalTx(const CTransaction &tx, int flags = -1) EXCLUSIVE_LOCKS_REQUIRED(cs_main); +bool CheckFinalTx(const CBlockIndex* active_chain_tip, const CTransaction &tx, int flags = -1) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** * Test whether the LockPoints height and time are still valid on the current chain */ -bool TestLockPointValidity(const LockPoints* lp) EXCLUSIVE_LOCKS_REQUIRED(cs_main); +bool TestLockPointValidity(CChain& active_chain, const LockPoints* lp) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** * Check if transaction will be BIP 68 final in the next block to be created. @@ -236,7 +266,12 @@ bool TestLockPointValidity(const LockPoints* lp) EXCLUSIVE_LOCKS_REQUIRED(cs_mai * * See consensus/consensus.h for flag definitions. */ -bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flags, LockPoints* lp = nullptr, bool useExistingLockPoints = false) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, pool.cs); +bool CheckSequenceLocks(CChainState& active_chainstate, + const CTxMemPool& pool, + const CTransaction& tx, + int flags, + LockPoints* lp = nullptr, + bool useExistingLockPoints = false) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, pool.cs); /** * Closure representing one script verification @@ -291,7 +326,13 @@ bool UndoReadFromDisk(CBlockUndo& blockundo, const CBlockIndex* pindex); bool CheckBlock(const CBlock& block, BlockValidationState& state, const Consensus::Params& consensusParams, bool fCheckPOW = true, bool fCheckMerkleRoot = true); /** Check a block is completely valid from start to finish (only works on top of our current best block) */ -bool TestBlockValidity(BlockValidationState& state, const CChainParams& chainparams, const CBlock& block, CBlockIndex* pindexPrev, bool fCheckPOW = true, bool fCheckMerkleRoot = true) EXCLUSIVE_LOCKS_REQUIRED(cs_main); +bool TestBlockValidity(BlockValidationState& state, + const CChainParams& chainparams, + CChainState& chainstate, + const CBlock& block, + CBlockIndex* pindexPrev, + bool fCheckPOW = true, + bool fCheckMerkleRoot = true) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** Check whether witness commitments are required for a block, and whether to enforce NULLDUMMY (BIP 147) rules. 
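// Editor's sketch (illustrative, not part of the patch): how a caller consumes the new
// MempoolAcceptResult returned by the AcceptToMemoryPool() declared above, instead of the
// old TxValidationState out-parameter. SubmitTx() is a hypothetical wrapper and assumes
// only the declarations in this header plus logging/sync:

#include <logging.h>
#include <sync.h>        // LOCK
#include <validation.h>  // AcceptToMemoryPool, MempoolAcceptResult, cs_main

static bool SubmitTx(CChainState& active_chainstate, CTxMemPool& pool, const CTransactionRef& tx)
{
    LOCK(cs_main); // AcceptToMemoryPool requires cs_main
    const MempoolAcceptResult result =
        AcceptToMemoryPool(active_chainstate, pool, tx, /* bypass_limits */ false, /* test_accept */ false);

    if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
        // On success the base fee (and any BIP 125 replacements) are available.
        LogPrintf("accepted %s (fee=%d)\n", tx->GetHash().ToString(), *result.m_base_fees);
        return true;
    }
    LogPrintf("rejected %s: %s\n", tx->GetHash().ToString(), result.m_state.ToString());
    return false;
}
// (end of editor's sketch)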
* Note that transaction witness validation rules are always enforced when P2SH is enforced. */ @@ -311,11 +352,6 @@ public: bool VerifyDB(const CChainParams& chainparams, CCoinsView *coinsview, int nCheckLevel, int nCheckDepth); }; -CBlockIndex* LookupBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - -/** Find the last common block between the parameter chain and a locator. */ -CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - enum DisconnectResult { DISCONNECT_OK, // All good. @@ -368,7 +404,7 @@ private: * * @param[out] setFilesToPrune The set of file indices that can be unlinked will be returned */ - void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, bool is_ibd); + void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd); public: BlockMap m_block_index GUARDED_BY(cs_main); @@ -433,6 +469,21 @@ public: const CChainParams& chainparams, CBlockIndex** ppindex) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + CBlockIndex* LookupBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + /** Find the last common block between the parameter chain and a locator. */ + CBlockIndex* FindForkInGlobalIndex(const CChain& chain, const CBlockLocator& locator) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + //! Returns last CBlockIndex* that is a checkpoint + CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + /** + * Return the spend height, which is one more than the inputs.GetBestBlock(). + * While checking, GetBestBlock() refers to the parent block. (protected by cs_main) + * This is also true for mempool checks. + */ + int GetSpendHeight(const CCoinsViewCache& inputs) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + ~BlockManager() { Unload(); } @@ -525,11 +576,6 @@ protected: */ mutable std::atomic<bool> m_cached_finished_ibd{false}; - //! Reference to a BlockManager instance which itself is shared across all - //! CChainState instances. Keeping a local reference allows us to test more - //! easily as opposed to referencing a global. - BlockManager& m_blockman; - //! mempool that is kept in sync with the chain CTxMemPool& m_mempool; @@ -537,6 +583,10 @@ protected: std::unique_ptr<CoinsViews> m_coins_views; public: + //! Reference to a BlockManager instance which itself is shared across all + //! CChainState instances. + BlockManager& m_blockman; + explicit CChainState(CTxMemPool& mempool, BlockManager& blockman, uint256 from_snapshot_blockhash = uint256()); /** @@ -613,6 +663,9 @@ public: bool ResizeCoinsCaches(size_t coinstip_size, size_t coinsdb_size) EXCLUSIVE_LOCKS_REQUIRED(::cs_main); + /** Import blocks from an external file */ + void LoadExternalBlockFile(const CChainParams& chainparams, FILE* fileIn, FlatFilePos* dbp = nullptr); + /** * Update the on-disk chain state. * The caches and indexes are flushed depending on the mode we're called with @@ -638,9 +691,10 @@ public: void PruneAndFlush(); /** - * Make the best chain active, in multiple steps. The result is either failure - * or an activated best chain. pblock is either nullptr or a pointer to a block - * that is already loaded (to avoid loading it again from disk). + * Find the best known block, and make it the tip of the block chain. The + * result is either failure or an activated best chain. 
pblock is either + * nullptr or a pointer to a block that is already loaded (to avoid loading + * it again from disk). * * ActivateBestChain is split into steps (see ActivateBestChainStep) so that * we avoid holding cs_main for an extended period of time; the length of this @@ -654,7 +708,7 @@ public: bool ActivateBestChain( BlockValidationState& state, const CChainParams& chainparams, - std::shared_ptr<const CBlock> pblock) LOCKS_EXCLUDED(cs_main); + std::shared_ptr<const CBlock> pblock = nullptr) LOCKS_EXCLUDED(cs_main); bool AcceptBlock(const std::shared_ptr<const CBlock>& pblock, BlockValidationState& state, const CChainParams& chainparams, CBlockIndex** ppindex, bool fRequested, const FlatFilePos* dbp, bool* fNewBlock) EXCLUSIVE_LOCKS_REQUIRED(cs_main); @@ -784,45 +838,45 @@ private: //! using this pointer (e.g. net_processing). //! //! Once this pointer is set to a corresponding chainstate, it will not - //! be reset until init.cpp:Shutdown(). This means it is safe to acquire - //! the contents of this pointer with ::cs_main held, release the lock, - //! and then use the reference without concern of it being deconstructed. + //! be reset until init.cpp:Shutdown(). //! //! This is especially important when, e.g., calling ActivateBestChain() //! on all chainstates because we are not able to hold ::cs_main going into //! that call. - std::unique_ptr<CChainState> m_ibd_chainstate; + std::unique_ptr<CChainState> m_ibd_chainstate GUARDED_BY(::cs_main); //! A chainstate initialized on the basis of a UTXO snapshot. If this is //! non-null, it is always our active chainstate. //! //! Once this pointer is set to a corresponding chainstate, it will not - //! be reset until init.cpp:Shutdown(). This means it is safe to acquire - //! the contents of this pointer with ::cs_main held, release the lock, - //! and then use the reference without concern of it being deconstructed. + //! be reset until init.cpp:Shutdown(). //! //! This is especially important when, e.g., calling ActivateBestChain() //! on all chainstates because we are not able to hold ::cs_main going into //! that call. - std::unique_ptr<CChainState> m_snapshot_chainstate; + std::unique_ptr<CChainState> m_snapshot_chainstate GUARDED_BY(::cs_main); //! Points to either the ibd or snapshot chainstate; indicates our //! most-work chain. //! //! Once this pointer is set to a corresponding chainstate, it will not - //! be reset until init.cpp:Shutdown(). This means it is safe to acquire - //! the contents of this pointer with ::cs_main held, release the lock, - //! and then use the reference without concern of it being deconstructed. + //! be reset until init.cpp:Shutdown(). //! //! This is especially important when, e.g., calling ActivateBestChain() //! on all chainstates because we are not able to hold ::cs_main going into //! that call. - CChainState* m_active_chainstate{nullptr}; + CChainState* m_active_chainstate GUARDED_BY(::cs_main) {nullptr}; //! If true, the assumed-valid chainstate has been fully validated //! by the background validation chainstate. bool m_snapshot_validated{false}; + //! Internal helper for ActivateSnapshot(). + [[nodiscard]] bool PopulateAndValidateSnapshot( + CChainState& snapshot_chainstate, + CAutoFile& coins_file, + const SnapshotMetadata& metadata); + // For access to m_active_chainstate. friend CChainState& ChainstateActive(); friend CChain& ChainActive(); @@ -853,6 +907,22 @@ public: //! Get all chainstates currently being used. std::vector<CChainState*> GetAll(); + //! 
Construct and activate a Chainstate on the basis of UTXO snapshot data. + //! + //! Steps: + //! + //! - Initialize an unused CChainState. + //! - Load its `CoinsViews` contents from `coins_file`. + //! - Verify that the hash of the resulting coinsdb matches the expected hash + //! per assumeutxo chain parameters. + //! - Wait for our headers chain to include the base block of the snapshot. + //! - "Fast forward" the tip of the new chainstate to the base of the snapshot, + //! faking nTx* block index data along the way. + //! - Move the new chainstate to `m_snapshot_chainstate` and make it our + //! ChainstateActive(). + [[nodiscard]] bool ActivateSnapshot( + CAutoFile& coins_file, const SnapshotMetadata& metadata, bool in_memory); + //! The most-work chain. CChainState& ActiveChainstate() const; CChain& ActiveChain() const { return ActiveChainstate().m_chain; } @@ -945,13 +1015,6 @@ CChain& ChainActive(); /** Global variable that points to the active block tree (protected by cs_main) */ extern std::unique_ptr<CBlockTreeDB> pblocktree; -/** - * Return the spend height, which is one more than the inputs.GetBestBlock(). - * While checking, GetBestBlock() refers to the parent block. (protected by cs_main) - * This is also true for mempool checks. - */ -int GetSpendHeight(const CCoinsViewCache& inputs); - extern VersionBitsCache versionbitscache; /** @@ -966,7 +1029,7 @@ CBlockFileInfo* GetBlockFileInfo(size_t n); bool DumpMempool(const CTxMemPool& pool); /** Load the mempool from disk. */ -bool LoadMempool(CTxMemPool& pool); +bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate); //! Check whether the block associated with this index entry is pruned or not. inline bool IsBlockPruned(const CBlockIndex* pblockindex) @@ -974,4 +1037,13 @@ inline bool IsBlockPruned(const CBlockIndex* pblockindex) return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0); } +/** + * Return the expected assumeutxo value for a given height, if one exists. + * + * @param height[in] Get the assumeutxo value for this height. + * + * @returns empty if no assumeutxo configuration exists for the given height. 
+ */ +const AssumeutxoData* ExpectedAssumeutxo(const int height, const CChainParams& params); + #endif // BITCOIN_VALIDATION_H diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp index c0d107bf39..ad40e6da9a 100644 --- a/src/wallet/bdb.cpp +++ b/src/wallet/bdb.cpp @@ -488,9 +488,9 @@ bool BerkeleyDatabase::Rewrite(const char* pszSkip) break; } if (pszSkip && - strncmp(ssKey.data(), pszSkip, std::min(ssKey.size(), strlen(pszSkip))) == 0) + strncmp((const char*)ssKey.data(), pszSkip, std::min(ssKey.size(), strlen(pszSkip))) == 0) continue; - if (strncmp(ssKey.data(), "\x07version", 8) == 0) { + if (strncmp((const char*)ssKey.data(), "\x07version", 8) == 0) { // Update version: ssValue.clear(); ssValue << CLIENT_VERSION; diff --git a/src/wallet/coinselection.cpp b/src/wallet/coinselection.cpp index a2dea84d17..10f89e3a6f 100644 --- a/src/wallet/coinselection.cpp +++ b/src/wallet/coinselection.cpp @@ -300,8 +300,26 @@ bool KnapsackSolver(const CAmount& nTargetValue, std::vector<OutputGroup>& group ******************************************************************************/ -void OutputGroup::Insert(const CInputCoin& output, int depth, bool from_me, size_t ancestors, size_t descendants) { +void OutputGroup::Insert(const CInputCoin& output, int depth, bool from_me, size_t ancestors, size_t descendants, bool positive_only) { + // Compute the effective value first + const CAmount coin_fee = output.m_input_bytes < 0 ? 0 : m_effective_feerate.GetFee(output.m_input_bytes); + const CAmount ev = output.txout.nValue - coin_fee; + + // Filter for positive only here before adding the coin + if (positive_only && ev <= 0) return; + m_outputs.push_back(output); + CInputCoin& coin = m_outputs.back(); + + coin.m_fee = coin_fee; + fee += coin.m_fee; + + coin.m_long_term_fee = coin.m_input_bytes < 0 ? 0 : m_long_term_feerate.GetFee(coin.m_input_bytes); + long_term_fee += coin.m_long_term_fee; + + coin.effective_value = ev; + effective_value += coin.effective_value; + m_from_me &= from_me; m_value += output.txout.nValue; m_depth = std::min(m_depth, depth); @@ -312,20 +330,6 @@ void OutputGroup::Insert(const CInputCoin& output, int depth, bool from_me, size // descendants is the count as seen from the top ancestor, not the descendants as seen from the // coin itself; thus, this value is counted as the max, not the sum m_descendants = std::max(m_descendants, descendants); - effective_value += output.effective_value; - fee += output.m_fee; - long_term_fee += output.m_long_term_fee; -} - -std::vector<CInputCoin>::iterator OutputGroup::Discard(const CInputCoin& output) { - auto it = m_outputs.begin(); - while (it != m_outputs.end() && it->outpoint != output.outpoint) ++it; - if (it == m_outputs.end()) return it; - m_value -= output.txout.nValue; - effective_value -= output.effective_value; - fee -= output.m_fee; - long_term_fee -= output.m_long_term_fee; - return m_outputs.erase(it); } bool OutputGroup::EligibleForSpending(const CoinEligibilityFilter& eligibility_filter) const @@ -334,35 +338,3 @@ bool OutputGroup::EligibleForSpending(const CoinEligibilityFilter& eligibility_f && m_ancestors <= eligibility_filter.max_ancestors && m_descendants <= eligibility_filter.max_descendants; } - -void OutputGroup::SetFees(const CFeeRate effective_feerate, const CFeeRate long_term_feerate) -{ - fee = 0; - long_term_fee = 0; - effective_value = 0; - for (CInputCoin& coin : m_outputs) { - coin.m_fee = coin.m_input_bytes < 0 ? 
0 : effective_feerate.GetFee(coin.m_input_bytes); - fee += coin.m_fee; - - coin.m_long_term_fee = coin.m_input_bytes < 0 ? 0 : long_term_feerate.GetFee(coin.m_input_bytes); - long_term_fee += coin.m_long_term_fee; - - coin.effective_value = coin.txout.nValue - coin.m_fee; - effective_value += coin.effective_value; - } -} - -OutputGroup OutputGroup::GetPositiveOnlyGroup() -{ - OutputGroup group(*this); - for (auto it = group.m_outputs.begin(); it != group.m_outputs.end(); ) { - const CInputCoin& coin = *it; - // Only include outputs that are positive effective value (i.e. not dust) - if (coin.effective_value <= 0) { - it = group.Discard(coin); - } else { - ++it; - } - } - return group; -} diff --git a/src/wallet/coinselection.h b/src/wallet/coinselection.h index cf9768b927..f0e1addaf1 100644 --- a/src/wallet/coinselection.h +++ b/src/wallet/coinselection.h @@ -6,11 +6,10 @@ #define BITCOIN_WALLET_COINSELECTION_H #include <amount.h> +#include <policy/feerate.h> #include <primitives/transaction.h> #include <random.h> -class CFeeRate; - //! target minimum change amount static constexpr CAmount MIN_CHANGE{COIN / 100}; //! final minimum change amount after paying for fees @@ -63,9 +62,11 @@ struct CoinEligibilityFilter const int conf_theirs; const uint64_t max_ancestors; const uint64_t max_descendants; + const bool m_include_partial_groups{false}; //! Include partial destination groups when avoid_reuse and there are full groups CoinEligibilityFilter(int conf_mine, int conf_theirs, uint64_t max_ancestors) : conf_mine(conf_mine), conf_theirs(conf_theirs), max_ancestors(max_ancestors), max_descendants(max_ancestors) {} CoinEligibilityFilter(int conf_mine, int conf_theirs, uint64_t max_ancestors, uint64_t max_descendants) : conf_mine(conf_mine), conf_theirs(conf_theirs), max_ancestors(max_ancestors), max_descendants(max_descendants) {} + CoinEligibilityFilter(int conf_mine, int conf_theirs, uint64_t max_ancestors, uint64_t max_descendants, bool include_partial) : conf_mine(conf_mine), conf_theirs(conf_theirs), max_ancestors(max_ancestors), max_descendants(max_descendants), m_include_partial_groups(include_partial) {} }; struct OutputGroup @@ -78,27 +79,18 @@ struct OutputGroup size_t m_descendants{0}; CAmount effective_value{0}; CAmount fee{0}; + CFeeRate m_effective_feerate{0}; CAmount long_term_fee{0}; + CFeeRate m_long_term_feerate{0}; OutputGroup() {} - OutputGroup(std::vector<CInputCoin>&& outputs, bool from_me, CAmount value, int depth, size_t ancestors, size_t descendants) - : m_outputs(std::move(outputs)) - , m_from_me(from_me) - , m_value(value) - , m_depth(depth) - , m_ancestors(ancestors) - , m_descendants(descendants) + OutputGroup(const CFeeRate& effective_feerate, const CFeeRate& long_term_feerate) : + m_effective_feerate(effective_feerate), + m_long_term_feerate(long_term_feerate) {} - OutputGroup(const CInputCoin& output, int depth, bool from_me, size_t ancestors, size_t descendants) : OutputGroup() { - Insert(output, depth, from_me, ancestors, descendants); - } - void Insert(const CInputCoin& output, int depth, bool from_me, size_t ancestors, size_t descendants); - std::vector<CInputCoin>::iterator Discard(const CInputCoin& output); - bool EligibleForSpending(const CoinEligibilityFilter& eligibility_filter) const; - //! 
Update the OutputGroup's fee, long_term_fee, and effective_value based on the given feerates - void SetFees(const CFeeRate effective_feerate, const CFeeRate long_term_feerate); - OutputGroup GetPositiveOnlyGroup(); + void Insert(const CInputCoin& output, int depth, bool from_me, size_t ancestors, size_t descendants, bool positive_only); + bool EligibleForSpending(const CoinEligibilityFilter& eligibility_filter) const; }; bool SelectCoinsBnB(std::vector<OutputGroup>& utxo_pool, const CAmount& target_value, const CAmount& cost_of_change, std::set<CInputCoin>& out_set, CAmount& value_ret, CAmount not_input_fees); diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp index 8505ddc309..99803a91d2 100644 --- a/src/wallet/rpcdump.cpp +++ b/src/wallet/rpcdump.cpp @@ -934,9 +934,9 @@ static std::string RecurseImportData(const CScript& script, ImportData& import_d case TxoutType::NONSTANDARD: case TxoutType::WITNESS_UNKNOWN: case TxoutType::WITNESS_V1_TAPROOT: - default: return "unrecognized script"; - } + } // no default case, so the compiler can warn about missing cases + CHECK_NONFATAL(false); } static UniValue ProcessImportLegacy(ImportData& import_data, std::map<CKeyID, CPubKey>& pubkey_map, std::map<CKeyID, CKey>& privkey_map, std::set<CScript>& script_pub_keys, bool& have_solving_data, const UniValue& data, std::vector<CKeyID>& ordered_pubkeys) diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index 46de273d63..53232db6bc 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -400,6 +400,12 @@ UniValue SendMoney(CWallet* const pwallet, const CCoinControl &coin_control, std { EnsureWalletIsUnlocked(pwallet); + // This function is only used by sendtoaddress and sendmany. + // This should always try to sign, if we don't have private keys, don't try to do anything here. 
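// Editor's sketch (illustrative, not part of the patch), referring to the
// coinselection.{cpp,h} changes above rather than to SendMoney(): OutputGroup now takes
// its feerates in the constructor and computes per-coin fee and effective value inside
// Insert(), with the positive_only flag replacing the removed SetFees() and
// GetPositiveOnlyGroup(). BuildGroup() is a hypothetical helper and the feerates, depth
// and ancestry values are example inputs only:

#include <policy/feerate.h>
#include <wallet/coinselection.h>
#include <vector>

static OutputGroup BuildGroup(const std::vector<CInputCoin>& coins)
{
    OutputGroup group(/* effective_feerate */ CFeeRate(1000), /* long_term_feerate */ CFeeRate(500));
    for (const CInputCoin& coin : coins) {
        // With positive_only=true, coins whose effective value is <= 0 are skipped at
        // insertion time, which is what GetPositiveOnlyGroup() used to do after the fact.
        group.Insert(coin, /* depth */ 6, /* from_me */ true, /* ancestors */ 0,
                     /* descendants */ 0, /* positive_only */ true);
    }
    return group;
}
// (end of editor's sketch)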
+ if (pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) { + throw JSONRPCError(RPC_WALLET_ERROR, "Error: Private keys are disabled for this wallet"); + } + // Shuffle recipient list std::shuffle(recipients.begin(), recipients.end(), FastRandomContext()); @@ -409,7 +415,7 @@ UniValue SendMoney(CWallet* const pwallet, const CCoinControl &coin_control, std bilingual_str error; CTransactionRef tx; FeeCalculation fee_calc_out; - bool fCreated = pwallet->CreateTransaction(recipients, tx, nFeeRequired, nChangePosRet, error, coin_control, fee_calc_out, !pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)); + const bool fCreated = pwallet->CreateTransaction(recipients, tx, nFeeRequired, nChangePosRet, error, coin_control, fee_calc_out, true); if (!fCreated) { throw JSONRPCError(RPC_WALLET_INSUFFICIENT_FUNDS, error.original); } @@ -3787,6 +3793,7 @@ RPCHelpMan getaddressinfo() {RPCResult::Type::BOOL, "iswatchonly", "If the address is watchonly."}, {RPCResult::Type::BOOL, "solvable", "If we know how to spend coins sent to this address, ignoring the possible lack of private keys."}, {RPCResult::Type::STR, "desc", /* optional */ true, "A descriptor for spending coins sent to this address (only when solvable)."}, + {RPCResult::Type::STR, "parent_desc", /* optional */ true, "The descriptor used to derive this address if this is a descriptor wallet"}, {RPCResult::Type::BOOL, "isscript", "If the key is a script."}, {RPCResult::Type::BOOL, "ischange", "If the address was used for change output."}, {RPCResult::Type::BOOL, "iswitness", "If the address is a witness address."}, @@ -3862,6 +3869,14 @@ RPCHelpMan getaddressinfo() ret.pushKV("desc", InferDescriptor(scriptPubKey, *provider)->ToString()); } + DescriptorScriptPubKeyMan* desc_spk_man = dynamic_cast<DescriptorScriptPubKeyMan*>(pwallet->GetScriptPubKeyMan(scriptPubKey)); + if (desc_spk_man) { + std::string desc_str; + if (desc_spk_man->GetDescriptorString(desc_str, false)) { + ret.pushKV("parent_desc", desc_str); + } + } + ret.pushKV("iswatchonly", bool(mine & ISMINE_WATCH_ONLY)); UniValue detail = DescribeWalletAddress(pwallet, dest); diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index 15972fe7bb..4630603f8e 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -94,8 +94,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s TxoutType whichType = Solver(scriptPubKey, vSolutions); CKeyID keyID; - switch (whichType) - { + switch (whichType) { case TxoutType::NONSTANDARD: case TxoutType::NULL_DATA: case TxoutType::WITNESS_UNKNOWN: @@ -194,7 +193,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s } break; } - } + } // no default case, so the compiler can warn about missing cases if (ret == IsMineResult::NO && keystore.HaveWatchOnly(scriptPubKey)) { ret = std::max(ret, IsMineResult::WATCH_ONLY); @@ -2265,3 +2264,16 @@ const std::vector<CScript> DescriptorScriptPubKeyMan::GetScriptPubKeys() const } return script_pub_keys; } + +bool DescriptorScriptPubKeyMan::GetDescriptorString(std::string& out, bool priv) const +{ + LOCK(cs_desc_man); + if (m_storage.IsLocked()) { + return false; + } + + FlatSigningProvider provider; + provider.keys = GetKeys(); + + return m_wallet_descriptor.descriptor->ToNormalizedString(provider, out, priv); +} diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h index 8f6b69bc78..51283e791d 100644 --- a/src/wallet/scriptpubkeyman.h +++ b/src/wallet/scriptpubkeyman.h @@ -616,6 
+616,8 @@ public: const WalletDescriptor GetWalletDescriptor() const EXCLUSIVE_LOCKS_REQUIRED(cs_desc_man); const std::vector<CScript> GetScriptPubKeys() const; + + bool GetDescriptorString(std::string& out, bool priv) const; }; #endif // BITCOIN_WALLET_SCRIPTPUBKEYMAN_H diff --git a/src/wallet/test/coinselector_tests.cpp b/src/wallet/test/coinselector_tests.cpp index 202804c9ff..ffac78d752 100644 --- a/src/wallet/test/coinselector_tests.cpp +++ b/src/wallet/test/coinselector_tests.cpp @@ -35,7 +35,7 @@ static CAmount balance = 0; CoinEligibilityFilter filter_standard(1, 6, 0); CoinEligibilityFilter filter_confirmed(1, 1, 0); CoinEligibilityFilter filter_standard_extra(6, 6, 0); -CoinSelectionParams coin_selection_params(false, 0, 0, CFeeRate(0), 0); +CoinSelectionParams coin_selection_params(false, 0, 0, CFeeRate(0), 0, false); static void add_coin(const CAmount& nValue, int nInput, std::vector<CInputCoin>& set) { @@ -115,7 +115,10 @@ inline std::vector<OutputGroup>& GroupCoins(const std::vector<CInputCoin>& coins { static std::vector<OutputGroup> static_groups; static_groups.clear(); - for (auto& coin : coins) static_groups.emplace_back(coin, 0, true, 0, 0); + for (auto& coin : coins) { + static_groups.emplace_back(); + static_groups.back().Insert(coin, 0, true, 0, 0, false); + } return static_groups; } @@ -123,7 +126,10 @@ inline std::vector<OutputGroup>& GroupCoins(const std::vector<COutput>& coins) { static std::vector<OutputGroup> static_groups; static_groups.clear(); - for (auto& coin : coins) static_groups.emplace_back(coin.GetInputCoin(), coin.nDepth, coin.tx->m_amounts[CWalletTx::DEBIT].m_cached[ISMINE_SPENDABLE] && coin.tx->m_amounts[CWalletTx::DEBIT].m_value[ISMINE_SPENDABLE] == 1 /* HACK: we can't figure out the is_me flag so we use the conditions defined above; perhaps set safe to false for !fIsFromMe in add_coin() */, 0, 0); + for (auto& coin : coins) { + static_groups.emplace_back(); + static_groups.back().Insert(coin.GetInputCoin(), coin.nDepth, coin.tx->m_amounts[CWalletTx::DEBIT].m_cached[ISMINE_SPENDABLE] && coin.tx->m_amounts[CWalletTx::DEBIT].m_value[ISMINE_SPENDABLE] == 1 /* HACK: we can't figure out the is_me flag so we use the conditions defined above; perhaps set safe to false for !fIsFromMe in add_coin() */, 0, 0, false); + } return static_groups; } @@ -263,22 +269,22 @@ BOOST_AUTO_TEST_CASE(bnb_search_test) } // Make sure that effective value is working in SelectCoinsMinConf when BnB is used - CoinSelectionParams coin_selection_params_bnb(true, 0, 0, CFeeRate(3000), 0); + CoinSelectionParams coin_selection_params_bnb(true, 0, 0, CFeeRate(3000), 0, false); CoinSet setCoinsRet; CAmount nValueRet; bool bnb_used; empty_wallet(); add_coin(1); vCoins.at(0).nInputBytes = 40; // Make sure that it has a negative effective value. The next check should assert if this somehow got through. 
Otherwise it will fail - BOOST_CHECK(!testWallet.SelectCoinsMinConf( 1 * CENT, filter_standard, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params_bnb, bnb_used)); + BOOST_CHECK(!testWallet.SelectCoinsMinConf( 1 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params_bnb, bnb_used)); // Test fees subtracted from output: empty_wallet(); add_coin(1 * CENT); vCoins.at(0).nInputBytes = 40; - BOOST_CHECK(!testWallet.SelectCoinsMinConf( 1 * CENT, filter_standard, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params_bnb, bnb_used)); + BOOST_CHECK(!testWallet.SelectCoinsMinConf( 1 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params_bnb, bnb_used)); coin_selection_params_bnb.m_subtract_fee_outputs = true; - BOOST_CHECK(testWallet.SelectCoinsMinConf( 1 * CENT, filter_standard, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params_bnb, bnb_used)); + BOOST_CHECK(testWallet.SelectCoinsMinConf( 1 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params_bnb, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 1 * CENT); // Make sure that can use BnB when there are preset inputs @@ -317,24 +323,24 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test) empty_wallet(); // with an empty wallet we can't even pay one cent - BOOST_CHECK(!testWallet.SelectCoinsMinConf( 1 * CENT, filter_standard, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(!testWallet.SelectCoinsMinConf( 1 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); add_coin(1*CENT, 4); // add a new 1 cent coin // with a new 1 cent coin, we still can't find a mature 1 cent - BOOST_CHECK(!testWallet.SelectCoinsMinConf( 1 * CENT, filter_standard, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(!testWallet.SelectCoinsMinConf( 1 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); // but we can find a new 1 cent - BOOST_CHECK( testWallet.SelectCoinsMinConf( 1 * CENT, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf( 1 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 1 * CENT); add_coin(2*CENT); // add a mature 2 cent coin // we can't make 3 cents of mature coins - BOOST_CHECK(!testWallet.SelectCoinsMinConf( 3 * CENT, filter_standard, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(!testWallet.SelectCoinsMinConf( 3 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); // we can make 3 cents of new coins - BOOST_CHECK( testWallet.SelectCoinsMinConf( 3 * CENT, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf( 3 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 3 * CENT); add_coin(5*CENT); // add a mature 5 cent coin, @@ -344,33 +350,33 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test) // now we have new: 1+10=11 (of which 10 was self-sent), and mature: 2+5+20=27. 
total = 38 // we can't make 38 cents only if we disallow new coins: - BOOST_CHECK(!testWallet.SelectCoinsMinConf(38 * CENT, filter_standard, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(!testWallet.SelectCoinsMinConf(38 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); // we can't even make 37 cents if we don't allow new coins even if they're from us - BOOST_CHECK(!testWallet.SelectCoinsMinConf(38 * CENT, filter_standard_extra, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(!testWallet.SelectCoinsMinConf(38 * CENT, filter_standard_extra, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); // but we can make 37 cents if we accept new coins from ourself - BOOST_CHECK( testWallet.SelectCoinsMinConf(37 * CENT, filter_standard, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(37 * CENT, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 37 * CENT); // and we can make 38 cents if we accept all new coins - BOOST_CHECK( testWallet.SelectCoinsMinConf(38 * CENT, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(38 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 38 * CENT); // try making 34 cents from 1,2,5,10,20 - we can't do it exactly - BOOST_CHECK( testWallet.SelectCoinsMinConf(34 * CENT, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(34 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 35 * CENT); // but 35 cents is closest BOOST_CHECK_EQUAL(setCoinsRet.size(), 3U); // the best should be 20+10+5. it's incredibly unlikely the 1 or 2 got included (but possible) // when we try making 7 cents, the smaller coins (1,2,5) are enough. We should see just 2+5 - BOOST_CHECK( testWallet.SelectCoinsMinConf( 7 * CENT, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf( 7 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 7 * CENT); BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U); // when we try making 8 cents, the smaller coins (1,2,5) are exactly enough. 
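
The assertions above encode the knapsack behaviour the wallet relies on: prefer an exact match from the small coins (1+2+5 = 8 cents), and when no exact subset exists (9 cents), fall back to the smallest total that overshoots (the single 10-cent coin). The following is a toy, brute-force sketch of that selection rule, not the wallet's actual KnapsackSolver; `toy_select` is a hypothetical name used only for illustration.

```python
from itertools import combinations

def toy_select(coins, target):
    """Pick an exact-match subset if one exists, otherwise the smallest
    total that exceeds the target. Brute force, illustration only."""
    best = None
    for r in range(1, len(coins) + 1):
        for subset in combinations(coins, r):
            total = sum(subset)
            if total == target:
                return subset                                   # exact match always wins
            if total > target and (best is None or total < sum(best)):
                best = subset                                   # remember the closest overshoot
    return best

assert sum(toy_select([1, 2, 5, 10, 20], 8)) == 8   # 1+2+5 is exactly enough
assert sum(toy_select([1, 2, 5, 10, 20], 9)) == 10  # no exact subset, take the next bigger coin
```

The real solver also applies tie-breaking and change-avoidance rules that this sketch ignores.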
- BOOST_CHECK( testWallet.SelectCoinsMinConf( 8 * CENT, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf( 8 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK(nValueRet == 8 * CENT); BOOST_CHECK_EQUAL(setCoinsRet.size(), 3U); // when we try making 9 cents, no subset of smaller coins is enough, and we get the next bigger coin (10) - BOOST_CHECK( testWallet.SelectCoinsMinConf( 9 * CENT, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf( 9 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 10 * CENT); BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U); @@ -384,30 +390,30 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test) add_coin(30*CENT); // now we have 6+7+8+20+30 = 71 cents total // check that we have 71 and not 72 - BOOST_CHECK( testWallet.SelectCoinsMinConf(71 * CENT, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); - BOOST_CHECK(!testWallet.SelectCoinsMinConf(72 * CENT, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(71 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(!testWallet.SelectCoinsMinConf(72 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); // now try making 16 cents. the best smaller coins can do is 6+7+8 = 21; not as good at the next biggest coin, 20 - BOOST_CHECK( testWallet.SelectCoinsMinConf(16 * CENT, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(16 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 20 * CENT); // we should get 20 in one coin BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U); add_coin( 5*CENT); // now we have 5+6+7+8+20+30 = 75 cents total // now if we try making 16 cents again, the smaller coins can make 5+6+7 = 18 cents, better than the next biggest coin, 20 - BOOST_CHECK( testWallet.SelectCoinsMinConf(16 * CENT, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(16 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 18 * CENT); // we should get 18 in 3 coins BOOST_CHECK_EQUAL(setCoinsRet.size(), 3U); add_coin( 18*CENT); // now we have 5+6+7+8+18+20+30 // and now if we try making 16 cents again, the smaller coins can make 5+6+7 = 18 cents, the same as the next biggest coin, 18 - BOOST_CHECK( testWallet.SelectCoinsMinConf(16 * CENT, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(16 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 18 * CENT); // we should get 18 in 1 coin BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U); // because in the event of a tie, the biggest coin wins // now try making 11 cents. 
we should get 5+6 - BOOST_CHECK( testWallet.SelectCoinsMinConf(11 * CENT, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(11 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 11 * CENT); BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U); @@ -416,11 +422,11 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test) add_coin( 2*COIN); add_coin( 3*COIN); add_coin( 4*COIN); // now we have 5+6+7+8+18+20+30+100+200+300+400 = 1094 cents - BOOST_CHECK( testWallet.SelectCoinsMinConf(95 * CENT, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(95 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 1 * COIN); // we should get 1 BTC in 1 coin BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U); - BOOST_CHECK( testWallet.SelectCoinsMinConf(195 * CENT, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(195 * CENT, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 2 * COIN); // we should get 2 BTC in 1 coin BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U); @@ -435,14 +441,14 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test) // try making 1 * MIN_CHANGE from the 1.5 * MIN_CHANGE // we'll get change smaller than MIN_CHANGE whatever happens, so can expect MIN_CHANGE exactly - BOOST_CHECK( testWallet.SelectCoinsMinConf(MIN_CHANGE, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(MIN_CHANGE, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, MIN_CHANGE); // but if we add a bigger coin, small change is avoided add_coin(1111*MIN_CHANGE); // try making 1 from 0.1 + 0.2 + 0.3 + 0.4 + 0.5 + 1111 = 1112.5 - BOOST_CHECK( testWallet.SelectCoinsMinConf(1 * MIN_CHANGE, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(1 * MIN_CHANGE, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 1 * MIN_CHANGE); // we should get the exact amount // if we add more small coins: @@ -450,7 +456,7 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test) add_coin(MIN_CHANGE * 7 / 10); // and try again to make 1.0 * MIN_CHANGE - BOOST_CHECK( testWallet.SelectCoinsMinConf(1 * MIN_CHANGE, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(1 * MIN_CHANGE, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 1 * MIN_CHANGE); // we should get the exact amount // run the 'mtgox' test (see https://blockexplorer.com/tx/29a3efd3ef04f9153d47a990bd7b048a4b2d213daaa5fb8ed670fb85f13bdbcf) @@ -459,7 +465,7 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test) for (int j = 0; j < 20; j++) add_coin(50000 * COIN); - BOOST_CHECK( testWallet.SelectCoinsMinConf(500000 * COIN, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(500000 * COIN, filter_confirmed, vCoins, setCoinsRet, nValueRet, 
coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 500000 * COIN); // we should get the exact amount BOOST_CHECK_EQUAL(setCoinsRet.size(), 10U); // in ten coins @@ -472,7 +478,7 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test) add_coin(MIN_CHANGE * 6 / 10); add_coin(MIN_CHANGE * 7 / 10); add_coin(1111 * MIN_CHANGE); - BOOST_CHECK( testWallet.SelectCoinsMinConf(1 * MIN_CHANGE, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(1 * MIN_CHANGE, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 1111 * MIN_CHANGE); // we get the bigger coin BOOST_CHECK_EQUAL(setCoinsRet.size(), 1U); @@ -482,7 +488,7 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test) add_coin(MIN_CHANGE * 6 / 10); add_coin(MIN_CHANGE * 8 / 10); add_coin(1111 * MIN_CHANGE); - BOOST_CHECK( testWallet.SelectCoinsMinConf(MIN_CHANGE, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK( testWallet.SelectCoinsMinConf(MIN_CHANGE, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, MIN_CHANGE); // we should get the exact amount BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U); // in two coins 0.4+0.6 @@ -493,12 +499,12 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test) add_coin(MIN_CHANGE * 100); // trying to make 100.01 from these three coins - BOOST_CHECK(testWallet.SelectCoinsMinConf(MIN_CHANGE * 10001 / 100, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(testWallet.SelectCoinsMinConf(MIN_CHANGE * 10001 / 100, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, MIN_CHANGE * 10105 / 100); // we should get all coins BOOST_CHECK_EQUAL(setCoinsRet.size(), 3U); // but if we try to make 99.9, we should take the bigger of the two small coins to avoid small change - BOOST_CHECK(testWallet.SelectCoinsMinConf(MIN_CHANGE * 9990 / 100, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(testWallet.SelectCoinsMinConf(MIN_CHANGE * 9990 / 100, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 101 * MIN_CHANGE); BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U); } @@ -512,7 +518,7 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test) // We only create the wallet once to save time, but we still run the coin selection RUN_TESTS times. 
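
The hunks that follow repeat randomized selections and, as their comments say, "only complain if all of them fail": a single pair of runs returning the same set can happen by chance roughly 1% of the time, so the test retries RANDOM_REPEATS times and only reports a failure if every repeat collides. A stripped-down sketch of that pattern, using hypothetical helpers (`pick_one`, `shuffles_differ`) instead of the test's wallet calls and an assumed value for RANDOM_REPEATS:

```python
import random

RANDOM_REPEATS = 5  # assumed for illustration; the test defines its own constant

def pick_one(coins):
    """Stand-in for a selection that picks one of many identical coins at random."""
    return random.randrange(len(coins))

def shuffles_differ(coins):
    """Return True if at least one of RANDOM_REPEATS paired runs disagrees."""
    fails = 0
    for _ in range(RANDOM_REPEATS):
        if pick_one(coins) == pick_one(coins):
            fails += 1                      # an occasional collision is expected
    return fails < RANDOM_REPEATS           # only fail if *every* repeat collided

assert shuffles_differ(list(range(100)))    # all-collisions probability is ~1e-10
```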
for (int i = 0; i < RUN_TESTS; i++) { - BOOST_CHECK(testWallet.SelectCoinsMinConf(2000, filter_confirmed, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(testWallet.SelectCoinsMinConf(2000, filter_confirmed, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); if (amt - 2000 < MIN_CHANGE) { // needs more than one input: @@ -538,8 +544,8 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test) for (int i = 0; i < RUN_TESTS; i++) { // picking 50 from 100 coins doesn't depend on the shuffle, // but does depend on randomness in the stochastic approximation code - BOOST_CHECK(testWallet.SelectCoinsMinConf(50 * COIN, filter_standard, GroupCoins(vCoins), setCoinsRet , nValueRet, coin_selection_params, bnb_used)); - BOOST_CHECK(testWallet.SelectCoinsMinConf(50 * COIN, filter_standard, GroupCoins(vCoins), setCoinsRet2, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(testWallet.SelectCoinsMinConf(50 * COIN, filter_standard, vCoins, setCoinsRet , nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(testWallet.SelectCoinsMinConf(50 * COIN, filter_standard, vCoins, setCoinsRet2, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK(!equal_sets(setCoinsRet, setCoinsRet2)); int fails = 0; @@ -547,8 +553,8 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test) { // selecting 1 from 100 identical coins depends on the shuffle; this test will fail 1% of the time // run the test RANDOM_REPEATS times and only complain if all of them fail - BOOST_CHECK(testWallet.SelectCoinsMinConf(COIN, filter_standard, GroupCoins(vCoins), setCoinsRet , nValueRet, coin_selection_params, bnb_used)); - BOOST_CHECK(testWallet.SelectCoinsMinConf(COIN, filter_standard, GroupCoins(vCoins), setCoinsRet2, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(testWallet.SelectCoinsMinConf(COIN, filter_standard, vCoins, setCoinsRet , nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(testWallet.SelectCoinsMinConf(COIN, filter_standard, vCoins, setCoinsRet2, nValueRet, coin_selection_params, bnb_used)); if (equal_sets(setCoinsRet, setCoinsRet2)) fails++; } @@ -570,8 +576,8 @@ BOOST_AUTO_TEST_CASE(knapsack_solver_test) { // selecting 1 from 100 identical coins depends on the shuffle; this test will fail 1% of the time // run the test RANDOM_REPEATS times and only complain if all of them fail - BOOST_CHECK(testWallet.SelectCoinsMinConf(90*CENT, filter_standard, GroupCoins(vCoins), setCoinsRet , nValueRet, coin_selection_params, bnb_used)); - BOOST_CHECK(testWallet.SelectCoinsMinConf(90*CENT, filter_standard, GroupCoins(vCoins), setCoinsRet2, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(testWallet.SelectCoinsMinConf(90*CENT, filter_standard, vCoins, setCoinsRet , nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(testWallet.SelectCoinsMinConf(90*CENT, filter_standard, vCoins, setCoinsRet2, nValueRet, coin_selection_params, bnb_used)); if (equal_sets(setCoinsRet, setCoinsRet2)) fails++; } @@ -598,7 +604,7 @@ BOOST_AUTO_TEST_CASE(ApproximateBestSubset) add_coin(1000 * COIN); add_coin(3 * COIN); - BOOST_CHECK(testWallet.SelectCoinsMinConf(1003 * COIN, filter_standard, GroupCoins(vCoins), setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + BOOST_CHECK(testWallet.SelectCoinsMinConf(1003 * COIN, filter_standard, vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); BOOST_CHECK_EQUAL(nValueRet, 1003 * COIN); BOOST_CHECK_EQUAL(setCoinsRet.size(), 2U); @@ -633,13 +639,13 @@ BOOST_AUTO_TEST_CASE(SelectCoins_test) CAmount target = 
rand.randrange(balance - 1000) + 1000; // Perform selection - CoinSelectionParams coin_selection_params_knapsack(false, 34, 148, CFeeRate(0), 0); - CoinSelectionParams coin_selection_params_bnb(true, 34, 148, CFeeRate(0), 0); + CoinSelectionParams coin_selection_params_knapsack(false, 34, 148, CFeeRate(0), 0, false); + CoinSelectionParams coin_selection_params_bnb(true, 34, 148, CFeeRate(0), 0, false); CoinSet out_set; CAmount out_value = 0; bool bnb_used = false; - BOOST_CHECK(testWallet.SelectCoinsMinConf(target, filter_standard, GroupCoins(vCoins), out_set, out_value, coin_selection_params_bnb, bnb_used) || - testWallet.SelectCoinsMinConf(target, filter_standard, GroupCoins(vCoins), out_set, out_value, coin_selection_params_knapsack, bnb_used)); + BOOST_CHECK(testWallet.SelectCoinsMinConf(target, filter_standard, vCoins, out_set, out_value, coin_selection_params_bnb, bnb_used) || + testWallet.SelectCoinsMinConf(target, filter_standard, vCoins, out_set, out_value, coin_selection_params_knapsack, bnb_used)); BOOST_CHECK_GE(out_value, target); } } diff --git a/src/wallet/test/db_tests.cpp b/src/wallet/test/db_tests.cpp index 27179839b7..b2eb8e4bca 100644 --- a/src/wallet/test/db_tests.cpp +++ b/src/wallet/test/db_tests.cpp @@ -30,8 +30,8 @@ BOOST_AUTO_TEST_CASE(getwalletenv_file) std::string filename; std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(file_path, filename); - BOOST_CHECK(filename == test_name); - BOOST_CHECK(env->Directory() == datadir); + BOOST_CHECK_EQUAL(filename, test_name); + BOOST_CHECK_EQUAL(env->Directory(), datadir); } BOOST_AUTO_TEST_CASE(getwalletenv_directory) @@ -41,8 +41,8 @@ BOOST_AUTO_TEST_CASE(getwalletenv_directory) std::string filename; std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(datadir, filename); - BOOST_CHECK(filename == expected_name); - BOOST_CHECK(env->Directory() == datadir); + BOOST_CHECK_EQUAL(filename, expected_name); + BOOST_CHECK_EQUAL(env->Directory(), datadir); } BOOST_AUTO_TEST_CASE(getwalletenv_g_dbenvs_multiple) diff --git a/src/wallet/test/init_tests.cpp b/src/wallet/test/init_tests.cpp index e70b56c529..45e1b8c4b8 100644 --- a/src/wallet/test/init_tests.cpp +++ b/src/wallet/test/init_tests.cpp @@ -19,7 +19,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_default) BOOST_CHECK(result == true); fs::path walletdir = gArgs.GetArg("-walletdir", ""); fs::path expected_path = fs::canonical(m_walletdir_path_cases["default"]); - BOOST_CHECK(walletdir == expected_path); + BOOST_CHECK_EQUAL(walletdir, expected_path); } BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_custom) @@ -29,7 +29,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_custom) BOOST_CHECK(result == true); fs::path walletdir = gArgs.GetArg("-walletdir", ""); fs::path expected_path = fs::canonical(m_walletdir_path_cases["custom"]); - BOOST_CHECK(walletdir == expected_path); + BOOST_CHECK_EQUAL(walletdir, expected_path); } BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_does_not_exist) @@ -69,7 +69,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_no_trailing) BOOST_CHECK(result == true); fs::path walletdir = gArgs.GetArg("-walletdir", ""); fs::path expected_path = fs::canonical(m_walletdir_path_cases["default"]); - BOOST_CHECK(walletdir == expected_path); + BOOST_CHECK_EQUAL(walletdir, expected_path); } BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_no_trailing2) @@ -79,7 +79,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_no_trailing2) BOOST_CHECK(result == true); fs::path walletdir = gArgs.GetArg("-walletdir", ""); fs::path expected_path = 
fs::canonical(m_walletdir_path_cases["default"]); - BOOST_CHECK(walletdir == expected_path); + BOOST_CHECK_EQUAL(walletdir, expected_path); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 723552860a..db80745db0 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -2357,13 +2357,12 @@ const CTxOut& CWallet::FindNonChangeParentOutput(const CTransaction& tx, int out return ptx->vout[n]; } -bool CWallet::SelectCoinsMinConf(const CAmount& nTargetValue, const CoinEligibilityFilter& eligibility_filter, std::vector<OutputGroup> groups, +bool CWallet::SelectCoinsMinConf(const CAmount& nTargetValue, const CoinEligibilityFilter& eligibility_filter, std::vector<COutput> coins, std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet, const CoinSelectionParams& coin_selection_params, bool& bnb_used) const { setCoinsRet.clear(); nValueRet = 0; - std::vector<OutputGroup> utxo_pool; if (coin_selection_params.use_bnb) { // Get long term estimate FeeCalculation feeCalc; @@ -2371,35 +2370,27 @@ bool CWallet::SelectCoinsMinConf(const CAmount& nTargetValue, const CoinEligibil temp.m_confirm_target = 1008; CFeeRate long_term_feerate = GetMinimumFeeRate(*this, temp, &feeCalc); - // Calculate cost of change - CAmount cost_of_change = GetDiscardRate(*this).GetFee(coin_selection_params.change_spend_size) + coin_selection_params.effective_fee.GetFee(coin_selection_params.change_output_size); + // Get the feerate for effective value. + // When subtracting the fee from the outputs, we want the effective feerate to be 0 + CFeeRate effective_feerate{0}; + if (!coin_selection_params.m_subtract_fee_outputs) { + effective_feerate = coin_selection_params.effective_fee; + } - // Filter by the min conf specs and add to utxo_pool and calculate effective value - for (OutputGroup& group : groups) { - if (!group.EligibleForSpending(eligibility_filter)) continue; + std::vector<OutputGroup> groups = GroupOutputs(coins, !coin_selection_params.m_avoid_partial_spends, effective_feerate, long_term_feerate, eligibility_filter, true /* positive_only */); - if (coin_selection_params.m_subtract_fee_outputs) { - // Set the effective feerate to 0 as we don't want to use the effective value since the fees will be deducted from the output - group.SetFees(CFeeRate(0) /* effective_feerate */, long_term_feerate); - } else { - group.SetFees(coin_selection_params.effective_fee, long_term_feerate); - } + // Calculate cost of change + CAmount cost_of_change = GetDiscardRate(*this).GetFee(coin_selection_params.change_spend_size) + coin_selection_params.effective_fee.GetFee(coin_selection_params.change_output_size); - OutputGroup pos_group = group.GetPositiveOnlyGroup(); - if (pos_group.effective_value > 0) utxo_pool.push_back(pos_group); - } // Calculate the fees for things that aren't inputs CAmount not_input_fees = coin_selection_params.effective_fee.GetFee(coin_selection_params.tx_noinputs_size); bnb_used = true; - return SelectCoinsBnB(utxo_pool, nTargetValue, cost_of_change, setCoinsRet, nValueRet, not_input_fees); + return SelectCoinsBnB(groups, nTargetValue, cost_of_change, setCoinsRet, nValueRet, not_input_fees); } else { - // Filter by the min conf specs and add to utxo_pool - for (const OutputGroup& group : groups) { - if (!group.EligibleForSpending(eligibility_filter)) continue; - utxo_pool.push_back(group); - } + std::vector<OutputGroup> groups = GroupOutputs(coins, !coin_selection_params.m_avoid_partial_spends, CFeeRate(0), CFeeRate(0), eligibility_filter, false /* positive_only 
*/); + bnb_used = false; - return KnapsackSolver(nTargetValue, utxo_pool, setCoinsRet, nValueRet); + return KnapsackSolver(nTargetValue, groups, setCoinsRet, nValueRet); } } @@ -2482,16 +2473,14 @@ bool CWallet::SelectCoins(const std::vector<COutput>& vAvailableCoins, const CAm // explicitly shuffling the outputs before processing Shuffle(vCoins.begin(), vCoins.end(), FastRandomContext()); } - std::vector<OutputGroup> groups = GroupOutputs(vCoins, !coin_control.m_avoid_partial_spends, max_ancestors); - bool res = value_to_select <= 0 || - SelectCoinsMinConf(value_to_select, CoinEligibilityFilter(1, 6, 0), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used) || - SelectCoinsMinConf(value_to_select, CoinEligibilityFilter(1, 1, 0), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used) || - (m_spend_zero_conf_change && SelectCoinsMinConf(value_to_select, CoinEligibilityFilter(0, 1, 2), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) || - (m_spend_zero_conf_change && SelectCoinsMinConf(value_to_select, CoinEligibilityFilter(0, 1, std::min((size_t)4, max_ancestors/3), std::min((size_t)4, max_descendants/3)), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) || - (m_spend_zero_conf_change && SelectCoinsMinConf(value_to_select, CoinEligibilityFilter(0, 1, max_ancestors/2, max_descendants/2), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) || - (m_spend_zero_conf_change && SelectCoinsMinConf(value_to_select, CoinEligibilityFilter(0, 1, max_ancestors-1, max_descendants-1), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) || - (m_spend_zero_conf_change && !fRejectLongChains && SelectCoinsMinConf(value_to_select, CoinEligibilityFilter(0, 1, std::numeric_limits<uint64_t>::max()), groups, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); + SelectCoinsMinConf(value_to_select, CoinEligibilityFilter(1, 6, 0), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used) || + SelectCoinsMinConf(value_to_select, CoinEligibilityFilter(1, 1, 0), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used) || + (m_spend_zero_conf_change && SelectCoinsMinConf(value_to_select, CoinEligibilityFilter(0, 1, 2), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) || + (m_spend_zero_conf_change && SelectCoinsMinConf(value_to_select, CoinEligibilityFilter(0, 1, std::min((size_t)4, max_ancestors/3), std::min((size_t)4, max_descendants/3)), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) || + (m_spend_zero_conf_change && SelectCoinsMinConf(value_to_select, CoinEligibilityFilter(0, 1, max_ancestors/2, max_descendants/2), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) || + (m_spend_zero_conf_change && SelectCoinsMinConf(value_to_select, CoinEligibilityFilter(0, 1, max_ancestors-1, max_descendants-1, true /* include_partial_groups */), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)) || + (m_spend_zero_conf_change && !fRejectLongChains && SelectCoinsMinConf(value_to_select, CoinEligibilityFilter(0, 1, std::numeric_limits<uint64_t>::max(), std::numeric_limits<uint64_t>::max(), true /* include_partial_groups */), vCoins, setCoinsRet, nValueRet, coin_selection_params, bnb_used)); // because SelectCoinsMinConf clears the setCoinsRet, we now add the possible inputs to the coinset util::insert(setCoinsRet, setPresetCoins); @@ -2782,6 +2771,7 @@ bool CWallet::CreateTransactionInternal( std::vector<COutput> vAvailableCoins; AvailableCoins(vAvailableCoins, 
true, &coin_control, 1, MAX_MONEY, MAX_MONEY, 0); CoinSelectionParams coin_selection_params; // Parameters for coin selection, init with dummy + coin_selection_params.m_avoid_partial_spends = coin_control.m_avoid_partial_spends; // Create change script that will be used if we need change // TODO: pass in scriptChange instead of reservedest so @@ -4192,51 +4182,90 @@ bool CWalletTx::IsImmatureCoinBase() const return GetBlocksToMaturity() > 0; } -std::vector<OutputGroup> CWallet::GroupOutputs(const std::vector<COutput>& outputs, bool single_coin, const size_t max_ancestors) const { - std::vector<OutputGroup> groups; - std::map<CTxDestination, OutputGroup> gmap; - std::set<CTxDestination> full_groups; +std::vector<OutputGroup> CWallet::GroupOutputs(const std::vector<COutput>& outputs, bool separate_coins, const CFeeRate& effective_feerate, const CFeeRate& long_term_feerate, const CoinEligibilityFilter& filter, bool positive_only) const +{ + std::vector<OutputGroup> groups_out; - for (const auto& output : outputs) { - if (output.fSpendable) { - CTxDestination dst; - CInputCoin input_coin = output.GetInputCoin(); + if (separate_coins) { + // Single coin means no grouping. Each COutput gets its own OutputGroup. + for (const COutput& output : outputs) { + // Skip outputs we cannot spend + if (!output.fSpendable) continue; size_t ancestors, descendants; chain().getTransactionAncestry(output.tx->GetHash(), ancestors, descendants); - if (!single_coin && ExtractDestination(output.tx->tx->vout[output.i].scriptPubKey, dst)) { - auto it = gmap.find(dst); - if (it != gmap.end()) { - // Limit output groups to no more than OUTPUT_GROUP_MAX_ENTRIES - // number of entries, to protect against inadvertently creating - // a too-large transaction when using -avoidpartialspends to - // prevent breaking consensus or surprising users with a very - // high amount of fees. - if (it->second.m_outputs.size() >= OUTPUT_GROUP_MAX_ENTRIES) { - groups.push_back(it->second); - it->second = OutputGroup{}; - full_groups.insert(dst); - } - it->second.Insert(input_coin, output.nDepth, output.tx->IsFromMe(ISMINE_ALL), ancestors, descendants); - } else { - gmap[dst].Insert(input_coin, output.nDepth, output.tx->IsFromMe(ISMINE_ALL), ancestors, descendants); - } - } else { - groups.emplace_back(input_coin, output.nDepth, output.tx->IsFromMe(ISMINE_ALL), ancestors, descendants); - } + CInputCoin input_coin = output.GetInputCoin(); + + // Make an OutputGroup containing just this output + OutputGroup group{effective_feerate, long_term_feerate}; + group.Insert(input_coin, output.nDepth, output.tx->IsFromMe(ISMINE_ALL), ancestors, descendants, positive_only); + + // Check the OutputGroup's eligibility. Only add the eligible ones. + if (positive_only && group.effective_value <= 0) continue; + if (group.m_outputs.size() > 0 && group.EligibleForSpending(filter)) groups_out.push_back(group); + } + return groups_out; + } + + // We want to combine COutputs that have the same scriptPubKey into single OutputGroups + // except when there are more than OUTPUT_GROUP_MAX_ENTRIES COutputs grouped in an OutputGroup. + // To do this, we maintain a map where the key is the scriptPubKey and the value is a vector of OutputGroups. + // For each COutput, we check if the scriptPubKey is in the map, and if it is, the COutput's CInputCoin is added + // to the last OutputGroup in the vector for the scriptPubKey. When the last OutputGroup has + // OUTPUT_GROUP_MAX_ENTRIES CInputCoins, a new OutputGroup is added to the end of the vector. 
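
The comment block above describes the new grouping rule: outputs paying the same scriptPubKey are merged into one OutputGroup, and once a group reaches OUTPUT_GROUP_MAX_ENTRIES a fresh group is started for that script, so -avoidpartialspends cannot snowball into an oversized transaction. Below is a simplified sketch of that bucketing in Python: `group_outputs` is a hypothetical name, plain amounts stand in for CInputCoin, the cap value is assumed, and fee/eligibility handling is omitted.

```python
from collections import defaultdict

OUTPUT_GROUP_MAX_ENTRIES = 10  # assumed cap for illustration; the real constant lives in the C++ code

def group_outputs(outputs, separate_coins=False):
    """outputs: iterable of (script, amount) pairs. Returns a list of groups
    (lists of amounts), grouped per script and split whenever a group is full."""
    if separate_coins:
        # No grouping: each output becomes its own group (the separate_coins path).
        return [[amount] for _script, amount in outputs]

    spk_to_groups = defaultdict(lambda: [[]])       # script -> vector of groups
    for script, amount in outputs:
        groups = spk_to_groups[script]
        if len(groups[-1]) >= OUTPUT_GROUP_MAX_ENTRIES:
            groups.append([])                       # last group is full, start a new one
        groups[-1].append(amount)

    # Every group, including the trailing partial one, becomes a candidate.
    return [g for groups in spk_to_groups.values() for g in groups]

coins = [("spk_a", 1)] * 25 + [("spk_b", 2)] * 3
assert sorted(len(g) for g in group_outputs(coins)) == [3, 5, 10, 10]  # 25 split as 10+10+5, plus a group of 3
```

The real code additionally skips unspendable outputs, applies the eligibility filter, drops non-positive-effective-value groups on the BnB path, and only keeps the trailing partial group when `m_include_partial_groups` is set, as shown in the hunks above and below.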
+ std::map<CScript, std::vector<OutputGroup>> spk_to_groups_map; + for (const auto& output : outputs) { + // Skip outputs we cannot spend + if (!output.fSpendable) continue; + + size_t ancestors, descendants; + chain().getTransactionAncestry(output.tx->GetHash(), ancestors, descendants); + CInputCoin input_coin = output.GetInputCoin(); + CScript spk = input_coin.txout.scriptPubKey; + + std::vector<OutputGroup>& groups = spk_to_groups_map[spk]; + + if (groups.size() == 0) { + // No OutputGroups for this scriptPubKey yet, add one + groups.emplace_back(effective_feerate, long_term_feerate); } + + // Get the last OutputGroup in the vector so that we can add the CInputCoin to it + // A pointer is used here so that group can be reassigned later if it is full. + OutputGroup* group = &groups.back(); + + // Check if this OutputGroup is full. We limit to OUTPUT_GROUP_MAX_ENTRIES when using -avoidpartialspends + // to avoid surprising users with very high fees. + if (group->m_outputs.size() >= OUTPUT_GROUP_MAX_ENTRIES) { + // The last output group is full, add a new group to the vector and use that group for the insertion + groups.emplace_back(effective_feerate, long_term_feerate); + group = &groups.back(); + } + + // Add the input_coin to group + group->Insert(input_coin, output.nDepth, output.tx->IsFromMe(ISMINE_ALL), ancestors, descendants, positive_only); } - if (!single_coin) { - for (auto& it : gmap) { - auto& group = it.second; - if (full_groups.count(it.first) > 0) { - // Make this unattractive as we want coin selection to avoid it if possible - group.m_ancestors = max_ancestors - 1; + + // Now we go through the entire map and pull out the OutputGroups + for (const auto& spk_and_groups_pair: spk_to_groups_map) { + const std::vector<OutputGroup>& groups_per_spk= spk_and_groups_pair.second; + + // Go through the vector backwards. This allows for the first item we deal with being the partial group. + for (auto group_it = groups_per_spk.rbegin(); group_it != groups_per_spk.rend(); group_it++) { + const OutputGroup& group = *group_it; + + // Don't include partial groups if there are full groups too and we don't want partial groups + if (group_it == groups_per_spk.rbegin() && groups_per_spk.size() > 1 && !filter.m_include_partial_groups) { + continue; } - groups.push_back(group); + + // Check the OutputGroup's eligibility. Only add the eligible ones. + if (positive_only && group.effective_value <= 0) continue; + if (group.m_outputs.size() > 0 && group.EligibleForSpending(filter)) groups_out.push_back(group); } } - return groups; + + return groups_out; } bool CWallet::IsCrypted() const diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index e3eae1dd95..4fc4466604 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -610,8 +610,16 @@ struct CoinSelectionParams size_t tx_noinputs_size = 0; //! 
Indicate that we are subtracting the fee from outputs bool m_subtract_fee_outputs = false; - - CoinSelectionParams(bool use_bnb, size_t change_output_size, size_t change_spend_size, CFeeRate effective_fee, size_t tx_noinputs_size) : use_bnb(use_bnb), change_output_size(change_output_size), change_spend_size(change_spend_size), effective_fee(effective_fee), tx_noinputs_size(tx_noinputs_size) {} + bool m_avoid_partial_spends = false; + + CoinSelectionParams(bool use_bnb, size_t change_output_size, size_t change_spend_size, CFeeRate effective_fee, size_t tx_noinputs_size, bool avoid_partial) : + use_bnb(use_bnb), + change_output_size(change_output_size), + change_spend_size(change_spend_size), + effective_fee(effective_fee), + tx_noinputs_size(tx_noinputs_size), + m_avoid_partial_spends(avoid_partial) + {} CoinSelectionParams() {} }; @@ -818,7 +826,7 @@ public: * completion the coin set and corresponding actual target value is * assembled */ - bool SelectCoinsMinConf(const CAmount& nTargetValue, const CoinEligibilityFilter& eligibility_filter, std::vector<OutputGroup> groups, + bool SelectCoinsMinConf(const CAmount& nTargetValue, const CoinEligibilityFilter& eligibility_filter, std::vector<COutput> coins, std::set<CInputCoin>& setCoinsRet, CAmount& nValueRet, const CoinSelectionParams& coin_selection_params, bool& bnb_used) const; bool IsSpent(const uint256& hash, unsigned int n) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); @@ -827,7 +835,7 @@ public: bool IsSpentKey(const uint256& hash, unsigned int n) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); void SetSpentKeyState(WalletBatch& batch, const uint256& hash, unsigned int n, bool used, std::set<CTxDestination>& tx_destinations) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); - std::vector<OutputGroup> GroupOutputs(const std::vector<COutput>& outputs, bool single_coin, const size_t max_ancestors) const; + std::vector<OutputGroup> GroupOutputs(const std::vector<COutput>& outputs, bool separate_coins, const CFeeRate& effective_feerate, const CFeeRate& long_term_feerate, const CoinEligibilityFilter& filter, bool positive_only) const; bool IsLockedCoin(uint256 hash, unsigned int n) const EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); void LockCoin(const COutPoint& output) EXCLUSIVE_LOCKS_REQUIRED(cs_wallet); diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index 4e6270220e..69854cae05 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -551,13 +551,6 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, CHDChain chain; ssValue >> chain; pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadHDChain(chain); - } else if (strType == DBKeys::FLAGS) { - uint64_t flags; - ssValue >> flags; - if (!pwallet->LoadWalletFlags(flags)) { - strErr = "Error reading wallet database: Unknown non-tolerable wallet flags found"; - return false; - } } else if (strType == DBKeys::OLD_KEY) { strErr = "Found unsupported 'wkey' record, try loading with version 0.18"; return false; @@ -662,7 +655,8 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, wss.fIsEncrypted = true; } else if (strType != DBKeys::BESTBLOCK && strType != DBKeys::BESTBLOCK_NOMERKLE && strType != DBKeys::MINVERSION && strType != DBKeys::ACENTRY && - strType != DBKeys::VERSION && strType != DBKeys::SETTINGS) { + strType != DBKeys::VERSION && strType != DBKeys::SETTINGS && + strType != DBKeys::FLAGS) { wss.m_unknown_records++; } } catch (const std::exception& e) { @@ -707,6 +701,16 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet) 
pwallet->LoadMinVersion(nMinVersion); } + // Load wallet flags, so they are known when processing other records. + // The FLAGS key is absent during wallet creation. + uint64_t flags; + if (m_batch->Read(DBKeys::FLAGS, flags)) { + if (!pwallet->LoadWalletFlags(flags)) { + pwallet->WalletLogPrintf("Error reading wallet database: Unknown non-tolerable wallet flags found\n"); + return DBErrors::CORRUPT; + } + } + // Get cursor if (!m_batch->StartCursor()) { diff --git a/src/wallet/wallettool.cpp b/src/wallet/wallettool.cpp index bc90491a2c..b2cb0bf479 100644 --- a/src/wallet/wallettool.cpp +++ b/src/wallet/wallettool.cpp @@ -103,10 +103,8 @@ static void WalletShowInfo(CWallet* wallet_instance) tfm::format(std::cout, "Address Book: %zu\n", wallet_instance->m_address_book.size()); } -bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command, const std::string& name) +bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command) { - const fs::path path = fsbridge::AbsPathJoin(GetWalletDir(), name); - if (args.IsArgSet("-format") && command != "createfromdump") { tfm::format(std::cerr, "The -format option can only be used with the \"createfromdump\" command.\n"); return false; @@ -119,6 +117,12 @@ bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command, tfm::format(std::cerr, "The -descriptors option can only be used with the 'create' command.\n"); return false; } + if (command == "create" && !args.IsArgSet("-wallet")) { + tfm::format(std::cerr, "Wallet name must be provided when creating a new wallet.\n"); + return false; + } + const std::string name = args.GetArg("-wallet", ""); + const fs::path path = fsbridge::AbsPathJoin(GetWalletDir(), name); if (command == "create") { DatabaseOptions options; diff --git a/src/wallet/wallettool.h b/src/wallet/wallettool.h index f544a6f727..f4516bb5bc 100644 --- a/src/wallet/wallettool.h +++ b/src/wallet/wallettool.h @@ -10,7 +10,7 @@ namespace WalletTool { void WalletShowInfo(CWallet* wallet_instance); -bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command, const std::string& file); +bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command); } // namespace WalletTool diff --git a/test/functional/README.md b/test/functional/README.md index 2d04413eb2..d830ba0334 100644 --- a/test/functional/README.md +++ b/test/functional/README.md @@ -63,10 +63,13 @@ don't have test cases for. - Avoid stop-starting the nodes multiple times during the test if possible. A stop-start takes several seconds, so doing it several times blows up the runtime of the test. -- Set the `self.setup_clean_chain` variable in `set_test_params()` to control whether - or not to use the cached data directories. The cached data directories - contain a 200-block pre-mined blockchain and wallets for four nodes. Each node - has 25 mature blocks (25x50=1250 BTC) in its wallet. +- Set the `self.setup_clean_chain` variable in `set_test_params()` to `True` to + initialize an empty blockchain and start from the Genesis block, rather than + load a premined blockchain from cache with the default value of `False`. The + cached data directories contain a 200-block pre-mined blockchain with the + spendable mining rewards being split between four nodes. Each node has 25 + mature block subsidies (25x50=1250 BTC) in its wallet. Using them is much more + efficient than mining blocks in your test. 
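
As the README change above spells out, `setup_clean_chain` only controls whether a test starts from the Genesis block or loads the cached 200-block pre-mined chain. A minimal skeleton making that choice explicit is shown below; the class name and docstring are placeholders, while the attributes and helpers (`setup_clean_chain`, `num_nodes`, `assert_equal`) are the ones used elsewhere in this diff, for example in example_test.py further down.

```python
#!/usr/bin/env python3
"""Placeholder skeleton illustrating set_test_params; not part of the test suite."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal


class SkeletonTest(BitcoinTestFramework):
    def set_test_params(self):
        # True: start from an empty chain at the Genesis block.
        # False (the default): load the cached 200-block pre-mined chain,
        # which is much faster than mining blocks inside the test.
        self.setup_clean_chain = True
        self.num_nodes = 1

    def run_test(self):
        # With a clean chain, only the Genesis block (height 0) exists here.
        assert_equal(self.nodes[0].getblockcount(), 0)


if __name__ == '__main__':
    SkeletonTest().main()
```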
- When calling RPCs with lots of arguments, consider using named keyword arguments instead of positional arguments to make the intent of the call clear to readers. diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py index 6e72db1d96..fab921ef19 100644 --- a/test/functional/data/invalid_txs.py +++ b/test/functional/data/invalid_txs.py @@ -57,7 +57,7 @@ class BadTxTemplate: __metaclass__ = abc.ABCMeta # The expected error code given by bitcoind upon submission of the tx. - reject_reason = "" # type: Optional[str] + reject_reason: Optional[str] = "" # Only specified if it differs from mempool acceptance error. block_reject_reason = "" diff --git a/test/functional/example_test.py b/test/functional/example_test.py index 97f24e1b6e..a0eb213a78 100755 --- a/test/functional/example_test.py +++ b/test/functional/example_test.py @@ -76,6 +76,9 @@ class ExampleTest(BitcoinTestFramework): """Override test parameters for your individual test. This method must be overridden and num_nodes must be explicitly set.""" + # By default every test loads a pre-mined chain of 200 blocks from cache. + # Set setup_clean_chain to True to skip this and start from the Genesis + # block. self.setup_clean_chain = True self.num_nodes = 3 # Use self.extra_args to change command-line arguments for the nodes diff --git a/test/functional/feature_asmap.py b/test/functional/feature_asmap.py index 2c6553fbe2..5fcecb4882 100755 --- a/test/functional/feature_asmap.py +++ b/test/functional/feature_asmap.py @@ -36,7 +36,6 @@ def expected_messages(filename): class AsmapTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 def test_without_asmap_arg(self): diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py index 603d7f5d3b..1a148f04f4 100755 --- a/test/functional/feature_assumevalid.py +++ b/test/functional/feature_assumevalid.py @@ -29,9 +29,11 @@ Start three nodes: block 200. node2 will reject block 102 since it's assumed valid, but it isn't buried by at least two weeks' work. """ -import time -from test_framework.blocktools import (create_block, create_coinbase) +from test_framework.blocktools import ( + create_block, + create_coinbase, +) from test_framework.key import ECKey from test_framework.messages import ( CBlockHeader, @@ -79,24 +81,6 @@ class AssumeValidTest(BitcoinTestFramework): assert not p2p_conn.is_connected break - def assert_blockchain_height(self, node, height): - """Wait until the blockchain is no longer advancing and verify it's reached the expected height.""" - last_height = node.getblock(node.getbestblockhash())['height'] - timeout = 10 - while True: - time.sleep(0.25) - current_height = node.getblock(node.getbestblockhash())['height'] - if current_height != last_height: - last_height = current_height - if timeout < 0: - assert False, "blockchain too short after timeout: %d" % current_height - timeout - 0.25 - continue - elif current_height > height: - assert False, "blockchain too long: %d" % current_height - elif current_height == height: - break - def run_test(self): p2p0 = self.nodes[0].add_p2p_connection(BaseNode()) @@ -177,7 +161,8 @@ class AssumeValidTest(BitcoinTestFramework): # Send blocks to node0. Block 102 will be rejected. self.send_blocks_until_disconnected(p2p0) - self.assert_blockchain_height(self.nodes[0], 101) + self.wait_until(lambda: self.nodes[0].getblockcount() >= 101) + assert_equal(self.nodes[0].getblockcount(), 101) # Send all blocks to node1. 
All blocks will be accepted. for i in range(2202): @@ -188,7 +173,8 @@ class AssumeValidTest(BitcoinTestFramework): # Send blocks to node2. Block 102 will be rejected. self.send_blocks_until_disconnected(p2p2) - self.assert_blockchain_height(self.nodes[2], 101) + self.wait_until(lambda: self.nodes[2].getblockcount() >= 101) + assert_equal(self.nodes[2].getblockcount(), 101) if __name__ == '__main__': diff --git a/test/functional/feature_backwards_compatibility.py b/test/functional/feature_backwards_compatibility.py index b161c71a85..e6a53b52db 100755 --- a/test/functional/feature_backwards_compatibility.py +++ b/test/functional/feature_backwards_compatibility.py @@ -354,73 +354,75 @@ class BackwardsCompatibilityTest(BitcoinTestFramework): hdkeypath = v17_info["hdkeypath"] pubkey = v17_info["pubkey"] - # Copy the 0.16 wallet to the last Bitcoin Core version and open it: - shutil.copyfile( - os.path.join(node_v16_wallets_dir, "wallets/u1_v16"), - os.path.join(node_master_wallets_dir, "u1_v16") - ) - load_res = node_master.loadwallet("u1_v16") - # Make sure this wallet opens without warnings. See https://github.com/bitcoin/bitcoin/pull/19054 - assert_equal(load_res['warning'], '') - wallet = node_master.get_wallet_rpc("u1_v16") - info = wallet.getaddressinfo(v16_addr) - descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + v16_pubkey + ")" - assert_equal(info["desc"], descsum_create(descriptor)) - - # Now copy that same wallet back to 0.16 to make sure no automatic upgrade breaks it - os.remove(os.path.join(node_v16_wallets_dir, "wallets/u1_v16")) - shutil.copyfile( - os.path.join(node_master_wallets_dir, "u1_v16"), - os.path.join(node_v16_wallets_dir, "wallets/u1_v16") - ) - self.start_node(-1, extra_args=["-wallet=u1_v16"]) - wallet = node_v16.get_wallet_rpc("u1_v16") - info = wallet.validateaddress(v16_addr) - assert_equal(info, v16_info) - - # Copy the 0.17 wallet to the last Bitcoin Core version and open it: - node_v17.unloadwallet("u1_v17") - shutil.copytree( - os.path.join(node_v17_wallets_dir, "u1_v17"), - os.path.join(node_master_wallets_dir, "u1_v17") - ) - node_master.loadwallet("u1_v17") - wallet = node_master.get_wallet_rpc("u1_v17") - info = wallet.getaddressinfo(address) - descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + pubkey + ")" - assert_equal(info["desc"], descsum_create(descriptor)) - - # Now copy that same wallet back to 0.17 to make sure no automatic upgrade breaks it - node_master.unloadwallet("u1_v17") - shutil.rmtree(os.path.join(node_v17_wallets_dir, "u1_v17")) - shutil.copytree( - os.path.join(node_master_wallets_dir, "u1_v17"), - os.path.join(node_v17_wallets_dir, "u1_v17") - ) - node_v17.loadwallet("u1_v17") - wallet = node_v17.get_wallet_rpc("u1_v17") - info = wallet.getaddressinfo(address) - assert_equal(info, v17_info) - - # Copy the 0.19 wallet to the last Bitcoin Core version and open it: - shutil.copytree( - os.path.join(node_v19_wallets_dir, "w1_v19"), - os.path.join(node_master_wallets_dir, "w1_v19") - ) - node_master.loadwallet("w1_v19") - wallet = node_master.get_wallet_rpc("w1_v19") - assert wallet.getaddressinfo(address_18075)["solvable"] + if self.is_bdb_compiled(): + # Old wallets are BDB and will only work if BDB is compiled + # Copy the 0.16 wallet to the last Bitcoin Core version and open it: + shutil.copyfile( + os.path.join(node_v16_wallets_dir, "wallets/u1_v16"), + os.path.join(node_master_wallets_dir, "u1_v16") + ) + load_res = node_master.loadwallet("u1_v16") + # Make sure this wallet opens 
without warnings. See https://github.com/bitcoin/bitcoin/pull/19054 + assert_equal(load_res['warning'], '') + wallet = node_master.get_wallet_rpc("u1_v16") + info = wallet.getaddressinfo(v16_addr) + descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + v16_pubkey + ")" + assert_equal(info["desc"], descsum_create(descriptor)) + + # Now copy that same wallet back to 0.16 to make sure no automatic upgrade breaks it + os.remove(os.path.join(node_v16_wallets_dir, "wallets/u1_v16")) + shutil.copyfile( + os.path.join(node_master_wallets_dir, "u1_v16"), + os.path.join(node_v16_wallets_dir, "wallets/u1_v16") + ) + self.start_node(-1, extra_args=["-wallet=u1_v16"]) + wallet = node_v16.get_wallet_rpc("u1_v16") + info = wallet.validateaddress(v16_addr) + assert_equal(info, v16_info) - # Now copy that same wallet back to 0.19 to make sure no automatic upgrade breaks it - node_master.unloadwallet("w1_v19") - shutil.rmtree(os.path.join(node_v19_wallets_dir, "w1_v19")) - shutil.copytree( - os.path.join(node_master_wallets_dir, "w1_v19"), - os.path.join(node_v19_wallets_dir, "w1_v19") - ) - node_v19.loadwallet("w1_v19") - wallet = node_v19.get_wallet_rpc("w1_v19") - assert wallet.getaddressinfo(address_18075)["solvable"] + # Copy the 0.17 wallet to the last Bitcoin Core version and open it: + node_v17.unloadwallet("u1_v17") + shutil.copytree( + os.path.join(node_v17_wallets_dir, "u1_v17"), + os.path.join(node_master_wallets_dir, "u1_v17") + ) + node_master.loadwallet("u1_v17") + wallet = node_master.get_wallet_rpc("u1_v17") + info = wallet.getaddressinfo(address) + descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + pubkey + ")" + assert_equal(info["desc"], descsum_create(descriptor)) + + # Now copy that same wallet back to 0.17 to make sure no automatic upgrade breaks it + node_master.unloadwallet("u1_v17") + shutil.rmtree(os.path.join(node_v17_wallets_dir, "u1_v17")) + shutil.copytree( + os.path.join(node_master_wallets_dir, "u1_v17"), + os.path.join(node_v17_wallets_dir, "u1_v17") + ) + node_v17.loadwallet("u1_v17") + wallet = node_v17.get_wallet_rpc("u1_v17") + info = wallet.getaddressinfo(address) + assert_equal(info, v17_info) + + # Copy the 0.19 wallet to the last Bitcoin Core version and open it: + shutil.copytree( + os.path.join(node_v19_wallets_dir, "w1_v19"), + os.path.join(node_master_wallets_dir, "w1_v19") + ) + node_master.loadwallet("w1_v19") + wallet = node_master.get_wallet_rpc("w1_v19") + assert wallet.getaddressinfo(address_18075)["solvable"] + + # Now copy that same wallet back to 0.19 to make sure no automatic upgrade breaks it + node_master.unloadwallet("w1_v19") + shutil.rmtree(os.path.join(node_v19_wallets_dir, "w1_v19")) + shutil.copytree( + os.path.join(node_master_wallets_dir, "w1_v19"), + os.path.join(node_v19_wallets_dir, "w1_v19") + ) + node_v19.loadwallet("w1_v19") + wallet = node_v19.get_wallet_rpc("w1_v19") + assert wallet.getaddressinfo(address_18075)["solvable"] if __name__ == '__main__': BackwardsCompatibilityTest().main() diff --git a/test/functional/feature_blockfilterindex_prune.py b/test/functional/feature_blockfilterindex_prune.py new file mode 100755 index 0000000000..455073ef9c --- /dev/null +++ b/test/functional/feature_blockfilterindex_prune.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test blockfilterindex in conjunction with prune.""" +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_raises_rpc_error, + assert_greater_than, +) + + +class FeatureBlockfilterindexPruneTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + self.extra_args = [["-fastprune", "-prune=1"], ["-fastprune", "-prune=1", "-blockfilterindex=1"]] + + def run_test(self): + # test basic pruning compatibility & filter access of pruned blocks + self.log.info("check if we can access a blockfilter when pruning is enabled but no blocks are actually pruned") + assert len(self.nodes[1].getblockfilter(self.nodes[1].getbestblockhash())['filter']) > 0 + # Mine two batches of blocks to avoid hitting NODE_NETWORK_LIMITED_MIN_BLOCKS disconnection + self.nodes[1].generate(250) + self.sync_all() + self.nodes[1].generate(250) + self.sync_all() + self.log.info("prune some blocks") + pruneheight = self.nodes[1].pruneblockchain(400) + assert pruneheight != 0 + self.log.info("check if we can access the tips blockfilter when we have pruned some blocks") + assert len(self.nodes[1].getblockfilter(self.nodes[1].getbestblockhash())['filter']) > 0 + self.log.info("check if we can access the blockfilter of a pruned block") + assert len(self.nodes[1].getblockfilter(self.nodes[1].getblockhash(2))['filter']) > 0 + self.log.info("start node without blockfilterindex") + self.stop_node(1) + self.start_node(1, extra_args=self.extra_args[0]) + self.log.info("make sure accessing the blockfilters throws an error") + assert_raises_rpc_error(-1, "Index is not enabled for filtertype basic", self.nodes[1].getblockfilter, self.nodes[1].getblockhash(2)) + self.nodes[1].generate(1000) + self.log.info("prune below the blockfilterindexes best block while blockfilters are disabled") + pruneheight_new = self.nodes[1].pruneblockchain(1000) + assert_greater_than(pruneheight_new, pruneheight) + self.stop_node(1) + self.log.info("make sure we get an init error when starting the node again with block filters") + with self.nodes[1].assert_debug_log(["basic block filter index best block of the index goes beyond pruned data. 
Please disable the index or reindex (which will download the whole blockchain again)"]): + self.nodes[1].assert_start_raises_init_error(extra_args=self.extra_args[1]) + self.log.info("make sure the node starts again with the -reindex arg") + reindex_args = self.extra_args[1] + reindex_args.append("-reindex") + self.start_node(1, extra_args=reindex_args) + + +if __name__ == '__main__': + FeatureBlockfilterindexPruneTest().main() diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py index 2445b6d977..573760a8cb 100755 --- a/test/functional/feature_config_args.py +++ b/test/functional/feature_config_args.py @@ -5,6 +5,7 @@ """Test various command line arguments and configuration file parameters.""" import os +import time from test_framework.test_framework import BitcoinTestFramework from test_framework import util @@ -147,11 +148,68 @@ class ConfArgsTest(BitcoinTestFramework): self.start_node(0, extra_args=['-nonetworkactive=1']) self.stop_node(0) + def test_seed_peers(self): + self.log.info('Test seed peers') + default_data_dir = self.nodes[0].datadir + + # No peers.dat exists and -dnsseed=1 + # We expect the node will use DNS Seeds, but Regtest mode has 0 DNS seeds + # So after 60 seconds, the node should fallback to fixed seeds (this is a slow test) + assert not os.path.exists(os.path.join(default_data_dir, "peers.dat")) + start = int(time.time()) + with self.nodes[0].assert_debug_log(expected_msgs=[ + "Loaded 0 addresses from peers.dat", + "0 addresses found from DNS seeds"]): + self.start_node(0, extra_args=['-dnsseed=1 -mocktime={}'.format(start)]) + with self.nodes[0].assert_debug_log(expected_msgs=[ + "Adding fixed seeds as 60 seconds have passed and addrman is empty"]): + self.nodes[0].setmocktime(start + 65) + self.stop_node(0) + + # No peers.dat exists and -dnsseed=0 + # We expect the node will fallback immediately to fixed seeds + assert not os.path.exists(os.path.join(default_data_dir, "peers.dat")) + start = time.time() + with self.nodes[0].assert_debug_log(expected_msgs=[ + "Loaded 0 addresses from peers.dat", + "DNS seeding disabled", + "Adding fixed seeds as -dnsseed=0, -addnode is not provided and all -seednode(s) attempted\n"]): + self.start_node(0, extra_args=['-dnsseed=0']) + assert time.time() - start < 60 + self.stop_node(0) + + # No peers.dat exists and dns seeds are disabled. + # We expect the node will not add fixed seeds when explicitly disabled. 
+ assert not os.path.exists(os.path.join(default_data_dir, "peers.dat")) + start = time.time() + with self.nodes[0].assert_debug_log(expected_msgs=[ + "Loaded 0 addresses from peers.dat", + "DNS seeding disabled", + "Fixed seeds are disabled"]): + self.start_node(0, extra_args=['-dnsseed=0', '-fixedseeds=0']) + assert time.time() - start < 60 + self.stop_node(0) + + # No peers.dat exists and -dnsseed=0, but a -addnode is provided + # We expect the node will allow 60 seconds prior to using fixed seeds + assert not os.path.exists(os.path.join(default_data_dir, "peers.dat")) + start = int(time.time()) + with self.nodes[0].assert_debug_log(expected_msgs=[ + "Loaded 0 addresses from peers.dat", + "DNS seeding disabled"]): + self.start_node(0, extra_args=['-dnsseed=0', '-addnode=fakenodeaddr -mocktime={}'.format(start)]) + with self.nodes[0].assert_debug_log(expected_msgs=[ + "Adding fixed seeds as 60 seconds have passed and addrman is empty"]): + self.nodes[0].setmocktime(start + 65) + self.stop_node(0) + + def run_test(self): self.stop_node(0) self.test_log_buffer() self.test_args_log() + self.test_seed_peers() self.test_networkactive() self.test_config_file_parser() diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py index f9ece244fb..2b56bc78f5 100755 --- a/test/functional/feature_dbcrash.py +++ b/test/functional/feature_dbcrash.py @@ -49,7 +49,6 @@ from test_framework.util import ( class ChainstateWriteCrashTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 - self.setup_clean_chain = False self.rpc_timeout = 480 self.supports_cli = False diff --git a/test/functional/feature_filelock.py b/test/functional/feature_filelock.py index 7de9a589be..2798d11b0a 100755 --- a/test/functional/feature_filelock.py +++ b/test/functional/feature_filelock.py @@ -4,6 +4,8 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Check that it's not possible to start a second bitcoind instance using the same datadir or wallet.""" import os +import random +import string from test_framework.test_framework import BitcoinTestFramework from test_framework.test_node import ErrorMatch @@ -27,11 +29,21 @@ class FilelockTest(BitcoinTestFramework): self.nodes[1].assert_start_raises_init_error(extra_args=['-datadir={}'.format(self.nodes[0].datadir), '-noserver'], expected_msg=expected_msg) if self.is_wallet_compiled(): - self.nodes[0].createwallet(self.default_wallet_name) - wallet_dir = os.path.join(datadir, 'wallets') - self.log.info("Check that we can't start a second bitcoind instance using the same wallet") - expected_msg = "Error: Error initializing wallet database environment" - self.nodes[1].assert_start_raises_init_error(extra_args=['-walletdir={}'.format(wallet_dir), '-wallet=' + self.default_wallet_name, '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX) + def check_wallet_filelock(descriptors): + wallet_name = ''.join([random.choice(string.ascii_lowercase) for _ in range(6)]) + self.nodes[0].createwallet(wallet_name=wallet_name, descriptors=descriptors) + wallet_dir = os.path.join(datadir, 'wallets') + self.log.info("Check that we can't start a second bitcoind instance using the same wallet") + if descriptors: + expected_msg = "Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?" 
+ else: + expected_msg = "Error: Error initializing wallet database environment" + self.nodes[1].assert_start_raises_init_error(extra_args=['-walletdir={}'.format(wallet_dir), '-wallet=' + wallet_name, '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX) + + if self.is_bdb_compiled(): + check_wallet_filelock(False) + if self.is_sqlite_compiled(): + check_wallet_filelock(True) if __name__ == '__main__': FilelockTest().main() diff --git a/test/functional/feature_includeconf.py b/test/functional/feature_includeconf.py index 6f1a0cd348..f22b7f266a 100755 --- a/test/functional/feature_includeconf.py +++ b/test/functional/feature_includeconf.py @@ -20,7 +20,6 @@ from test_framework.test_framework import BitcoinTestFramework class IncludeConfTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 def setup_chain(self): diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py index f2313bac13..b068ce612c 100755 --- a/test/functional/feature_notifications.py +++ b/test/functional/feature_notifications.py @@ -5,11 +5,11 @@ """Test the -alertnotify, -blocknotify and -walletnotify options.""" import os -from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE, keyhash_to_p2pkh +from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE +from test_framework.descriptors import descsum_create from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - hex_str_to_bytes, ) # Linux allow all characters other than \x00 @@ -49,6 +49,31 @@ class NotificationsTest(BitcoinTestFramework): super().setup_network() def run_test(self): + if self.is_wallet_compiled(): + # Setup the descriptors to be imported to the wallet + seed = "cTdGmKFWpbvpKQ7ejrdzqYT2hhjyb3GPHnLAK7wdi5Em67YLwSm9" + xpriv = "tprv8ZgxMBicQKsPfHCsTwkiM1KT56RXbGGTqvc2hgqzycpwbHqqpcajQeMRZoBD35kW4RtyCemu6j34Ku5DEspmgjKdt2qe4SvRch5Kk8B8A2v" + desc_imports = [{ + "desc": descsum_create("wpkh(" + xpriv + "/0/*)"), + "timestamp": 0, + "active": True, + "keypool": True, + },{ + "desc": descsum_create("wpkh(" + xpriv + "/1/*)"), + "timestamp": 0, + "active": True, + "keypool": True, + "internal": True, + }] + # Make the wallets and import the descriptors + # Ensures that node 0 and node 1 share the same wallet for the conflicting transaction tests below. + for i, name in enumerate(self.wallet_names): + self.nodes[i].createwallet(wallet_name=name, descriptors=self.options.descriptors, blank=True, load_on_startup=True) + if self.options.descriptors: + self.nodes[i].importdescriptors(desc_imports) + else: + self.nodes[i].sethdseed(True, seed) + self.log.info("test -blocknotify") block_count = 10 blocks = self.nodes[1].generatetoaddress(block_count, self.nodes[1].getnewaddress() if self.is_wallet_compiled() else ADDRESS_BCRT1_UNSPENDABLE) @@ -84,11 +109,10 @@ class NotificationsTest(BitcoinTestFramework): for tx_file in os.listdir(self.walletnotify_dir): os.remove(os.path.join(self.walletnotify_dir, tx_file)) - # Conflicting transactions tests. Give node 0 same wallet seed as - # node 1, generate spends from node 0, and check notifications + # Conflicting transactions tests. 
+ # Generate spends from node 0, and check notifications # triggered by node 1 self.log.info("test -walletnotify with conflicting transactions") - self.nodes[0].sethdseed(seed=self.nodes[1].dumpprivkey(keyhash_to_p2pkh(hex_str_to_bytes(self.nodes[1].getwalletinfo()['hdseedid'])[::-1]))) self.nodes[0].rescanblockchain() self.nodes[0].generatetoaddress(100, ADDRESS_BCRT1_UNSPENDABLE) self.sync_blocks() diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py index cd5eff9184..2983feaa0d 100755 --- a/test/functional/feature_proxy.py +++ b/test/functional/feature_proxy.py @@ -44,8 +44,8 @@ from test_framework.netutil import test_ipv6_local RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports -# Networks returned by RPC getpeerinfo, defined in src/netbase.cpp::GetNetworkName() -NET_UNROUTABLE = "unroutable" +# Networks returned by RPC getpeerinfo. +NET_UNROUTABLE = "not_publicly_routable" NET_IPV4 = "ipv4" NET_IPV6 = "ipv6" NET_ONION = "onion" diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py index 6ee2b72c11..5027a9828f 100755 --- a/test/functional/feature_taproot.py +++ b/test/functional/feature_taproot.py @@ -517,7 +517,6 @@ def add_spender(spenders, *args, **kwargs): def random_checksig_style(pubkey): """Creates a random CHECKSIG* tapscript that would succeed with only the valid signature on witness stack.""" - return bytes(CScript([pubkey, OP_CHECKSIG])) opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD]) if (opcode == OP_CHECKSIGVERIFY): ret = CScript([pubkey, opcode, OP_1]) diff --git a/test/functional/feature_utxo_set_hash.py b/test/functional/feature_utxo_set_hash.py new file mode 100755 index 0000000000..6e6046d84d --- /dev/null +++ b/test/functional/feature_utxo_set_hash.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test UTXO set hash value calculation in gettxoutsetinfo.""" + +import struct + +from test_framework.blocktools import create_transaction +from test_framework.messages import ( + CBlock, + COutPoint, + FromHex, +) +from test_framework.muhash import MuHash3072 +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +class UTXOSetHashTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.setup_clean_chain = True + + def skip_test_if_missing_module(self): + self.skip_if_no_wallet() + + def test_deterministic_hash_results(self): + self.log.info("Test deterministic UTXO set hash results") + + # These depend on the setup_clean_chain option, the chain loaded from the cache + assert_equal(self.nodes[0].gettxoutsetinfo()['hash_serialized_2'], "b32ec1dda5a53cd025b95387aad344a801825fe46a60ff952ce26528f01d3be8") + assert_equal(self.nodes[0].gettxoutsetinfo("muhash")['muhash'], "dd5ad2a105c2d29495f577245c357409002329b9f4d6182c0af3dc2f462555c8") + + def test_muhash_implementation(self): + self.log.info("Test MuHash implementation consistency") + + node = self.nodes[0] + + # Generate 100 blocks and remove the first since we plan to spend its + # coinbase + block_hashes = node.generate(100) + blocks = list(map(lambda block: FromHex(CBlock(), node.getblock(block, False)), block_hashes)) + spending = blocks.pop(0) + + # Create a spending transaction and mine a block which includes it + tx = create_transaction(node, spending.vtx[0].rehash(), node.getnewaddress(), amount=49) + txid = node.sendrawtransaction(hexstring=tx.serialize_with_witness().hex(), maxfeerate=0) + + tx_block = node.generateblock(output=node.getnewaddress(), transactions=[txid]) + blocks.append(FromHex(CBlock(), node.getblock(tx_block['hash'], False))) + + # Serialize the outputs that should be in the UTXO set and add them to + # a MuHash object + muhash = MuHash3072() + + for height, block in enumerate(blocks): + # The Genesis block coinbase is not part of the UTXO set and we + # spent the first mined block + height += 2 + + for tx in block.vtx: + for n, tx_out in enumerate(tx.vout): + coinbase = 1 if not tx.vin[0].prevout.hash else 0 + + # Skip witness commitment + if (coinbase and n > 0): + continue + + data = COutPoint(int(tx.rehash(), 16), n).serialize() + data += struct.pack("<i", height * 2 + coinbase) + data += tx_out.serialize() + + muhash.insert(data) + + finalized = muhash.digest() + node_muhash = node.gettxoutsetinfo("muhash")['muhash'] + + assert_equal(finalized[::-1].hex(), node_muhash) + + def run_test(self): + self.test_deterministic_hash_results() + self.test_muhash_implementation() + + +if __name__ == '__main__': + UTXOSetHashTest().main() diff --git a/test/functional/interface_bitcoin_cli.py b/test/functional/interface_bitcoin_cli.py index 1257dff1ae..2cf0ef2251 100755 --- a/test/functional/interface_bitcoin_cli.py +++ b/test/functional/interface_bitcoin_cli.py @@ -29,6 +29,8 @@ class TestBitcoinCli(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 + if self.is_wallet_compiled(): + self.requires_wallet = True def skip_test_if_missing_module(self): self.skip_if_no_cli() diff --git a/test/functional/interface_zmq.py b/test/functional/interface_zmq.py index 946bfa51d4..d0967a9340 100755 --- a/test/functional/interface_zmq.py +++ b/test/functional/interface_zmq.py @@ -27,28 +27,31 @@ def hash256_reversed(byte_str): class ZMQSubscriber: def __init__(self, socket, topic): - self.sequence = 0 
+ self.sequence = None # no sequence number received yet self.socket = socket self.topic = topic self.socket.setsockopt(zmq.SUBSCRIBE, self.topic) - def receive(self): + # Receive message from publisher and verify that topic and sequence match + def _receive_from_publisher_and_check(self): topic, body, seq = self.socket.recv_multipart() # Topic should match the subscriber topic. assert_equal(topic, self.topic) # Sequence should be incremental. - assert_equal(struct.unpack('<I', seq)[-1], self.sequence) + received_seq = struct.unpack('<I', seq)[-1] + if self.sequence is None: + self.sequence = received_seq + else: + assert_equal(received_seq, self.sequence) self.sequence += 1 return body + def receive(self): + return self._receive_from_publisher_and_check() + def receive_sequence(self): - topic, body, seq = self.socket.recv_multipart() - # Topic should match the subscriber topic. - assert_equal(topic, self.topic) - # Sequence should be incremental. - assert_equal(struct.unpack('<I', seq)[-1], self.sequence) - self.sequence += 1 + body = self._receive_from_publisher_and_check() hash = body[:32].hex() label = chr(body[32]) mempool_sequence = None if len(body) != 32+1+8 else struct.unpack("<Q", body[32+1:])[0] @@ -62,6 +65,11 @@ class ZMQSubscriber: class ZMQTest (BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 + if self.is_wallet_compiled(): + self.requires_wallet = True + # This test isn't testing txn relay/timing, so set whitelist on the + # peers for instant txn relay. This speeds up the test run time 2-3x. + self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes def skip_test_if_missing_module(self): self.skip_if_no_py3_zmq() @@ -82,23 +90,46 @@ class ZMQTest (BitcoinTestFramework): # Restart node with the specified zmq notifications enabled, subscribe to # all of them and return the corresponding ZMQSubscriber objects. - def setup_zmq_test(self, services, recv_timeout=60, connect_nodes=False): + def setup_zmq_test(self, services, *, recv_timeout=60, sync_blocks=True): subscribers = [] for topic, address in services: socket = self.ctx.socket(zmq.SUB) - socket.set(zmq.RCVTIMEO, recv_timeout*1000) subscribers.append(ZMQSubscriber(socket, topic.encode())) - self.restart_node(0, ["-zmqpub%s=%s" % (topic, address) for topic, address in services]) - - if connect_nodes: - self.connect_nodes(0, 1) + self.restart_node(0, ["-zmqpub%s=%s" % (topic, address) for topic, address in services] + + self.extra_args[0]) for i, sub in enumerate(subscribers): sub.socket.connect(services[i][1]) - # Relax so that the subscribers are ready before publishing zmq messages - sleep(0.2) + # Ensure that all zmq publisher notification interfaces are ready by + # running the following "sync up" procedure: + # 1. Generate a block on the node + # 2. Try to receive a notification on all subscribers + # 3. 
If all subscribers get a message within the timeout (1 second), + # we are done, otherwise repeat starting from step 1 + for sub in subscribers: + sub.socket.set(zmq.RCVTIMEO, 1000) + while True: + self.nodes[0].generate(1) + recv_failed = False + for sub in subscribers: + try: + sub.receive() + except zmq.error.Again: + self.log.debug("Didn't receive sync-up notification, trying again.") + recv_failed = True + if not recv_failed: + self.log.debug("ZMQ sync-up completed, all subscribers are ready.") + break + + # set subscriber's desired timeout for the test + for sub in subscribers: + sub.socket.set(zmq.RCVTIMEO, recv_timeout*1000) + + self.connect_nodes(0, 1) + if sync_blocks: + self.sync_blocks() return subscribers @@ -108,9 +139,7 @@ class ZMQTest (BitcoinTestFramework): self.restart_node(0, ["-zmqpubrawtx=foo", "-zmqpubhashtx=bar"]) address = 'tcp://127.0.0.1:28332' - subs = self.setup_zmq_test( - [(topic, address) for topic in ["hashblock", "hashtx", "rawblock", "rawtx"]], - connect_nodes=True) + subs = self.setup_zmq_test([(topic, address) for topic in ["hashblock", "hashtx", "rawblock", "rawtx"]]) hashblock = subs[0] hashtx = subs[1] @@ -187,6 +216,7 @@ class ZMQTest (BitcoinTestFramework): hashblock, hashtx = self.setup_zmq_test( [(topic, address) for topic in ["hashblock", "hashtx"]], recv_timeout=2) # 2 second timeout to check end of notifications + self.disconnect_nodes(0, 1) # Generate 1 block in nodes[0] with 1 mempool tx and receive all notifications payment_txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1.0) @@ -235,6 +265,7 @@ class ZMQTest (BitcoinTestFramework): """ self.log.info("Testing 'sequence' publisher") [seq] = self.setup_zmq_test([("sequence", "tcp://127.0.0.1:28333")]) + self.disconnect_nodes(0, 1) # Mempool sequence number starts at 1 seq_num = 1 @@ -385,7 +416,7 @@ class ZMQTest (BitcoinTestFramework): return self.log.info("Testing 'mempool sync' usage of sequence notifier") - [seq] = self.setup_zmq_test([("sequence", "tcp://127.0.0.1:28333")], connect_nodes=True) + [seq] = self.setup_zmq_test([("sequence", "tcp://127.0.0.1:28333")]) # In-memory counter, should always start at 1 next_mempool_seq = self.nodes[0].getrawmempool(mempool_sequence=True)["mempool_sequence"] @@ -485,10 +516,13 @@ class ZMQTest (BitcoinTestFramework): def test_multiple_interfaces(self): # Set up two subscribers with different addresses + # (note that after the reorg test, syncing would fail due to different + # chain lengths on node0 and node1; for this test we only need node0, so + # we can disable syncing blocks on the setup) subscribers = self.setup_zmq_test([ ("hashblock", "tcp://127.0.0.1:28334"), ("hashblock", "tcp://127.0.0.1:28335"), - ]) + ], sync_blocks=False) # Generate 1 block in nodes[0] and receive all notifications self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE) diff --git a/test/functional/mempool_persist.py b/test/functional/mempool_persist.py index 70cd4ebb3b..752b925b92 100755 --- a/test/functional/mempool_persist.py +++ b/test/functional/mempool_persist.py @@ -69,6 +69,8 @@ class MempoolPersistTest(BitcoinTestFramework): assert_equal(len(self.nodes[0].getrawmempool()), 5) assert_equal(len(self.nodes[1].getrawmempool()), 5) + total_fee_old = self.nodes[0].getmempoolinfo()['total_fee'] + self.log.debug("Prioritize a transaction on node0") fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees'] assert_equal(fees['base'], fees['modified']) @@ -76,6 +78,10 @@ class MempoolPersistTest(BitcoinTestFramework): fees = 
self.nodes[0].getmempoolentry(txid=last_txid)['fees'] assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified']) + self.log.info('Check the total base fee is unchanged after prioritisetransaction') + assert_equal(total_fee_old, self.nodes[0].getmempoolinfo()['total_fee']) + assert_equal(total_fee_old, sum(v['fees']['base'] for k, v in self.nodes[0].getrawmempool(verbose=True).items())) + tx_creation_time = self.nodes[0].getmempoolentry(txid=last_txid)['time'] assert_greater_than_or_equal(tx_creation_time, tx_creation_time_lower) assert_greater_than_or_equal(tx_creation_time_higher, tx_creation_time) diff --git a/test/functional/p2p_add_connections.py b/test/functional/p2p_add_connections.py index a63c3a3287..a04ba5db2d 100755 --- a/test/functional/p2p_add_connections.py +++ b/test/functional/p2p_add_connections.py @@ -17,7 +17,6 @@ def check_node_connections(*, node, num_in, num_out): class P2PAddConnections(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 2 def setup_network(self): diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py index 91fbd722cf..69821763bd 100755 --- a/test/functional/p2p_addr_relay.py +++ b/test/functional/p2p_addr_relay.py @@ -46,7 +46,6 @@ class AddrReceiver(P2PInterface): class AddrTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 def run_test(self): diff --git a/test/functional/p2p_blocksonly.py b/test/functional/p2p_blocksonly.py index c592ab52b1..6584efae79 100755 --- a/test/functional/p2p_blocksonly.py +++ b/test/functional/p2p_blocksonly.py @@ -15,7 +15,6 @@ from test_framework.util import assert_equal class P2PBlocksOnly(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 self.extra_args = [["-blocksonly"]] diff --git a/test/functional/p2p_filter.py b/test/functional/p2p_filter.py index 642a217047..8f64419138 100755 --- a/test/functional/p2p_filter.py +++ b/test/functional/p2p_filter.py @@ -19,7 +19,13 @@ from test_framework.messages import ( msg_mempool, msg_version, ) -from test_framework.p2p import P2PInterface, p2p_lock +from test_framework.p2p import ( + P2PInterface, + P2P_SERVICES, + P2P_SUBVERSION, + P2P_VERSION, + p2p_lock, +) from test_framework.script import MAX_SCRIPT_ELEMENT_SIZE from test_framework.test_framework import BitcoinTestFramework @@ -81,7 +87,6 @@ class P2PBloomFilter(P2PInterface): class FilterTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 self.extra_args = [[ '-peerbloomfilters', @@ -217,9 +222,12 @@ class FilterTest(BitcoinTestFramework): self.log.info('Test BIP 37 for a node with fRelay = False') # Add peer but do not send version yet filter_peer_without_nrelay = self.nodes[0].add_p2p_connection(P2PBloomFilter(), send_version=False, wait_for_verack=False) - # Send version with fRelay=False + # Send version with relay=False version_without_fRelay = msg_version() - version_without_fRelay.nRelay = 0 + version_without_fRelay.nVersion = P2P_VERSION + version_without_fRelay.strSubVer = P2P_SUBVERSION + version_without_fRelay.nServices = P2P_SERVICES + version_without_fRelay.relay = 0 filter_peer_without_nrelay.send_message(version_without_fRelay) filter_peer_without_nrelay.wait_for_verack() assert not self.nodes[0].getpeerinfo()[0]['relaytxes'] diff --git a/test/functional/p2p_getaddr_caching.py b/test/functional/p2p_getaddr_caching.py index 2b75ad5175..d375af6fe1 100755 --- 
a/test/functional/p2p_getaddr_caching.py +++ b/test/functional/p2p_getaddr_caching.py @@ -41,7 +41,6 @@ class AddrReceiver(P2PInterface): class AddrTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 def run_test(self): diff --git a/test/functional/p2p_invalid_locator.py b/test/functional/p2p_invalid_locator.py index e4fc9fd178..f884cf90ff 100755 --- a/test/functional/p2p_invalid_locator.py +++ b/test/functional/p2p_invalid_locator.py @@ -13,7 +13,6 @@ from test_framework.test_framework import BitcoinTestFramework class InvalidLocatorTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 - self.setup_clean_chain = False def run_test(self): node = self.nodes[0] # convenience reference to the node diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py index ca8bf908a9..12b8b7baff 100755 --- a/test/functional/p2p_leak.py +++ b/test/functional/p2p_leak.py @@ -17,7 +17,12 @@ from test_framework.messages import ( msg_ping, msg_version, ) -from test_framework.p2p import P2PInterface +from test_framework.p2p import ( + P2PInterface, + P2P_SUBVERSION, + P2P_SERVICES, + P2P_VERSION_RELAY, +) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, @@ -125,12 +130,15 @@ class P2PLeakTest(BitcoinTestFramework): assert_equal(ver.addrFrom.port, 0) assert_equal(ver.addrFrom.ip, '0.0.0.0') assert_equal(ver.nStartingHeight, 201) - assert_equal(ver.nRelay, 1) + assert_equal(ver.relay, 1) self.log.info('Check that old peers are disconnected') p2p_old_peer = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False) old_version_msg = msg_version() old_version_msg.nVersion = 31799 + old_version_msg.strSubVer = P2P_SUBVERSION + old_version_msg.nServices = P2P_SERVICES + old_version_msg.relay = P2P_VERSION_RELAY with self.nodes[0].assert_debug_log(['peer=3 using obsolete version 31799; disconnecting']): p2p_old_peer.send_message(old_version_msg) p2p_old_peer.wait_for_disconnect() diff --git a/test/functional/p2p_message_capture.py b/test/functional/p2p_message_capture.py new file mode 100755 index 0000000000..080b2d93ad --- /dev/null +++ b/test/functional/p2p_message_capture.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test per-peer message capture capability. + +Additionally, the output of contrib/message-capture/message-capture-parser.py should be verified manually. +""" + +import glob +from io import BytesIO +import os + +from test_framework.p2p import P2PDataStore, MESSAGEMAP +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +TIME_SIZE = 8 +LENGTH_SIZE = 4 +MSGTYPE_SIZE = 12 + +def mini_parser(dat_file): + """Parse a data file created by CaptureMessage. + + From the data file we'll only check the structure. + + We won't care about things like: + - Deserializing the payload of the message + - This is managed by the deserialize methods in test_framework.messages + - The order of the messages + - There's no reason why we can't, say, change the order of the messages in the handshake + - Message Type + - We can add new message types + + We're ignoring these because they're simply too brittle to test here. 
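    Each record in the file is laid out as: an 8-byte timestamp, the message type
    padded with zero bytes to MSGTYPE_SIZE, and a 4-byte little-endian payload
    length, followed by the payload itself. A hypothetical writer for the same
    layout (illustrative only, not part of the framework) could look like:

        def capture_record(msgtype, payload, timestamp=0):
            header = timestamp.to_bytes(TIME_SIZE, "little")        # timestamp value is skipped by this parser
            header += msgtype.ljust(MSGTYPE_SIZE, b"\x00")          # zero-padded message type
            header += len(payload).to_bytes(LENGTH_SIZE, "little")  # payload length
            return header + payload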
+ """ + with open(dat_file, 'rb') as f_in: + # This should have at least one message in it + assert(os.fstat(f_in.fileno()).st_size >= TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE) + while True: + tmp_header_raw = f_in.read(TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE) + if not tmp_header_raw: + break + tmp_header = BytesIO(tmp_header_raw) + tmp_header.read(TIME_SIZE) # skip the timestamp field + raw_msgtype = tmp_header.read(MSGTYPE_SIZE) + msgtype: bytes = raw_msgtype.split(b'\x00', 1)[0] + remainder = raw_msgtype.split(b'\x00', 1)[1] + assert(len(msgtype) > 0) + assert(msgtype in MESSAGEMAP) + assert(len(remainder) == 0 or not remainder.decode().isprintable()) + length: int = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little") + data = f_in.read(length) + assert_equal(len(data), length) + + + +class MessageCaptureTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.extra_args = [["-capturemessages"]] + self.setup_clean_chain = True + + def run_test(self): + capturedir = os.path.join(self.nodes[0].datadir, "regtest/message_capture") + # Connect a node so that the handshake occurs + self.nodes[0].add_p2p_connection(P2PDataStore()) + self.nodes[0].disconnect_p2ps() + recv_file = glob.glob(os.path.join(capturedir, "*/msgs_recv.dat"))[0] + mini_parser(recv_file) + sent_file = glob.glob(os.path.join(capturedir, "*/msgs_sent.dat"))[0] + mini_parser(sent_file) + + +if __name__ == '__main__': + MessageCaptureTest().main() diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py index 8a751c6b54..4bf96cb0e6 100755 --- a/test/functional/p2p_tx_download.py +++ b/test/functional/p2p_tx_download.py @@ -56,7 +56,6 @@ MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + INBOUND_PEER_TX_DELAY + TXID_RE class TxDownloadTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 2 def test_tx_requests(self): diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index 99be6b7b8e..84ca1b99c2 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -268,6 +268,18 @@ class BlockchainTest(BitcoinTestFramework): res5 = node.gettxoutsetinfo(hash_type='none') assert 'hash_serialized_2' not in res5 + # hash_type muhash should return a different UTXO set hash. + res6 = node.gettxoutsetinfo(hash_type='muhash') + assert 'muhash' in res6 + assert(res['hash_serialized_2'] != res6['muhash']) + + # muhash should not be included in gettxoutset unless requested. 
+ for r in [res, res2, res3, res4, res5]: + assert 'muhash' not in r + + # Unknown hash_type raises an error + assert_raises_rpc_error(-8, "foohash is not a valid hash_type", node.gettxoutsetinfo, "foohash") + def _test_getblockheader(self): node = self.nodes[0] diff --git a/test/functional/rpc_estimatefee.py b/test/functional/rpc_estimatefee.py index 81862ac69e..51b7efb4c3 100755 --- a/test/functional/rpc_estimatefee.py +++ b/test/functional/rpc_estimatefee.py @@ -14,7 +14,6 @@ from test_framework.util import assert_raises_rpc_error class EstimateFeeTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 def run_test(self): diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index de0b7f303f..2d41963beb 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -25,6 +25,7 @@ from test_framework.util import ( assert_raises_rpc_error, p2p_port, ) +from test_framework.wallet import MiniWallet def assert_net_servicesnames(servicesflag, servicenames): @@ -48,6 +49,9 @@ class NetTest(BitcoinTestFramework): self.supports_cli = False def run_test(self): + # We need miniwallet to make a transaction + self.wallet = MiniWallet(self.nodes[0]) + self.wallet.generate(1) # Get out of IBD for the minfeefilter and getpeerinfo tests. self.nodes[0].generate(101) @@ -74,8 +78,7 @@ class NetTest(BitcoinTestFramework): def test_getpeerinfo(self): self.log.info("Test getpeerinfo") # Create a few getpeerinfo last_block/last_transaction values. - if self.is_wallet_compiled(): - self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1) + self.wallet.send_self_transfer(from_node=self.nodes[0]) # Make a transaction so we can see it in the getpeerinfo results self.nodes[1].generate(1) self.sync_all() time_now = int(time.time()) @@ -101,6 +104,9 @@ class NetTest(BitcoinTestFramework): assert_equal(peer_info[1][0]['connection_type'], 'manual') assert_equal(peer_info[1][1]['connection_type'], 'inbound') + # Check dynamically generated networks list in getpeerinfo help output. + assert "(ipv4, ipv6, onion, not_publicly_routable)" in self.nodes[0].help("getpeerinfo") + def test_getnettotals(self): self.log.info("Test getnettotals") # Test getnettotals and getpeerinfo by doing a ping. The bytes @@ -149,6 +155,9 @@ class NetTest(BitcoinTestFramework): for info in network_info: assert_net_servicesnames(int(info["localservices"], 0x10), info["localservicesnames"]) + # Check dynamically generated networks list in getnetworkinfo help output. + assert "(ipv4, ipv6, onion)" in self.nodes[0].help("getnetworkinfo") + def test_getaddednodeinfo(self): self.log.info("Test getaddednodeinfo") assert_equal(self.nodes[0].getaddednodeinfo(), []) diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py index b364077a9a..ed6abaed78 100755 --- a/test/functional/rpc_psbt.py +++ b/test/functional/rpc_psbt.py @@ -24,7 +24,6 @@ MAX_BIP125_RBF_SEQUENCE = 0xfffffffd class PSBTTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 3 self.extra_args = [ ["-walletrbf=1"], diff --git a/test/functional/rpc_uptime.py b/test/functional/rpc_uptime.py index e86f91b1d0..6177970872 100755 --- a/test/functional/rpc_uptime.py +++ b/test/functional/rpc_uptime.py @@ -10,6 +10,7 @@ Test corresponds to code in rpc/server.cpp. 
import time from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_raises_rpc_error class UptimeTest(BitcoinTestFramework): @@ -18,8 +19,12 @@ class UptimeTest(BitcoinTestFramework): self.setup_clean_chain = True def run_test(self): + self._test_negative_time() self._test_uptime() + def _test_negative_time(self): + assert_raises_rpc_error(-8, "Mocktime can not be negative: -1.", self.nodes[0].setmocktime, -1) + def _test_uptime(self): wait_time = 10 self.nodes[0].setmocktime(int(time.time() + wait_time)) diff --git a/test/functional/test-shell.md b/test/functional/test-shell.md index f6ea9ef682..b8e899d675 100644 --- a/test/functional/test-shell.md +++ b/test/functional/test-shell.md @@ -178,7 +178,7 @@ can be called after the TestShell is shut down. | `num_nodes` | `1` | Sets the number of initialized bitcoind processes. | | `perf` | False | Profiles running nodes with `perf` for the duration of the test if set to `True`. | | `rpc_timeout` | `60` | Sets the RPC server timeout for the underlying bitcoind processes. | -| `setup_clean_chain` | `False` | Initializes an empty blockchain by default. A 199-block-long chain is initialized if set to `True`. | +| `setup_clean_chain` | `False` | A 200-block-long chain is initialized from cache by default. Instead, `setup_clean_chain` initializes an empty blockchain if set to `True`. | | `randomseed` | Random Integer | `TestShell.options.randomseed` is a member of `TestShell` which can be accessed during a test to seed a random generator. User can override default with a constant value for reproducible test runs. | | `supports_cli` | `False` | Whether the bitcoin-cli utility is compiled and available for the test. | | `tmpdir` | `"/var/folders/.../"` | Sets directory for test logs. 
Will be deleted upon a successful test run unless `nocleanup` is set to `True` | diff --git a/test/functional/test_framework/bdb.py b/test/functional/test_framework/bdb.py index 9de358aa0a..97b9c1d6d0 100644 --- a/test/functional/test_framework/bdb.py +++ b/test/functional/test_framework/bdb.py @@ -51,7 +51,6 @@ def dump_leaf_page(data): page_info['pgno'] = pgno page_info['prev_pgno'] = prev_pgno page_info['next_pgno'] = next_pgno - page_info['entries'] = entries page_info['hf_offset'] = hf_offset page_info['level'] = level page_info['pg_type'] = pg_type diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py index e0cbab45ce..26526e35fa 100644 --- a/test/functional/test_framework/key.py +++ b/test/functional/test_framework/key.py @@ -20,10 +20,6 @@ def TaggedHash(tag, data): ss += data return hashlib.sha256(ss).digest() -def xor_bytes(b0, b1): - assert len(b0) == len(b1) - return bytes(x ^ y for (x, y) in zip(b0, b1)) - def jacobi_symbol(n, k): """Compute the Jacobi symbol of n modulo k @@ -510,7 +506,7 @@ class TestFrameworkKey(unittest.TestCase): if pubkey is not None: keys[privkey] = pubkey for msg in byte_arrays: # test every combination of message, signing key, verification key - for sign_privkey, sign_pubkey in keys.items(): + for sign_privkey, _ in keys.items(): sig = sign_schnorr(sign_privkey, msg) for verify_privkey, verify_pubkey in keys.items(): if verify_privkey == sign_privkey: diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 6ad4e13db2..a18a9ec109 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -31,11 +31,6 @@ import time from test_framework.siphash import siphash256 from test_framework.util import hex_str_to_bytes, assert_equal -MIN_VERSION_SUPPORTED = 60001 -MY_VERSION = 70016 # past wtxid relay -MY_SUBVERSION = b"/python-p2p-tester:0.0.3/" -MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37) - MAX_LOCATOR_SZ = 101 MAX_BLOCK_BASE_SIZE = 1000000 MAX_BLOOM_FILTER_SIZE = 36000 @@ -326,22 +321,20 @@ class CBlockLocator: __slots__ = ("nVersion", "vHave") def __init__(self): - self.nVersion = MY_VERSION self.vHave = [] def deserialize(self, f): - self.nVersion = struct.unpack("<i", f.read(4))[0] + struct.unpack("<i", f.read(4))[0] # Ignore version field. self.vHave = deser_uint256_vector(f) def serialize(self): r = b"" - r += struct.pack("<i", self.nVersion) + r += struct.pack("<i", 0) # Bitcoin Core ignores version field. Set it to 0. 
r += ser_uint256_vector(self.vHave) return r def __repr__(self): - return "CBlockLocator(nVersion=%i vHave=%s)" \ - % (self.nVersion, repr(self.vHave)) + return "CBlockLocator(vHave=%s)" % (repr(self.vHave)) class COutPoint: @@ -1023,20 +1016,20 @@ class CMerkleBlock: # Objects that correspond to messages on the wire class msg_version: - __slots__ = ("addrFrom", "addrTo", "nNonce", "nRelay", "nServices", + __slots__ = ("addrFrom", "addrTo", "nNonce", "relay", "nServices", "nStartingHeight", "nTime", "nVersion", "strSubVer") msgtype = b"version" def __init__(self): - self.nVersion = MY_VERSION - self.nServices = NODE_NETWORK | NODE_WITNESS + self.nVersion = 0 + self.nServices = 0 self.nTime = int(time.time()) self.addrTo = CAddress() self.addrFrom = CAddress() self.nNonce = random.getrandbits(64) - self.strSubVer = MY_SUBVERSION + self.strSubVer = '' self.nStartingHeight = -1 - self.nRelay = MY_RELAY + self.relay = 0 def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] @@ -1048,18 +1041,18 @@ class msg_version: self.addrFrom = CAddress() self.addrFrom.deserialize(f, with_time=False) self.nNonce = struct.unpack("<Q", f.read(8))[0] - self.strSubVer = deser_string(f) + self.strSubVer = deser_string(f).decode('utf-8') self.nStartingHeight = struct.unpack("<i", f.read(4))[0] if self.nVersion >= 70001: # Relay field is optional for version 70001 onwards try: - self.nRelay = struct.unpack("<b", f.read(1))[0] + self.relay = struct.unpack("<b", f.read(1))[0] except: - self.nRelay = 0 + self.relay = 0 else: - self.nRelay = 0 + self.relay = 0 def serialize(self): r = b"" @@ -1069,16 +1062,16 @@ class msg_version: r += self.addrTo.serialize(with_time=False) r += self.addrFrom.serialize(with_time=False) r += struct.pack("<Q", self.nNonce) - r += ser_string(self.strSubVer) + r += ser_string(self.strSubVer.encode('utf-8')) r += struct.pack("<i", self.nStartingHeight) - r += struct.pack("<b", self.nRelay) + r += struct.pack("<b", self.relay) return r def __repr__(self): - return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \ + return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i relay=%i)' \ % (self.nVersion, self.nServices, time.ctime(self.nTime), repr(self.addrTo), repr(self.addrFrom), self.nNonce, - self.strSubVer, self.nStartingHeight, self.nRelay) + self.strSubVer, self.nStartingHeight, self.relay) class msg_verack: @@ -1273,7 +1266,7 @@ class msg_block: # for cases where a user needs tighter control over what is sent over the wire # note that the user must supply the name of the msgtype, and the data class msg_generic: - __slots__ = ("msgtype", "data") + __slots__ = ("data") def __init__(self, msgtype, data=None): self.msgtype = msgtype diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py index fa4a567aac..05099f3339 100755 --- a/test/functional/test_framework/p2p.py +++ b/test/functional/test_framework/p2p.py @@ -31,7 +31,6 @@ import threading from test_framework.messages import ( CBlockHeader, MAX_HEADERS_RESULTS, - MIN_VERSION_SUPPORTED, msg_addr, msg_addrv2, msg_block, @@ -79,6 +78,18 @@ from test_framework.util import ( logger = logging.getLogger("TestFramework.p2p") +# The minimum P2P version that this test framework supports +MIN_P2P_VERSION_SUPPORTED = 60001 +# The P2P version that this test framework implements and sends in its `version` message +# Version 70016 supports wtxid 
relay +P2P_VERSION = 70016 +# The services that this test framework offers in its `version` message +P2P_SERVICES = NODE_NETWORK | NODE_WITNESS +# The P2P user agent string that this test framework sends in its `version` message +P2P_SUBVERSION = "/python-p2p-tester:0.0.3/" +# Value for relay that this test framework sends in its `version` message +P2P_VERSION_RELAY = 1 + MESSAGEMAP = { b"addr": msg_addr, b"addrv2": msg_addrv2, @@ -327,6 +338,9 @@ class P2PInterface(P2PConnection): def peer_connect_send_version(self, services): # Send a version msg vt = msg_version() + vt.nVersion = P2P_VERSION + vt.strSubVer = P2P_SUBVERSION + vt.relay = P2P_VERSION_RELAY vt.nServices = services vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport @@ -334,7 +348,7 @@ class P2PInterface(P2PConnection): vt.addrFrom.port = 0 self.on_connection_send_msg = vt # Will be sent in connection_made callback - def peer_connect(self, *args, services=NODE_NETWORK | NODE_WITNESS, send_version=True, **kwargs): + def peer_connect(self, *args, services=P2P_SERVICES, send_version=True, **kwargs): create_conn = super().peer_connect(*args, **kwargs) if send_version: @@ -417,7 +431,7 @@ class P2PInterface(P2PConnection): pass def on_version(self, message): - assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED) + assert message.nVersion >= MIN_P2P_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_P2P_VERSION_SUPPORTED) if message.nVersion >= 70016 and self.wtxidrelay: self.send_message(msg_wtxidrelay()) if self.support_addrv2: diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index be0e9f24e2..c35533698c 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -29,8 +29,6 @@ MAX_SCRIPT_ELEMENT_SIZE = 520 LOCKTIME_THRESHOLD = 500000000 ANNEX_TAG = 0x50 -OPCODE_NAMES = {} # type: Dict[CScriptOp, str] - LEAF_VERSION_TAPSCRIPT = 0xc0 def hash160(s): @@ -47,7 +45,6 @@ def bn2vch(v): # Serialize to bytes return encoded_v.to_bytes(n_bytes, 'little') -_opcode_instances = [] # type: List[CScriptOp] class CScriptOp(int): """A single script opcode""" __slots__ = () @@ -111,6 +108,9 @@ class CScriptOp(int): _opcode_instances.append(super().__new__(cls, n)) return _opcode_instances[n] +OPCODE_NAMES: Dict[CScriptOp, str] = {} +_opcode_instances: List[CScriptOp] = [] + # Populate opcode instance table for n in range(0xff + 1): CScriptOp(n) diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 4bda73599d..70a9798449 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -108,6 +108,9 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): # skipped. If list is truncated, wallet creation is skipped and keys # are not imported. self.wallet_names = None + # By default the wallet is not required. Set to true by skip_if_no_wallet(). + # When False, we ignore wallet_names regardless of what it is. 
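# Sketch of how a test now constructs a bare `version` message after this change:
# msg_version() defaults its fields to zero/empty, so the framework constants above must
# be filled in explicitly, as p2p_filter.py and p2p_leak.py do.
from test_framework.messages import msg_version
from test_framework.p2p import P2P_SERVICES, P2P_SUBVERSION, P2P_VERSION, P2P_VERSION_RELAY

ver = msg_version()
ver.nVersion = P2P_VERSION      # 70016, supports wtxid relay
ver.nServices = P2P_SERVICES    # NODE_NETWORK | NODE_WITNESS
ver.strSubVer = P2P_SUBVERSION  # "/python-p2p-tester:0.0.3/"
ver.relay = P2P_VERSION_RELAY   # BIP37 relay flag appended since protocol version 70001
# A P2PInterface can then send_message(ver) to start the handshake as usual.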
+ self.requires_wallet = False self.set_test_params() assert self.wallet_names is None or len(self.wallet_names) <= self.num_nodes if self.options.timeout_factor == 0 : @@ -184,15 +187,30 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): parser.add_argument('--timeout-factor', dest="timeout_factor", type=float, default=1.0, help='adjust test timeouts by a factor. Setting it to 0 disables all timeouts') group = parser.add_mutually_exclusive_group() - group.add_argument("--descriptors", default=False, action="store_true", + group.add_argument("--descriptors", action='store_const', const=True, help="Run test using a descriptor wallet", dest='descriptors') - group.add_argument("--legacy-wallet", default=False, action="store_false", + group.add_argument("--legacy-wallet", action='store_const', const=False, help="Run test using legacy wallets", dest='descriptors') self.add_options(parser) self.options = parser.parse_args() self.options.previous_releases_path = previous_releases_path + config = configparser.ConfigParser() + config.read_file(open(self.options.configfile)) + self.config = config + + if self.options.descriptors is None: + # Prefer BDB unless it isn't available + if self.is_bdb_compiled(): + self.options.descriptors = False + elif self.is_sqlite_compiled(): + self.options.descriptors = True + else: + # If neither are compiled, tests requiring a wallet will be skipped and the value of self.options.descriptors won't matter + # It still needs to exist and be None in order for tests to work however. + self.options.descriptors = None + def setup(self): """Call this method to start up the test framework object with options set.""" @@ -202,9 +220,8 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): self.options.cachedir = os.path.abspath(self.options.cachedir) - config = configparser.ConfigParser() - config.read_file(open(self.options.configfile)) - self.config = config + config = self.config + fname_bitcoind = os.path.join( config["environment"]["BUILDDIR"], "src", @@ -377,7 +394,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): extra_args = self.extra_args self.add_nodes(self.num_nodes, extra_args) self.start_nodes() - if self.is_wallet_compiled(): + if self.requires_wallet: self.import_deterministic_coinbase_privkeys() if not self.setup_clean_chain: for n in self.nodes: @@ -769,10 +786,13 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): def skip_if_no_wallet(self): """Skip the running test if wallet has not been compiled.""" + self.requires_wallet = True if not self.is_wallet_compiled(): raise SkipTest("wallet has not been compiled.") if self.options.descriptors: self.skip_if_no_sqlite() + else: + self.skip_if_no_bdb() def skip_if_no_sqlite(self): """Skip the running test if sqlite has not been compiled.""" diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index b61d433652..b820c36e6e 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -23,9 +23,10 @@ import sys from .authproxy import JSONRPCException from .descriptors import descsum_create -from .messages import MY_SUBVERSION +from .p2p import P2P_SUBVERSION from .util import ( MAX_NODES, + assert_equal, append_config, delete_cookie_file, get_auth_cookie, @@ -114,6 +115,8 @@ class TestNode(): if self.version_is_at_least(190000): self.args.append("-logthreadnames") + if self.version_is_at_least(219900): + self.args.append("-logsourcelocations") self.cli = 
TestNodeCLI(bitcoin_cli, self.datadir) self.use_cli = use_cli @@ -545,6 +548,11 @@ class TestNode(): # in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely. p2p_conn.sync_with_ping() + # Consistency check that the Bitcoin Core has received our user agent string. This checks the + # node's newest peer. It could be racy if another Bitcoin Core node has connected since we opened + # our connection, but we don't expect that to happen. + assert_equal(self.getpeerinfo()[-1]['subver'], P2P_SUBVERSION) + return p2p_conn def add_outbound_p2p_connection(self, p2p_conn, *, p2p_idx, connection_type="outbound-full-relay", **kwargs): @@ -572,7 +580,7 @@ class TestNode(): def num_test_p2p_connections(self): """Return number of test framework p2p connections to the node.""" - return len([peer for peer in self.getpeerinfo() if peer['subver'] == MY_SUBVERSION.decode("utf-8")]) + return len([peer for peer in self.getpeerinfo() if peer['subver'] == P2P_SUBVERSION]) def disconnect_p2ps(self): """Close all p2p connections to the node.""" diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index c652ac0a06..d742ef4eee 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -86,29 +86,29 @@ EXTENDED_SCRIPTS = [ BASE_SCRIPTS = [ # Scripts that are run by default. # Longest test should go first, to favor running tests in parallel - 'wallet_hd.py', + 'wallet_hd.py --legacy-wallet', 'wallet_hd.py --descriptors', - 'wallet_backup.py', + 'wallet_backup.py --legacy-wallet', 'wallet_backup.py --descriptors', # vv Tests less than 5m vv 'mining_getblocktemplate_longpoll.py', 'feature_maxuploadtarget.py', 'feature_block.py', - 'rpc_fundrawtransaction.py', + 'rpc_fundrawtransaction.py --legacy-wallet', 'rpc_fundrawtransaction.py --descriptors', 'p2p_compactblocks.py', 'feature_segwit.py --legacy-wallet', # vv Tests less than 2m vv - 'wallet_basic.py', + 'wallet_basic.py --legacy-wallet', 'wallet_basic.py --descriptors', - 'wallet_labels.py', + 'wallet_labels.py --legacy-wallet', 'wallet_labels.py --descriptors', 'p2p_segwit.py', 'p2p_timeouts.py', 'p2p_tx_download.py', 'mempool_updatefromblock.py', 'wallet_dump.py --legacy-wallet', - 'wallet_listtransactions.py', + 'wallet_listtransactions.py --legacy-wallet', 'wallet_listtransactions.py --descriptors', 'feature_taproot.py', # vv Tests less than 60s vv @@ -116,21 +116,21 @@ BASE_SCRIPTS = [ 'wallet_importmulti.py --legacy-wallet', 'mempool_limit.py', 'rpc_txoutproof.py', - 'wallet_listreceivedby.py', + 'wallet_listreceivedby.py --legacy-wallet', 'wallet_listreceivedby.py --descriptors', - 'wallet_abandonconflict.py', + 'wallet_abandonconflict.py --legacy-wallet', 'wallet_abandonconflict.py --descriptors', 'feature_csv_activation.py', - 'rpc_rawtransaction.py', + 'rpc_rawtransaction.py --legacy-wallet', 'rpc_rawtransaction.py --descriptors', - 'wallet_address_types.py', + 'wallet_address_types.py --legacy-wallet', 'wallet_address_types.py --descriptors', 'feature_bip68_sequence.py', 'p2p_feefilter.py', 'feature_reindex.py', 'feature_abortnode.py', # vv Tests less than 30s vv - 'wallet_keypool_topup.py', + 'wallet_keypool_topup.py --legacy-wallet', 'wallet_keypool_topup.py --descriptors', 'feature_fee_estimation.py', 'interface_zmq.py', @@ -138,7 +138,7 @@ BASE_SCRIPTS = [ 'interface_bitcoin_cli.py', 'mempool_resurrect.py', 'wallet_txn_doublespend.py --mineblock', - 'tool_wallet.py', + 'tool_wallet.py --legacy-wallet', 'tool_wallet.py --descriptors', 'wallet_txn_clone.py', 
'wallet_txn_clone.py --segwit', @@ -146,14 +146,14 @@ BASE_SCRIPTS = [ 'rpc_misc.py', 'interface_rest.py', 'mempool_spend_coinbase.py', - 'wallet_avoidreuse.py', + 'wallet_avoidreuse.py --legacy-wallet', 'wallet_avoidreuse.py --descriptors', 'mempool_reorg.py', 'mempool_persist.py', - 'wallet_multiwallet.py', + 'wallet_multiwallet.py --legacy-wallet', 'wallet_multiwallet.py --descriptors', 'wallet_multiwallet.py --usecli', - 'wallet_createwallet.py', + 'wallet_createwallet.py --legacy-wallet', 'wallet_createwallet.py --usecli', 'wallet_createwallet.py --descriptors', 'wallet_watchonly.py --legacy-wallet', @@ -161,27 +161,27 @@ BASE_SCRIPTS = [ 'wallet_reorgsrestore.py', 'interface_http.py', 'interface_rpc.py', - 'rpc_psbt.py', + 'rpc_psbt.py --legacy-wallet', 'rpc_psbt.py --descriptors', 'rpc_users.py', 'rpc_whitelist.py', 'feature_proxy.py', - 'rpc_signrawtransaction.py', + 'rpc_signrawtransaction.py --legacy-wallet', 'rpc_signrawtransaction.py --descriptors', - 'wallet_groups.py', + 'wallet_groups.py --legacy-wallet', 'p2p_addrv2_relay.py', 'wallet_groups.py --descriptors', 'p2p_disconnect_ban.py', 'rpc_decodescript.py', 'rpc_blockchain.py', 'rpc_deprecated.py', - 'wallet_disable.py', + 'wallet_disable.py --legacy-wallet', 'wallet_disable.py --descriptors', 'p2p_addr_relay.py', 'p2p_getaddr_caching.py', 'p2p_getdata.py', 'rpc_net.py', - 'wallet_keypool.py', + 'wallet_keypool.py --legacy-wallet', 'wallet_keypool.py --descriptors', 'wallet_descriptor.py --descriptors', 'p2p_nobloomfilter_messages.py', @@ -195,70 +195,72 @@ BASE_SCRIPTS = [ 'p2p_invalid_tx.py', 'feature_assumevalid.py', 'example_test.py', - 'wallet_txn_doublespend.py', + 'wallet_txn_doublespend.py --legacy-wallet', 'wallet_txn_doublespend.py --descriptors', - 'feature_backwards_compatibility.py', + 'feature_backwards_compatibility.py --legacy-wallet', 'feature_backwards_compatibility.py --descriptors', 'wallet_txn_clone.py --mineblock', 'feature_notifications.py', 'rpc_getblockfilter.py', 'rpc_invalidateblock.py', + 'feature_utxo_set_hash.py', 'feature_rbf.py', 'mempool_packages.py', 'mempool_package_onemore.py', - 'rpc_createmultisig.py', + 'rpc_createmultisig.py --legacy-wallet', 'rpc_createmultisig.py --descriptors', 'feature_versionbits_warning.py', 'rpc_preciousblock.py', - 'wallet_importprunedfunds.py', + 'wallet_importprunedfunds.py --legacy-wallet', 'wallet_importprunedfunds.py --descriptors', 'p2p_leak_tx.py', 'p2p_eviction.py', 'rpc_signmessage.py', 'rpc_generateblock.py', 'rpc_generate.py', - 'wallet_balance.py', + 'wallet_balance.py --legacy-wallet', 'wallet_balance.py --descriptors', - 'feature_nulldummy.py', + 'feature_nulldummy.py --legacy-wallet', 'feature_nulldummy.py --descriptors', 'mempool_accept.py', 'mempool_expiry.py', 'wallet_import_rescan.py --legacy-wallet', 'wallet_import_with_label.py --legacy-wallet', 'wallet_importdescriptors.py --descriptors', - 'wallet_upgradewallet.py', + 'wallet_upgradewallet.py --legacy-wallet', 'rpc_bind.py --ipv4', 'rpc_bind.py --ipv6', 'rpc_bind.py --nonloopback', 'mining_basic.py', 'feature_signet.py', - 'wallet_bumpfee.py', + 'wallet_bumpfee.py --legacy-wallet', 'wallet_bumpfee.py --descriptors', 'wallet_implicitsegwit.py --legacy-wallet', 'rpc_named_arguments.py', - 'wallet_listsinceblock.py', + 'wallet_listsinceblock.py --legacy-wallet', 'wallet_listsinceblock.py --descriptors', 'wallet_listdescriptors.py --descriptors', 'p2p_leak.py', - 'wallet_encryption.py', + 'wallet_encryption.py --legacy-wallet', 'wallet_encryption.py --descriptors', 'feature_dersig.py', 
'feature_cltv.py', 'rpc_uptime.py', - 'wallet_resendwallettransactions.py', + 'wallet_resendwallettransactions.py --legacy-wallet', 'wallet_resendwallettransactions.py --descriptors', - 'wallet_fallbackfee.py', + 'wallet_fallbackfee.py --legacy-wallet', 'wallet_fallbackfee.py --descriptors', 'rpc_dumptxoutset.py', 'feature_minchainwork.py', 'rpc_estimatefee.py', 'rpc_getblockstats.py', - 'wallet_create_tx.py', - 'wallet_send.py', + 'wallet_create_tx.py --legacy-wallet', + 'wallet_send.py --legacy-wallet', + 'wallet_send.py --descriptors', 'wallet_create_tx.py --descriptors', 'p2p_fingerprint.py', 'feature_uacomment.py', - 'wallet_coinbase_category.py', + 'wallet_coinbase_category.py --legacy-wallet', 'wallet_coinbase_category.py --descriptors', 'feature_filelock.py', 'feature_loadblock.py', @@ -266,6 +268,7 @@ BASE_SCRIPTS = [ 'p2p_add_connections.py', 'p2p_unrequested_blocks.py', 'p2p_blockfilters.py', + 'p2p_message_capture.py', 'feature_includeconf.py', 'feature_asmap.py', 'mempool_unbroadcast.py', @@ -286,6 +289,7 @@ BASE_SCRIPTS = [ 'feature_help.py', 'feature_shutdown.py', 'p2p_ibd_txrelay.py', + 'feature_blockfilterindex_prune.py' # Don't append tests at the end to avoid merge conflicts # Put them in a random line within the section that fits their approximate run-time ] diff --git a/test/functional/tool_wallet.py b/test/functional/tool_wallet.py index 8a1af24dcf..28103793df 100755 --- a/test/functional/tool_wallet.py +++ b/test/functional/tool_wallet.py @@ -183,11 +183,13 @@ class ToolWalletTest(BitcoinTestFramework): def test_invalid_tool_commands_and_args(self): self.log.info('Testing that various invalid commands raise with specific error messages') - self.assert_raises_tool_error('Invalid command: foo', 'foo') + self.assert_raises_tool_error("Error parsing command line arguments: Invalid command 'foo'", 'foo') # `bitcoin-wallet help` raises an error. Use `bitcoin-wallet -help`. - self.assert_raises_tool_error('Invalid command: help', 'help') - self.assert_raises_tool_error('Error: two methods provided (info and create). Only one method should be provided.', 'info', 'create') + self.assert_raises_tool_error("Error parsing command line arguments: Invalid command 'help'", 'help') + self.assert_raises_tool_error('Error: Additional arguments provided (create). Methods do not take arguments. Please refer to `-help`.', 'info', 'create') self.assert_raises_tool_error('Error parsing command line arguments: Invalid parameter -foo', '-foo') + self.assert_raises_tool_error('No method provided. 
Run `bitcoin-wallet -help` for valid methods.')
+        self.assert_raises_tool_error('Wallet name must be provided when creating a new wallet.', 'create')
         locked_dir = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets")
         error = 'Error initializing wallet database environment "{}"!'.format(locked_dir)
         if self.options.descriptors:
@@ -348,7 +350,8 @@ class ToolWalletTest(BitcoinTestFramework):
         self.log.info('Checking createfromdump')
         self.do_tool_createfromdump("load", "wallet.dump")
-        self.do_tool_createfromdump("load-bdb", "wallet.dump", "bdb")
+        if self.is_bdb_compiled():
+            self.do_tool_createfromdump("load-bdb", "wallet.dump", "bdb")
         if self.is_sqlite_compiled():
             self.do_tool_createfromdump("load-sqlite", "wallet.dump", "sqlite")
diff --git a/test/functional/wallet_avoidreuse.py b/test/functional/wallet_avoidreuse.py
index 229c134a4b..bc4fa90e83 100755
--- a/test/functional/wallet_avoidreuse.py
+++ b/test/functional/wallet_avoidreuse.py
@@ -65,7 +65,6 @@ def assert_balances(node, mine):

 class AvoidReuseTest(BitcoinTestFramework):
     def set_test_params(self):
-        self.setup_clean_chain = False
         self.num_nodes = 2
         # This test isn't testing txn relay/timing, so set whitelist on the
         # peers for instant txn relay. This speeds up the test run time 2-3x.
diff --git a/test/functional/wallet_createwallet.py b/test/functional/wallet_createwallet.py
index cf3317121f..16a0a50b07 100755
--- a/test/functional/wallet_createwallet.py
+++ b/test/functional/wallet_createwallet.py
@@ -17,7 +17,6 @@ from test_framework.wallet_util import bytes_to_wif, generate_wif_key

 class CreateWalletTest(BitcoinTestFramework):
     def set_test_params(self):
-        self.setup_clean_chain = False
         self.num_nodes = 1

     def skip_test_if_missing_module(self):
diff --git a/test/functional/wallet_descriptor.py b/test/functional/wallet_descriptor.py
index 1de41a5f96..1e032bdd6c 100755
--- a/test/functional/wallet_descriptor.py
+++ b/test/functional/wallet_descriptor.py
@@ -23,11 +23,14 @@ class WalletDescriptorTest(BitcoinTestFramework):
         self.skip_if_no_sqlite()

     def run_test(self):
-        # Make a legacy wallet and check it is BDB
-        self.nodes[0].createwallet(wallet_name="legacy1", descriptors=False)
-        wallet_info = self.nodes[0].getwalletinfo()
-        assert_equal(wallet_info['format'], 'bdb')
-        self.nodes[0].unloadwallet("legacy1")
+        if self.is_bdb_compiled():
+            # Make a legacy wallet and check it is BDB
+            self.nodes[0].createwallet(wallet_name="legacy1", descriptors=False)
+            wallet_info = self.nodes[0].getwalletinfo()
+            assert_equal(wallet_info['format'], 'bdb')
+            self.nodes[0].unloadwallet("legacy1")
+        else:
+            self.log.warning("Skipping BDB test")

         # Make a descriptor wallet
         self.log.info("Making a descriptor wallet")
@@ -148,5 +151,62 @@ class WalletDescriptorTest(BitcoinTestFramework):
         nopriv_rpc = self.nodes[0].get_wallet_rpc('desc_no_priv')
         assert_raises_rpc_error(-4, 'This wallet has no available keys', nopriv_rpc.getnewaddress)

+        self.log.info("Test descriptor exports")
+        self.nodes[0].createwallet(wallet_name='desc_export', descriptors=True)
+        exp_rpc = self.nodes[0].get_wallet_rpc('desc_export')
+        self.nodes[0].createwallet(wallet_name='desc_import', disable_private_keys=True, descriptors=True)
+        imp_rpc = self.nodes[0].get_wallet_rpc('desc_import')
+
+        addr_types = [('legacy', False, 'pkh(', '44\'/1\'/0\'', -13),
+                      ('p2sh-segwit', False, 'sh(wpkh(', '49\'/1\'/0\'', -14),
+                      ('bech32', False, 'wpkh(', '84\'/1\'/0\'', -13),
+                      ('legacy', True, 'pkh(', '44\'/1\'/0\'', -13),
+                      ('p2sh-segwit', True, 'sh(wpkh(', '49\'/1\'/0\'', -14),
+                      ('bech32', True, 'wpkh(', '84\'/1\'/0\'', -13)]
+
+        for addr_type, internal, desc_prefix, deriv_path, int_idx in addr_types:
+            int_str = 'internal' if internal else 'external'
+
+            self.log.info("Testing descriptor address type for {} {}".format(addr_type, int_str))
+            if internal:
+                addr = exp_rpc.getrawchangeaddress(address_type=addr_type)
+            else:
+                addr = exp_rpc.getnewaddress(address_type=addr_type)
+            desc = exp_rpc.getaddressinfo(addr)['parent_desc']
+            assert_equal(desc_prefix, desc[0:len(desc_prefix)])
+            idx = desc.index('/') + 1
+            assert_equal(deriv_path, desc[idx:idx + 9])
+            if internal:
+                assert_equal('1', desc[int_idx])
+            else:
+                assert_equal('0', desc[int_idx])
+
+            self.log.info("Testing the same descriptor is returned for address type {} {}".format(addr_type, int_str))
+            for i in range(0, 10):
+                if internal:
+                    addr = exp_rpc.getrawchangeaddress(address_type=addr_type)
+                else:
+                    addr = exp_rpc.getnewaddress(address_type=addr_type)
+                test_desc = exp_rpc.getaddressinfo(addr)['parent_desc']
+                assert_equal(desc, test_desc)
+
+            self.log.info("Testing import of exported {} descriptor".format(addr_type))
+            imp_rpc.importdescriptors([{
+                'desc': desc,
+                'active': True,
+                'next_index': 11,
+                'timestamp': 'now',
+                'internal': internal
+            }])
+
+            for i in range(0, 10):
+                if internal:
+                    exp_addr = exp_rpc.getrawchangeaddress(address_type=addr_type)
+                    imp_addr = imp_rpc.getrawchangeaddress(address_type=addr_type)
+                else:
+                    exp_addr = exp_rpc.getnewaddress(address_type=addr_type)
+                    imp_addr = imp_rpc.getnewaddress(address_type=addr_type)
+                assert_equal(exp_addr, imp_addr)
+
 if __name__ == '__main__':
     WalletDescriptorTest().main()
diff --git a/test/functional/wallet_send.py b/test/functional/wallet_send.py
index 9835c5a2af..880341fdd9 100755
--- a/test/functional/wallet_send.py
+++ b/test/functional/wallet_send.py
@@ -8,6 +8,7 @@ from decimal import Decimal, getcontext
 from itertools import product

 from test_framework.authproxy import JSONRPCException
+from test_framework.descriptors import descsum_create
 from test_framework.test_framework import BitcoinTestFramework
 from test_framework.util import (
     assert_equal,
@@ -168,49 +169,91 @@ class WalletSendTest(BitcoinTestFramework):
         self.nodes[1].createwallet(wallet_name="w1")
         w1 = self.nodes[1].get_wallet_rpc("w1")
         # w2 contains the private keys for w3
-        self.nodes[1].createwallet(wallet_name="w2")
+        self.nodes[1].createwallet(wallet_name="w2", blank=True)
         w2 = self.nodes[1].get_wallet_rpc("w2")
+        xpriv = "tprv8ZgxMBicQKsPfHCsTwkiM1KT56RXbGGTqvc2hgqzycpwbHqqpcajQeMRZoBD35kW4RtyCemu6j34Ku5DEspmgjKdt2qe4SvRch5Kk8B8A2v"
+        xpub = "tpubD6NzVbkrYhZ4YkEfMbRJkQyZe7wTkbTNRECozCtJPtdLRn6cT1QKb8yHjwAPcAr26eHBFYs5iLiFFnCbwPRsncCKUKCfubHDMGKzMVcN1Jg"
+        if self.options.descriptors:
+            w2.importdescriptors([{
+                "desc": descsum_create("wpkh(" + xpriv + "/0/0/*)"),
+                "timestamp": "now",
+                "range": [0, 100],
+                "active": True
+            },{
+                "desc": descsum_create("wpkh(" + xpriv + "/0/1/*)"),
+                "timestamp": "now",
+                "range": [0, 100],
+                "active": True,
+                "internal": True
+            }])
+        else:
+            w2.sethdseed(True)
+
         # w3 is a watch-only wallet, based on w2
         self.nodes[1].createwallet(wallet_name="w3", disable_private_keys=True)
         w3 = self.nodes[1].get_wallet_rpc("w3")
-        for _ in range(3):
-            a2_receive = w2.getnewaddress()
-            a2_change = w2.getrawchangeaddress() # doesn't actually use change derivation
-            res = w3.importmulti([{
-                "desc": w2.getaddressinfo(a2_receive)["desc"],
+        if self.options.descriptors:
+            # Match the privkeys in w2 for descriptors
+            res = w3.importdescriptors([{
+                "desc": descsum_create("wpkh(" + xpub + "/0/0/*)"),
                 "timestamp": "now",
+                "range": [0, 100],
                 "keypool": True,
+                "active": True,
                 "watchonly": True
             },{
-                "desc": w2.getaddressinfo(a2_change)["desc"],
+                "desc": descsum_create("wpkh(" + xpub + "/0/1/*)"),
                 "timestamp": "now",
+                "range": [0, 100],
                 "keypool": True,
+                "active": True,
                 "internal": True,
                 "watchonly": True
             }])
             assert_equal(res, [{"success": True}, {"success": True}])

-            w0.sendtoaddress(a2_receive, 10) # fund w3
-            self.nodes[0].generate(1)
-            self.sync_blocks()
-
-        # w4 has private keys enabled, but only contains watch-only keys (from w2)
-        self.nodes[1].createwallet(wallet_name="w4", disable_private_keys=False)
-        w4 = self.nodes[1].get_wallet_rpc("w4")
         for _ in range(3):
             a2_receive = w2.getnewaddress()
-            res = w4.importmulti([{
-                "desc": w2.getaddressinfo(a2_receive)["desc"],
-                "timestamp": "now",
-                "keypool": False,
-                "watchonly": True
-            }])
-            assert_equal(res, [{"success": True}])
+            if not self.options.descriptors:
+                # Because legacy wallets use exclusively hardened derivation, we can't do a ranged import like we do for descriptors
+                a2_change = w2.getrawchangeaddress() # doesn't actually use change derivation
+                res = w3.importmulti([{
+                    "desc": w2.getaddressinfo(a2_receive)["desc"],
+                    "timestamp": "now",
+                    "keypool": True,
+                    "watchonly": True
+                },{
+                    "desc": w2.getaddressinfo(a2_change)["desc"],
+                    "timestamp": "now",
+                    "keypool": True,
+                    "internal": True,
+                    "watchonly": True
+                }])
+                assert_equal(res, [{"success": True}, {"success": True}])

-            w0.sendtoaddress(a2_receive, 10) # fund w4
+            w0.sendtoaddress(a2_receive, 10) # fund w3
             self.nodes[0].generate(1)
             self.sync_blocks()

+        if not self.options.descriptors:
+            # w4 has private keys enabled, but only contains watch-only keys (from w2)
+            # This is legacy wallet behavior only as descriptor wallets don't allow watchonly and non-watchonly things in the same wallet.
+            self.nodes[1].createwallet(wallet_name="w4", disable_private_keys=False)
+            w4 = self.nodes[1].get_wallet_rpc("w4")
+            for _ in range(3):
+                a2_receive = w2.getnewaddress()
+                res = w4.importmulti([{
+                    "desc": w2.getaddressinfo(a2_receive)["desc"],
+                    "timestamp": "now",
+                    "keypool": False,
+                    "watchonly": True
+                }])
+                assert_equal(res, [{"success": True}])
+
+                w0.sendtoaddress(a2_receive, 10) # fund w4
+                self.nodes[0].generate(1)
+                self.sync_blocks()
+
         self.log.info("Send to address...")
         self.test_send(from_wallet=w0, to_wallet=w1, amount=1)
         self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=True)
@@ -241,11 +284,15 @@ class WalletSendTest(BitcoinTestFramework):
         res = w2.walletprocesspsbt(res["psbt"])
         assert res["complete"]

-        self.log.info("Create PSBT from wallet w4 with watch-only keys, sign with w2...")
-        self.test_send(from_wallet=w4, to_wallet=w1, amount=1, expect_error=(-4, "Insufficient funds"))
-        res = self.test_send(from_wallet=w4, to_wallet=w1, amount=1, include_watching=True, add_to_wallet=False)
-        res = w2.walletprocesspsbt(res["psbt"])
-        assert res["complete"]
+        if not self.options.descriptors:
+            # Descriptor wallets do not allow mixed watch-only and non-watch-only things in the same wallet.
+            # This is specifically testing that w4 ignores its own private keys and creates a psbt with send
+            # which is not something that needs to be tested in descriptor wallets.
+            self.log.info("Create PSBT from wallet w4 with watch-only keys, sign with w2...")
+            self.test_send(from_wallet=w4, to_wallet=w1, amount=1, expect_error=(-4, "Insufficient funds"))
+            res = self.test_send(from_wallet=w4, to_wallet=w1, amount=1, include_watching=True, add_to_wallet=False)
+            res = w2.walletprocesspsbt(res["psbt"])
+            assert res["complete"]

         self.log.info("Create OP_RETURN...")
         self.test_send(from_wallet=w0, to_wallet=w1, amount=1)
diff --git a/test/functional/wallet_upgradewallet.py b/test/functional/wallet_upgradewallet.py
index d0bb6135a8..fbc0f995d2 100755
--- a/test/functional/wallet_upgradewallet.py
+++ b/test/functional/wallet_upgradewallet.py
@@ -57,6 +57,7 @@ class UpgradeWalletTest(BitcoinTestFramework):

     def skip_test_if_missing_module(self):
         self.skip_if_no_wallet()
+        self.skip_if_no_bdb()
         self.skip_if_no_previous_releases()

     def setup_network(self):
diff --git a/test/functional/wallet_watchonly.py b/test/functional/wallet_watchonly.py
index b0c41b2738..c345c382d0 100755
--- a/test/functional/wallet_watchonly.py
+++ b/test/functional/wallet_watchonly.py
@@ -2,7 +2,7 @@
 # Copyright (c) 2018-2019 The Bitcoin Core developers
 # Distributed under the MIT software license, see the accompanying
 # file COPYING or http://www.opensource.org/licenses/mit-license.php.
-"""Test createwallet arguments.
+"""Test createwallet watchonly arguments.
 """

 from test_framework.test_framework import BitcoinTestFramework
@@ -14,7 +14,6 @@ from test_framework.util import (

 class CreateWalletWatchonlyTest(BitcoinTestFramework):
     def set_test_params(self):
-        self.setup_clean_chain = False
         self.num_nodes = 1

     def skip_test_if_missing_module(self):
@@ -50,6 +49,11 @@ class CreateWalletWatchonlyTest(BitcoinTestFramework):
         assert_equal(len(wo_wallet.listtransactions()), 1)
         assert_equal(wo_wallet.getbalance(include_watchonly=False), 0)

+        self.log.info('Test sending from a watch-only wallet raises RPC error')
+        msg = "Error: Private keys are disabled for this wallet"
+        assert_raises_rpc_error(-4, msg, wo_wallet.sendtoaddress, a1, 0.1)
+        assert_raises_rpc_error(-4, msg, wo_wallet.sendmany, amounts={a1: 0.1})
+
         self.log.info('Testing listreceivedbyaddress watch-only defaults')
         result = wo_wallet.listreceivedbyaddress()
         assert_equal(len(result), 1)
diff --git a/test/fuzz/test_runner.py b/test/fuzz/test_runner.py
index aa0aa11d15..611061072f 100755
--- a/test/fuzz/test_runner.py
+++ b/test/fuzz/test_runner.py
@@ -225,6 +225,8 @@ def merge_inputs(*, fuzz_pool, corpus, test_list, src_dir, build_dir, merge_dir)
         args = [
             os.path.join(build_dir, 'src', 'test', 'fuzz', 'fuzz'),
             '-merge=1',
+            '-shuffle=0',
+            '-prefer_small=1',
             '-use_value_profile=1',  # Also done by oss-fuzz https://github.com/google/oss-fuzz/issues/1406#issuecomment-387790487
             os.path.join(corpus, t),
             os.path.join(merge_dir, t),
diff --git a/test/lint/lint-circular-dependencies.sh b/test/lint/lint-circular-dependencies.sh
index c4ad00e954..0b15f99448 100755
--- a/test/lint/lint-circular-dependencies.sh
+++ b/test/lint/lint-circular-dependencies.sh
@@ -11,6 +11,7 @@ export LC_ALL=C
 EXPECTED_CIRCULAR_DEPENDENCIES=(
     "chainparamsbase -> util/system -> chainparamsbase"
     "index/txindex -> validation -> index/txindex"
+    "index/blockfilterindex -> validation -> index/blockfilterindex"
     "policy/fees -> txmempool -> policy/fees"
     "qt/addresstablemodel -> qt/walletmodel -> qt/addresstablemodel"
     "qt/bitcoingui -> qt/walletframe -> qt/bitcoingui"
@@ -20,6 +21,7 @@ EXPECTED_CIRCULAR_DEPENDENCIES=(
     "txmempool -> validation -> txmempool"
     "wallet/fees -> wallet/wallet -> wallet/fees"
     "wallet/wallet -> wallet/walletdb -> wallet/wallet"
+    "node/coinstats -> validation -> node/coinstats"
 )

 EXIT_CODE=0
diff --git a/test/lint/lint-includes.sh b/test/lint/lint-includes.sh
index 262e9d4c79..bf7aeb5b4f 100755
--- a/test/lint/lint-includes.sh
+++ b/test/lint/lint-includes.sh
@@ -60,16 +60,11 @@ EXPECTED_BOOST_INCLUDES=(
     boost/multi_index/ordered_index.hpp
     boost/multi_index/sequenced_index.hpp
     boost/multi_index_container.hpp
-    boost/preprocessor/cat.hpp
-    boost/preprocessor/stringize.hpp
     boost/process.hpp
     boost/signals2/connection.hpp
     boost/signals2/optional_last_value.hpp
     boost/signals2/signal.hpp
     boost/test/unit_test.hpp
-    boost/thread/condition_variable.hpp
-    boost/thread/shared_mutex.hpp
-    boost/thread/thread.hpp
 )

 for BOOST_INCLUDE in $(git grep '^#include <boost/' -- "*.cpp" "*.h" | cut -f2 -d: | cut -f2 -d'<' | cut -f1 -d'>' | sort -u); do
diff --git a/test/lint/lint-python-dead-code.sh b/test/lint/lint-python-dead-code.sh
new file mode 100755
index 0000000000..c3b6ff3c98
--- /dev/null
+++ b/test/lint/lint-python-dead-code.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+#
+# Copyright (c) 2021 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#
+# Find dead Python code.
+
+export LC_ALL=C
+
+if ! command -v vulture > /dev/null; then
+    echo "Skipping Python dead code linting since vulture is not installed. Install by running \"pip3 install vulture\""
+    exit 0
+fi
+
+# --min-confidence 100 will only report code that is guaranteed to be unused within the analyzed files.
+# Any value below 100 introduces the risk of false positives, which would create an unacceptable maintenance burden.
+if ! vulture \
+    --min-confidence 100 \
+    $(git ls-files -- "*.py"); then
+    echo "Python dead code detection found some issues"
+    exit 1
+fi
diff --git a/test/sanitizer_suppressions/tsan b/test/sanitizer_suppressions/tsan
index 3a04418e8b..3fc9fac25c 100644
--- a/test/sanitizer_suppressions/tsan
+++ b/test/sanitizer_suppressions/tsan
@@ -28,6 +28,7 @@ race:BerkeleyBatch
 race:BerkeleyDatabase
 race:DatabaseBatch
 race:leveldb::DBImpl::DeleteObsoleteFiles
+race:validation_chainstatemanager_tests
 race:zmq::*
 race:bitcoin-qt