235 files changed, 4902 insertions, 2567 deletions
diff --git a/.cirrus.yml b/.cirrus.yml index 801164c368..691582239e 100644 --- a/.cirrus.yml +++ b/.cirrus.yml @@ -82,10 +82,10 @@ task: FILE_ENV: "./ci/test/00_setup_env_arm.sh" task: - name: 'Win64 [unit tests, no gui tests, no boost::process, no functional tests] [bionic]' + name: 'Win64 [unit tests, no gui tests, no boost::process, no functional tests] [focal]' << : *GLOBAL_TASK_TEMPLATE container: - image: ubuntu:bionic + image: ubuntu:focal env: FILE_ENV: "./ci/test/00_setup_env_win64.sh" @@ -160,10 +160,10 @@ task: FILE_ENV: "./ci/test/00_setup_env_native_nowallet.sh" task: - name: 'macOS 10.14 [gui, no tests] [bionic]' + name: 'macOS 10.14 [gui, no tests] [focal]' << : *GLOBAL_TASK_TEMPLATE container: - image: ubuntu:bionic + image: ubuntu:focal env: FILE_ENV: "./ci/test/00_setup_env_mac.sh" diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000..4967e675f6 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,26 @@ +# This is the top-most EditorConfig file. +root = true + +# For all files. +[*] +charset = utf-8 +end_of_line = lf +indent_style = space +insert_final_newline = true +trim_trailing_whitespace = true + +# Source code files +[*.{h,cpp,py,sh}] +indent_size = 4 + +# .cirrus.yml, .appveyor.yml, .fuzzbuzz.yml, etc. +[*.yml] +indent_size = 2 + +# Makefiles +[{*.am,Makefile.*.include}] +indent_style = tab + +# Autoconf scripts +[configure.ac] +indent_size = 2 diff --git a/.fuzzbuzz.yml b/.fuzzbuzz.yml index be9a1cd4e1..e40b4df165 100644 --- a/.fuzzbuzz.yml +++ b/.fuzzbuzz.yml @@ -5,7 +5,7 @@ environment: - CXXFLAGS=-fcoverage-mapping -fno-omit-frame-pointer -fprofile-instr-generate -gline-tables-only -O1 setup: - sudo apt-get update - - sudo apt-get install -y autoconf bsdmainutils clang git libboost-all-dev libc++1 libc++abi1 libc++abi-dev libc++-dev libclang1 libclang-dev libdb5.3++ libevent-dev libllvm-ocaml-dev libomp5 libomp-dev libqt5core5a libqt5dbus5 libqt5gui5 libtool llvm llvm-dev llvm-runtime pkg-config qttools5-dev qttools5-dev-tools software-properties-common + - sudo apt-get install -y autoconf bsdmainutils clang git libboost-system-dev libboost-filesystem-dev libboost-test-dev libc++1 libc++abi1 libc++abi-dev libc++-dev libclang1 libclang-dev libdb5.3++ libevent-dev libllvm-ocaml-dev libomp5 libomp-dev libqt5core5a libqt5dbus5 libqt5gui5 libtool llvm llvm-dev llvm-runtime pkg-config qttools5-dev qttools5-dev-tools software-properties-common - ./autogen.sh - CC=clang CXX=clang++ ./configure --enable-fuzz --with-sanitizers=address,fuzzer,undefined --enable-danger-fuzz-link-all - make diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 2e11474382..ae2379fbd5 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -197,7 +197,7 @@ Note: Code review is a burdensome but important part of the development process, If your pull request contains fixup commits (commits that change the same line of code repeatedly) or too fine-grained commits, you may be asked to [squash](https://git-scm.com/docs/git-rebase#_interactive_mode) your commits -before it will be merged. The basic squashing workflow is shown below. +before it will be reviewed. The basic squashing workflow is shown below. git checkout your_branch_name git rebase -i HEAD~n diff --git a/Makefile.am b/Makefile.am index f6b824faaa..66be768277 100644 --- a/Makefile.am +++ b/Makefile.am @@ -4,7 +4,7 @@ # Pattern rule to print variables, e.g. 
make print-top_srcdir print-%: - @echo $* = $($*) + @echo '$*' = '$($*)' ACLOCAL_AMFLAGS = -I build-aux/m4 SUBDIRS = src @@ -3,6 +3,11 @@ Bitcoin Core integration/staging tree https://bitcoincore.org +For an immediately usable, binary version of the Bitcoin Core software, see +https://bitcoincore.org/en/download/. + +Further information about Bitcoin Core is available in the [doc folder](/doc). + What is Bitcoin? ---------------- @@ -12,9 +17,7 @@ with no central authority: managing transactions and issuing money are carried out collectively by the network. Bitcoin Core is the name of open source software which enables the use of this currency. -For more information, as well as an immediately usable, binary version of -the Bitcoin Core software, see https://bitcoincore.org/en/download/, or read the -[original whitepaper](https://bitcoincore.org/bitcoin.pdf). +For more information read the original Bitcoin whitepaper. License ------- @@ -53,7 +56,7 @@ submit new unit tests for old code. Unit tests can be compiled and run and extending unit tests can be found in [/src/test/README.md](/src/test/README.md). There are also [regression and integration tests](/test), written -in Python, that are run automatically on the build server. +in Python. These tests can be run (if the [test dependencies](/test) are installed) with: `test/functional/test_runner.py` The CI (Continuous Integration) systems make sure that every pull request is built for Windows, Linux, and macOS, @@ -77,5 +80,3 @@ Translations are periodically pulled from Transifex and merged into the git repo **Important**: We do not accept translation changes as GitHub pull requests because the next pull from Transifex would automatically overwrite them again. - -Translators should also subscribe to the [mailing list](https://groups.google.com/forum/#!forum/bitcoin-translators). diff --git a/build-aux/m4/ax_boost_process.m4 b/build-aux/m4/ax_boost_process.m4 deleted file mode 100644 index 5d20e67464..0000000000 --- a/build-aux/m4/ax_boost_process.m4 +++ /dev/null @@ -1,121 +0,0 @@ -# =========================================================================== -# https://www.gnu.org/software/autoconf-archive/ax_boost_process.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_BOOST_PROCESS -# -# DESCRIPTION -# -# Test for Process library from the Boost C++ libraries. The macro -# requires a preceding call to AX_BOOST_BASE. Further documentation is -# available at <http://randspringer.de/boost/index.html>. -# -# This macro calls: -# -# AC_SUBST(BOOST_PROCESS_LIB) -# -# And sets: -# -# HAVE_BOOST_PROCESS -# -# LICENSE -# -# Copyright (c) 2008 Thomas Porschberg <thomas@randspringer.de> -# Copyright (c) 2008 Michael Tindal -# Copyright (c) 2008 Daniel Casimiro <dan.casimiro@gmail.com> -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 2 - -AC_DEFUN([AX_BOOST_PROCESS], -[ - AC_ARG_WITH([boost-process], - AS_HELP_STRING([--with-boost-process@<:@=special-lib@:>@], - [use the Process library from boost - it is possible to specify a certain library for the linker - e.g. 
--with-boost-process=boost_process-gcc-mt ]), - [ - if test "$withval" = "no"; then - want_boost_process="no" - elif test "$withval" = "yes"; then - want_boost_process="yes" - ax_boost_user_process_lib="" - else - want_boost_process="yes" - ax_boost_user_process_lib="$withval" - fi - ], - [want_boost_process="yes"] - ) - - if test "x$want_boost_process" = "xyes"; then - AC_REQUIRE([AC_PROG_CC]) - AC_REQUIRE([AC_CANONICAL_BUILD]) - CPPFLAGS_SAVED="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" - export CPPFLAGS - - LDFLAGS_SAVED="$LDFLAGS" - LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" - export LDFLAGS - - AC_CACHE_CHECK(whether the Boost::Process library is available, - ax_cv_boost_process, - [AC_LANG_PUSH([C++]) - CXXFLAGS_SAVE=$CXXFLAGS - CXXFLAGS= - - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[@%:@include <boost/process.hpp>]], - [[boost::process::child* child = new boost::process::child; delete child;]])], - ax_cv_boost_process=yes, ax_cv_boost_process=no) - CXXFLAGS=$CXXFLAGS_SAVE - AC_LANG_POP([C++]) - ]) - if test "x$ax_cv_boost_process" = "xyes"; then - AC_SUBST(BOOST_CPPFLAGS) - - AC_DEFINE(HAVE_BOOST_PROCESS,,[define if the Boost::Process library is available]) - BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` - - LDFLAGS_SAVE=$LDFLAGS - if test "x$ax_boost_user_process_lib" = "x"; then - for libextension in `ls -r $BOOSTLIBDIR/libboost_process* 2>/dev/null | sed 's,.*/lib,,' | sed 's,\..*,,'` ; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break], - [link_process="no"]) - done - if test "x$link_process" != "xyes"; then - for libextension in `ls -r $BOOSTLIBDIR/boost_process* 2>/dev/null | sed 's,.*/,,' | sed -e 's,\..*,,'` ; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break], - [link_process="no"]) - done - fi - - else - for ax_lib in $ax_boost_user_process_lib boost_process-$ax_boost_user_process_lib; do - AC_CHECK_LIB($ax_lib, exit, - [BOOST_PROCESS_LIB="-l$ax_lib"; AC_SUBST(BOOST_PROCESS_LIB) link_process="yes"; break], - [link_process="no"]) - done - - fi - if test "x$ax_lib" = "x"; then - AC_MSG_ERROR(Could not find a version of the Boost::Process library!) - fi - if test "x$link_process" = "xno"; then - AC_MSG_ERROR(Could not link against $ax_lib !) - fi - fi - - CPPFLAGS="$CPPFLAGS_SAVED" - LDFLAGS="$LDFLAGS_SAVED" - fi -]) diff --git a/build-aux/m4/ax_boost_thread.m4 b/build-aux/m4/ax_boost_thread.m4 deleted file mode 100644 index 75e80e6e75..0000000000 --- a/build-aux/m4/ax_boost_thread.m4 +++ /dev/null @@ -1,187 +0,0 @@ -# =========================================================================== -# https://www.gnu.org/software/autoconf-archive/ax_boost_thread.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_BOOST_THREAD -# -# DESCRIPTION -# -# Test for Thread library from the Boost C++ libraries. The macro requires -# a preceding call to AX_BOOST_BASE. Further documentation is available at -# <http://randspringer.de/boost/index.html>. 
-# -# This macro calls: -# -# AC_SUBST(BOOST_THREAD_LIB) -# -# And sets: -# -# HAVE_BOOST_THREAD -# -# LICENSE -# -# Copyright (c) 2009 Thomas Porschberg <thomas@randspringer.de> -# Copyright (c) 2009 Michael Tindal -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. - -#serial 33 - -AC_DEFUN([AX_BOOST_THREAD], -[ - AC_ARG_WITH([boost-thread], - AS_HELP_STRING([--with-boost-thread@<:@=special-lib@:>@], - [use the Thread library from boost - - it is possible to specify a certain library for the linker - e.g. --with-boost-thread=boost_thread-gcc-mt ]), - [ - if test "$withval" = "yes"; then - want_boost="yes" - ax_boost_user_thread_lib="" - else - want_boost="yes" - ax_boost_user_thread_lib="$withval" - fi - ], - [want_boost="yes"] - ) - - if test "x$want_boost" = "xyes"; then - AC_REQUIRE([AC_PROG_CC]) - AC_REQUIRE([AC_CANONICAL_BUILD]) - CPPFLAGS_SAVED="$CPPFLAGS" - CPPFLAGS="$CPPFLAGS $BOOST_CPPFLAGS" - export CPPFLAGS - - LDFLAGS_SAVED="$LDFLAGS" - LDFLAGS="$LDFLAGS $BOOST_LDFLAGS" - export LDFLAGS - - AC_CACHE_CHECK(whether the Boost::Thread library is available, - ax_cv_boost_thread, - [AC_LANG_PUSH([C++]) - CXXFLAGS_SAVE=$CXXFLAGS - - case "x$host_os" in - xsolaris ) - CXXFLAGS="-pthreads $CXXFLAGS" - break; - ;; - xmingw32 ) - CXXFLAGS="-mthreads $CXXFLAGS" - break; - ;; - *android* ) - break; - ;; - * ) - CXXFLAGS="-pthread $CXXFLAGS" - break; - ;; - esac - - AC_COMPILE_IFELSE([ - AC_LANG_PROGRAM( - [[@%:@include <boost/thread/thread.hpp>]], - [[boost::thread_group thrds; - return 0;]])], - ax_cv_boost_thread=yes, ax_cv_boost_thread=no) - CXXFLAGS=$CXXFLAGS_SAVE - AC_LANG_POP([C++]) - ]) - if test "x$ax_cv_boost_thread" = "xyes"; then - case "x$host_os" in - xsolaris ) - BOOST_CPPFLAGS="-pthreads $BOOST_CPPFLAGS" - break; - ;; - xmingw32 ) - BOOST_CPPFLAGS="-mthreads $BOOST_CPPFLAGS" - break; - ;; - *android* ) - break; - ;; - * ) - BOOST_CPPFLAGS="-pthread $BOOST_CPPFLAGS" - break; - ;; - esac - - AC_SUBST(BOOST_CPPFLAGS) - - AC_DEFINE(HAVE_BOOST_THREAD,, - [define if the Boost::Thread library is available]) - BOOSTLIBDIR=`echo $BOOST_LDFLAGS | sed -e 's/@<:@^\/@:>@*//'` - - LDFLAGS_SAVE=$LDFLAGS - case "x$host_os" in - *bsd* ) - LDFLAGS="-pthread $LDFLAGS" - break; - ;; - esac - if test "x$ax_boost_user_thread_lib" = "x"; then - for libextension in `ls -r $BOOSTLIBDIR/libboost_thread* 2>/dev/null | sed 's,.*/lib,,' | sed 's,\..*,,'`; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [link_thread="yes"; break], - [link_thread="no"]) - done - if test "x$link_thread" != "xyes"; then - for libextension in `ls -r $BOOSTLIBDIR/boost_thread* 2>/dev/null | sed 's,.*/,,' | sed 's,\..*,,'`; do - ax_lib=${libextension} - AC_CHECK_LIB($ax_lib, exit, - [link_thread="yes"; break], - [link_thread="no"]) - done - fi - - else - for ax_lib in $ax_boost_user_thread_lib boost_thread-$ax_boost_user_thread_lib; do - AC_CHECK_LIB($ax_lib, exit, - [link_thread="yes"; break], - [link_thread="no"]) - done - - fi - if test "x$ax_lib" = "x"; then - AC_MSG_ERROR(Could not find a version of the Boost::Thread library!) - fi - if test "x$link_thread" = "xno"; then - AC_MSG_ERROR(Could not link against $ax_lib !) 
- else - BOOST_THREAD_LIB="-l$ax_lib" - case "x$host_os" in - *bsd* ) - BOOST_LDFLAGS="-pthread $BOOST_LDFLAGS" - break; - ;; - xsolaris ) - BOOST_THREAD_LIB="$BOOST_THREAD_LIB -lpthread" - break; - ;; - xmingw32 ) - break; - ;; - *android* ) - break; - ;; - * ) - BOOST_THREAD_LIB="$BOOST_THREAD_LIB -lpthread" - break; - ;; - esac - AC_SUBST(BOOST_THREAD_LIB) - fi - fi - - CPPFLAGS="$CPPFLAGS_SAVED" - LDFLAGS="$LDFLAGS_SAVED" - fi -]) diff --git a/build-aux/m4/ax_gcc_func_attribute.m4 b/build-aux/m4/ax_gcc_func_attribute.m4 deleted file mode 100644 index c788ca9bd4..0000000000 --- a/build-aux/m4/ax_gcc_func_attribute.m4 +++ /dev/null @@ -1,223 +0,0 @@ -# =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_gcc_func_attribute.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_GCC_FUNC_ATTRIBUTE(ATTRIBUTE) -# -# DESCRIPTION -# -# This macro checks if the compiler supports one of GCC's function -# attributes; many other compilers also provide function attributes with -# the same syntax. Compiler warnings are used to detect supported -# attributes as unsupported ones are ignored by default so quieting -# warnings when using this macro will yield false positives. -# -# The ATTRIBUTE parameter holds the name of the attribute to be checked. -# -# If ATTRIBUTE is supported define HAVE_FUNC_ATTRIBUTE_<ATTRIBUTE>. -# -# The macro caches its result in the ax_cv_have_func_attribute_<attribute> -# variable. -# -# The macro currently supports the following function attributes: -# -# alias -# aligned -# alloc_size -# always_inline -# artificial -# cold -# const -# constructor -# constructor_priority for constructor attribute with priority -# deprecated -# destructor -# dllexport -# dllimport -# error -# externally_visible -# flatten -# format -# format_arg -# gnu_inline -# hot -# ifunc -# leaf -# malloc -# noclone -# noinline -# nonnull -# noreturn -# nothrow -# optimize -# pure -# unused -# used -# visibility -# warning -# warn_unused_result -# weak -# weakref -# -# Unsuppored function attributes will be tested with a prototype returning -# an int and not accepting any arguments and the result of the check might -# be wrong or meaningless so use with care. -# -# LICENSE -# -# Copyright (c) 2013 Gabriele Svelto <gabriele.svelto@gmail.com> -# -# Copying and distribution of this file, with or without modification, are -# permitted in any medium without royalty provided the copyright notice -# and this notice are preserved. This file is offered as-is, without any -# warranty. 
- -#serial 3 - -AC_DEFUN([AX_GCC_FUNC_ATTRIBUTE], [ - AS_VAR_PUSHDEF([ac_var], [ax_cv_have_func_attribute_$1]) - - AC_CACHE_CHECK([for __attribute__(($1))], [ac_var], [ - AC_LINK_IFELSE([AC_LANG_PROGRAM([ - m4_case([$1], - [alias], [ - int foo( void ) { return 0; } - int bar( void ) __attribute__(($1("foo"))); - ], - [aligned], [ - int foo( void ) __attribute__(($1(32))); - ], - [alloc_size], [ - void *foo(int a) __attribute__(($1(1))); - ], - [always_inline], [ - inline __attribute__(($1)) int foo( void ) { return 0; } - ], - [artificial], [ - inline __attribute__(($1)) int foo( void ) { return 0; } - ], - [cold], [ - int foo( void ) __attribute__(($1)); - ], - [const], [ - int foo( void ) __attribute__(($1)); - ], - [constructor_priority], [ - int foo( void ) __attribute__((__constructor__(65535/2))); - ], - [constructor], [ - int foo( void ) __attribute__(($1)); - ], - [deprecated], [ - int foo( void ) __attribute__(($1(""))); - ], - [destructor], [ - int foo( void ) __attribute__(($1)); - ], - [dllexport], [ - __attribute__(($1)) int foo( void ) { return 0; } - ], - [dllimport], [ - int foo( void ) __attribute__(($1)); - ], - [error], [ - int foo( void ) __attribute__(($1(""))); - ], - [externally_visible], [ - int foo( void ) __attribute__(($1)); - ], - [flatten], [ - int foo( void ) __attribute__(($1)); - ], - [format], [ - int foo(const char *p, ...) __attribute__(($1(printf, 1, 2))); - ], - [format_arg], [ - char *foo(const char *p) __attribute__(($1(1))); - ], - [gnu_inline], [ - inline __attribute__(($1)) int foo( void ) { return 0; } - ], - [hot], [ - int foo( void ) __attribute__(($1)); - ], - [ifunc], [ - int my_foo( void ) { return 0; } - static int (*resolve_foo(void))(void) { return my_foo; } - int foo( void ) __attribute__(($1("resolve_foo"))); - ], - [leaf], [ - __attribute__(($1)) int foo( void ) { return 0; } - ], - [malloc], [ - void *foo( void ) __attribute__(($1)); - ], - [noclone], [ - int foo( void ) __attribute__(($1)); - ], - [noinline], [ - __attribute__(($1)) int foo( void ) { return 0; } - ], - [nonnull], [ - int foo(char *p) __attribute__(($1(1))); - ], - [noreturn], [ - void foo( void ) __attribute__(($1)); - ], - [nothrow], [ - int foo( void ) __attribute__(($1)); - ], - [optimize], [ - __attribute__(($1(3))) int foo( void ) { return 0; } - ], - [pure], [ - int foo( void ) __attribute__(($1)); - ], - [unused], [ - int foo( void ) __attribute__(($1)); - ], - [used], [ - int foo( void ) __attribute__(($1)); - ], - [visibility], [ - int foo_def( void ) __attribute__(($1("default"))); - int foo_hid( void ) __attribute__(($1("hidden"))); - int foo_int( void ) __attribute__(($1("internal"))); - int foo_pro( void ) __attribute__(($1("protected"))); - ], - [warning], [ - int foo( void ) __attribute__(($1(""))); - ], - [warn_unused_result], [ - int foo( void ) __attribute__(($1)); - ], - [weak], [ - int foo( void ) __attribute__(($1)); - ], - [weakref], [ - static int foo( void ) { return 0; } - static int bar( void ) __attribute__(($1("foo"))); - ], - [ - m4_warn([syntax], [Unsupported attribute $1, the test may fail]) - int foo( void ) __attribute__(($1)); - ] - )], []) - ], - dnl GCC doesn't exit with an error if an unknown attribute is - dnl provided but only outputs a warning, so accept the attribute - dnl only if no warning were issued. 
- [AS_IF([test -s conftest.err], - [AS_VAR_SET([ac_var], [no])], - [AS_VAR_SET([ac_var], [yes])])], - [AS_VAR_SET([ac_var], [no])]) - ]) - - AS_IF([test yes = AS_VAR_GET([ac_var])], - [AC_DEFINE_UNQUOTED(AS_TR_CPP(HAVE_FUNC_ATTRIBUTE_$1), 1, - [Define to 1 if the system has the `$1' function attribute])], []) - - AS_VAR_POPDEF([ac_var]) -]) diff --git a/build_msvc/bitcoin_config.h b/build_msvc/bitcoin_config.h index 53aead38b5..23c554e396 100644 --- a/build_msvc/bitcoin_config.h +++ b/build_msvc/bitcoin_config.h @@ -56,9 +56,6 @@ /* define if the Boost::System library is available */ #define HAVE_BOOST_SYSTEM /**/ -/* define if the Boost::Thread library is available */ -#define HAVE_BOOST_THREAD /**/ - /* define if the Boost::Unit_Test_Framework library is available */ #define HAVE_BOOST_UNIT_TEST_FRAMEWORK /**/ diff --git a/build_msvc/vcpkg.json b/build_msvc/vcpkg.json index dfd3929c44..42b9a5d16f 100644 --- a/build_msvc/vcpkg.json +++ b/build_msvc/vcpkg.json @@ -8,7 +8,6 @@ "boost-process", "boost-signals2", "boost-test", - "boost-thread", "sqlite3", "double-conversion", { diff --git a/ci/lint/04_install.sh b/ci/lint/04_install.sh index a0b579de1e..343b82a1ad 100755 --- a/ci/lint/04_install.sh +++ b/ci/lint/04_install.sh @@ -15,6 +15,7 @@ ${CI_RETRY_EXE} pip3 install codespell==2.0.0 ${CI_RETRY_EXE} pip3 install flake8==3.8.3 ${CI_RETRY_EXE} pip3 install yq ${CI_RETRY_EXE} pip3 install mypy==0.781 +${CI_RETRY_EXE} pip3 install vulture==2.3 SHELLCHECK_VERSION=v0.7.1 curl -sL "https://github.com/koalaman/shellcheck/releases/download/${SHELLCHECK_VERSION}/shellcheck-${SHELLCHECK_VERSION}.linux.x86_64.tar.xz" | tar --xz -xf - --directory /tmp/ diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh index b0de2ec0bb..6da011d19b 100644 --- a/ci/test/00_setup_env_mac.sh +++ b/ci/test/00_setup_env_mac.sh @@ -7,9 +7,9 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_macos_cross -export DOCKER_NAME_TAG=ubuntu:18.04 # Check that bionic can cross-compile to macos (bionic is used in the gitian build as well) +export DOCKER_NAME_TAG=ubuntu:20.04 # Check that Focal can cross-compile to macos (Focal is used in the gitian build as well) export HOST=x86_64-apple-darwin18 -export PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools python3-dev python3-setuptools xorriso" +export PACKAGES="cmake imagemagick libcap-dev librsvg2-bin libz-dev libbz2-dev libtiff-tools libtinfo5 python3-dev python3-setuptools xorriso" export XCODE_VERSION=11.3.1 export XCODE_BUILD_ID=11C505 export RUN_UNIT_TESTS=false diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh index f682486088..e47119e6fa 100644 --- a/ci/test/00_setup_env_native_asan.sh +++ b/ci/test/00_setup_env_native_asan.sh @@ -7,7 +7,7 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_asan -export PACKAGES="clang llvm python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libqrencode-dev libsqlite3-dev" +export PACKAGES="clang llvm python3-zmq qtbase5-dev qttools5-dev-tools libevent-dev bsdmainutils libboost-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libqrencode-dev libsqlite3-dev" export DOCKER_NAME_TAG=ubuntu:20.04 export NO_DEPENDS=1 export GOAL="install" diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh 
index b7157c608d..ebb5a1cabe 100644 --- a/ci/test/00_setup_env_native_fuzz.sh +++ b/ci/test/00_setup_env_native_fuzz.sh @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 export DOCKER_NAME_TAG="ubuntu:20.04" export CONTAINER_NAME=ci_native_fuzz -export PACKAGES="clang llvm python3 libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev" +export PACKAGES="clang llvm python3 libevent-dev bsdmainutils libboost-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev" export NO_DEPENDS=1 export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false diff --git a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh index e06a40eb23..2cf672b91e 100644 --- a/ci/test/00_setup_env_native_fuzz_with_valgrind.sh +++ b/ci/test/00_setup_env_native_fuzz_with_valgrind.sh @@ -8,7 +8,7 @@ export LC_ALL=C.UTF-8 export DOCKER_NAME_TAG="ubuntu:20.04" export CONTAINER_NAME=ci_native_fuzz_valgrind -export PACKAGES="clang llvm python3 libevent-dev bsdmainutils libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev valgrind" +export PACKAGES="clang llvm python3 libevent-dev bsdmainutils libboost-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev valgrind" export NO_DEPENDS=1 export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false diff --git a/ci/test/00_setup_env_native_multiprocess.sh b/ci/test/00_setup_env_native_multiprocess.sh index 522a5d9fc2..c5692d786a 100644 --- a/ci/test/00_setup_env_native_multiprocess.sh +++ b/ci/test/00_setup_env_native_multiprocess.sh @@ -13,3 +13,4 @@ export DEP_OPTS="MULTIPROCESS=1" export GOAL="install" export BITCOIN_CONFIG="--with-boost-process" export TEST_RUNNER_ENV="BITCOIND=bitcoin-node" +export RUN_SECURITY_TESTS="true" diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh index 567145fe47..4c42605e9a 100644 --- a/ci/test/00_setup_env_native_qt5.sh +++ b/ci/test/00_setup_env_native_qt5.sh @@ -11,9 +11,9 @@ export DOCKER_NAME_TAG=ubuntu:18.04 # Check that bionic gcc-7 can compile our c export PACKAGES="python3-zmq qtbase5-dev qttools5-dev-tools libdbus-1-dev libharfbuzz-dev" export DEP_OPTS="NO_QT=1 NO_UPNP=1 NO_NATPMP=1 DEBUG=1 ALLOW_HOST_PACKAGES=1" export TEST_RUNNER_EXTRA="--previous-releases --coverage --extended --exclude feature_dbcrash" # Run extended tests so that coverage does not fail, but exclude the very slow dbcrash -export RUN_SECURITY_TESTS="true" export RUN_UNIT_TESTS_SEQUENTIAL="true" export RUN_UNIT_TESTS="false" export GOAL="install" export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.2 v0.16.3 v0.17.2 v0.18.1 v0.19.1" -export BITCOIN_CONFIG="--enable-zmq --with-libs=no --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports --enable-debug CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\" --with-boost-process" +export BITCOIN_CONFIG="--enable-zmq --with-libs=no --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports +--enable-debug --disable-fuzz-binary CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\" --with-boost-process" diff --git a/ci/test/00_setup_env_native_valgrind.sh b/ci/test/00_setup_env_native_valgrind.sh index f0c153158b..e079a7057c 100644 --- a/ci/test/00_setup_env_native_valgrind.sh +++ b/ci/test/00_setup_env_native_valgrind.sh @@ -7,7 +7,7 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_native_valgrind -export PACKAGES="valgrind clang llvm python3-zmq libevent-dev bsdmainutils libboost-system-dev 
libboost-filesystem-dev libboost-test-dev libboost-thread-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libsqlite3-dev" +export PACKAGES="valgrind clang llvm python3-zmq libevent-dev bsdmainutils libboost-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libsqlite3-dev" export USE_VALGRIND=1 export NO_DEPENDS=1 export TEST_RUNNER_EXTRA="--exclude rpc_bind" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547 diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh index affaaaa1aa..1e68d2a61a 100644 --- a/ci/test/00_setup_env_win64.sh +++ b/ci/test/00_setup_env_win64.sh @@ -7,11 +7,11 @@ export LC_ALL=C.UTF-8 export CONTAINER_NAME=ci_win64 -export DOCKER_NAME_TAG=ubuntu:18.04 # Check that bionic can cross-compile to win64 (bionic is used in the gitian build as well) +export DOCKER_NAME_TAG=ubuntu:20.04 # Check that Focal can cross-compile to win64 (Focal is used in the gitian build as well) export HOST=x86_64-w64-mingw32 -export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64 file" +export DPKG_ADD_ARCH="i386" +export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64 wine32 file" export RUN_FUNCTIONAL_TESTS=false -export RUN_SECURITY_TESTS="true" export GOAL="deploy" export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests --without-boost-process" diff --git a/ci/test/05_before_script.sh b/ci/test/05_before_script.sh index 4644f28a4e..f69afd8a26 100755 --- a/ci/test/05_before_script.sh +++ b/ci/test/05_before_script.sh @@ -19,7 +19,7 @@ OSX_SDK_BASENAME="Xcode-${XCODE_VERSION}-${XCODE_BUILD_ID}-extracted-SDK-with-li OSX_SDK_PATH="${DEPENDS_DIR}/sdk-sources/${OSX_SDK_BASENAME}" if [ -n "$XCODE_VERSION" ] && [ ! -f "$OSX_SDK_PATH" ]; then - curl --location --fail "${SDK_URL}/${OSX_SDK_BASENAME}" -o "$OSX_SDK_PATH" + DOCKER_EXEC curl --location --fail "${SDK_URL}/${OSX_SDK_BASENAME}" -o "$OSX_SDK_PATH" fi if [[ ${USE_MEMORY_SANITIZER} == "true" ]]; then diff --git a/ci/test/wrap-wine.sh b/ci/test/wrap-wine.sh index 58a8983e6e..82964897e1 100755 --- a/ci/test/wrap-wine.sh +++ b/ci/test/wrap-wine.sh @@ -13,7 +13,7 @@ for b_name in {"${BASE_OUTDIR}/bin"/*,src/secp256k1/*tests,src/univalue/{no_nul, echo "Wrap $b ..." mv "$b" "${b}_orig" echo '#!/usr/bin/env bash' > "$b" - echo "wine64 \"${b}_orig\" \"\$@\"" >> "$b" + echo "( wine \"${b}_orig\" \"\$@\" ) || ( sleep 1 && wine \"${b}_orig\" \"\$@\" )" >> "$b" chmod +x "$b" fi done diff --git a/configure.ac b/configure.ac index dca4c5edd8..c0e3b32e3b 100644 --- a/configure.ac +++ b/configure.ac @@ -184,10 +184,16 @@ AC_ARG_ENABLE([extended-functional-tests], AC_ARG_ENABLE([fuzz], AS_HELP_STRING([--enable-fuzz], - [enable building of fuzz targets (default no). enabling this will disable all other targets]), + [build for fuzzing (default no). enabling this will disable all other targets and override --{enable,disable}-fuzz-binary]), [enable_fuzz=$enableval], [enable_fuzz=no]) +AC_ARG_ENABLE([fuzz-binary], + AS_HELP_STRING([--enable-fuzz-binary], + [enable building of fuzz binary (default yes).]), + [enable_fuzz_binary=$enableval], + [enable_fuzz_binary=yes]) + AC_ARG_ENABLE([danger_fuzz_link_all], AS_HELP_STRING([--enable-danger-fuzz-link-all], [Danger! Modifies source code. Needs git and gnu sed installed. 
Link each fuzz target (default no).]), @@ -332,6 +338,11 @@ AC_ARG_ENABLE([werror], [enable_werror=$enableval], [enable_werror=no]) +AC_ARG_WITH([boost-process], + [AS_HELP_STRING([--with-boost-process],[Opt in to using Boost Process (default is no)])], + [boost_process=$withval], + [boost_process=no]) + AC_LANG_PUSH([C++]) dnl Check for a flag to turn compiler warnings into errors. This is helpful for checks which may @@ -651,7 +662,7 @@ case $host in AC_MSG_ERROR("windres not found") fi - CPPFLAGS="$CPPFLAGS -D_MT -DWIN32 -D_WINDOWS -DBOOST_THREAD_USE_LIB -D_WIN32_WINNT=0x0601 -D_WIN32_IE=0x0501 -DWIN32_LEAN_AND_MEAN" + CPPFLAGS="$CPPFLAGS -D_MT -DWIN32 -D_WINDOWS -D_WIN32_WINNT=0x0601 -D_WIN32_IE=0x0501 -DWIN32_LEAN_AND_MEAN" dnl libtool insists upon adding -nostdlib and a list of objects/libs to link against. dnl That breaks our ability to build dll's with static libgcc/libstdc++/libssp. Override @@ -807,10 +818,6 @@ if test x$ac_cv_sys_large_files != x && CPPFLAGS="$CPPFLAGS -D_LARGE_FILES=$ac_cv_sys_large_files" fi -AX_GCC_FUNC_ATTRIBUTE([visibility]) -AX_GCC_FUNC_ATTRIBUTE([dllexport]) -AX_GCC_FUNC_ATTRIBUTE([dllimport]) - if test x$use_glibc_compat != xno; then AX_CHECK_LINK_FLAG([[-Wl,--wrap=__divmoddi4]], [COMPAT_LDFLAGS="$COMPAT_LDFLAGS -Wl,--wrap=__divmoddi4"]) AX_CHECK_LINK_FLAG([[-Wl,--wrap=log2f]], [COMPAT_LDFLAGS="$COMPAT_LDFLAGS -Wl,--wrap=log2f"]) @@ -854,7 +861,10 @@ if test x$use_hardening != xno; then AX_CHECK_COMPILE_FLAG([-Wstack-protector],[HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -Wstack-protector"]) AX_CHECK_COMPILE_FLAG([-fstack-protector-all],[HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fstack-protector-all"]) - AX_CHECK_COMPILE_FLAG([-fcf-protection=full],[HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fcf-protection=full"]) + dnl -fcf-protection used with Clang 7 causes ld to emit warnings: + dnl ld: error: ... <corrupt x86 feature size: 0x8> + dnl Use CHECK_LINK_FLAG & --fatal-warnings to ensure we wont use the flag in this case. + AX_CHECK_LINK_FLAG([-fcf-protection=full],[HARDENED_CXXFLAGS="$HARDENED_CXXFLAGS -fcf-protection=full"],, [[$LDFLAG_WERROR]]) dnl stack-clash-protection does not work properly when building for Windows. dnl We use the test case from https://gcc.gnu.org/bugzilla/show_bug.cgi?id=90458 @@ -975,13 +985,13 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ [ AC_MSG_RESULT(no)] ) -AC_MSG_CHECKING([for visibility attribute]) -AC_LINK_IFELSE([AC_LANG_SOURCE([ - int foo_def( void ) __attribute__((visibility("default"))); +AC_MSG_CHECKING([for default visibility attribute]) +AC_COMPILE_IFELSE([AC_LANG_SOURCE([ + int foo(void) __attribute__((visibility("default"))); int main(){} ])], [ - AC_DEFINE(HAVE_VISIBILITY_ATTRIBUTE,1,[Define if the visibility attribute is supported.]) + AC_DEFINE(HAVE_DEFAULT_VISIBILITY_ATTRIBUTE,1,[Define if the visibility attribute is supported.]) AC_MSG_RESULT(yes) ], [ @@ -992,6 +1002,18 @@ AC_LINK_IFELSE([AC_LANG_SOURCE([ ] ) +AC_MSG_CHECKING([for dllexport attribute]) +AC_COMPILE_IFELSE([AC_LANG_SOURCE([ + __declspec(dllexport) int foo(void); + int main(){} + ])], + [ + AC_DEFINE(HAVE_DLLEXPORT_ATTRIBUTE,1,[Define if the dllexport attribute is supported.]) + AC_MSG_RESULT(yes) + ], + [AC_MSG_RESULT(no)] +) + dnl thread_local is currently disabled when building with glibc back compat. dnl Our minimum supported glibc is 2.17, however support for thread_local dnl did not arrive in glibc until 2.18. 
@@ -1174,12 +1196,6 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[ [ AC_MSG_RESULT(no); HAVE_WEAK_GETAUXVAL=0 ] ) -dnl Check for reduced exports -if test x$use_reduce_exports = xyes; then - AX_CHECK_COMPILE_FLAG([-fvisibility=hidden],[RE_CXXFLAGS="-fvisibility=hidden"], - [AC_MSG_ERROR([Cannot set default symbol visibility. Use --disable-reduce-exports.])]) -fi - AC_MSG_CHECKING([for std::system]) AC_LINK_IFELSE( [ AC_LANG_PROGRAM( @@ -1224,7 +1240,7 @@ AC_DEFUN([SUPPRESS_WARNINGS], dnl enable-fuzz should disable all other targets if test "x$enable_fuzz" = "xyes"; then - AC_MSG_WARN(enable-fuzz will disable all other targets) + AC_MSG_WARN(enable-fuzz will disable all other targets and force --enable-fuzz-binary=yes) build_bitcoin_utils=no build_bitcoin_cli=no build_bitcoin_tx=no @@ -1240,10 +1256,11 @@ if test "x$enable_fuzz" = "xyes"; then use_upnp=no use_natpmp=no use_zmq=no + enable_fuzz_binary=yes AX_CHECK_PREPROC_FLAG([-DABORT_ON_FAILED_ASSUME],[[DEBUG_CPPFLAGS="$DEBUG_CPPFLAGS -DABORT_ON_FAILED_ASSUME"]],,[[$CXXFLAG_WERROR]]) - AC_MSG_CHECKING([whether main function is needed]) + AC_MSG_CHECKING([whether main function is needed for fuzz binary]) AX_CHECK_LINK_FLAG( [[-fsanitize=$use_sanitizers]], [AC_MSG_RESULT([no])], @@ -1254,8 +1271,10 @@ if test "x$enable_fuzz" = "xyes"; then #include <cstdint> #include <cstddef> extern "C" int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) { return 0; } - /* unterminated comment to remove the main function ... - ]],[[]])]) + /* comment to remove the main function ... + ]],[[ + */ int not_main() { + ]])]) else BITCOIN_QT_INIT @@ -1269,6 +1288,8 @@ else QT_DBUS_INCLUDES=SUPPRESS_WARNINGS($QT_DBUS_INCLUDES) QT_TEST_INCLUDES=SUPPRESS_WARNINGS($QT_TEST_INCLUDES) fi + + CPPFLAGS="$CPPFLAGS -DPROVIDE_MAIN_FUNCTION" fi if test x$enable_wallet != xno; then @@ -1361,20 +1382,23 @@ fi if test x$use_boost = xyes; then -dnl Minimum required Boost version -define(MINIMUM_REQUIRED_BOOST, 1.58.0) - -dnl Check for Boost libs -AX_BOOST_BASE([MINIMUM_REQUIRED_BOOST]) +dnl Check for Boost headers +AX_BOOST_BASE([1.58.0],[],[AC_MSG_ERROR([Boost is not available!])]) if test x$want_boost = xno; then AC_MSG_ERROR([[only libbitcoinconsensus can be built without boost]]) fi AX_BOOST_SYSTEM AX_BOOST_FILESYSTEM -AX_BOOST_THREAD -dnl Opt-in to boost-process -AS_IF([ test x$with_boost_process != x ], [ AX_BOOST_PROCESS ], [ ax_cv_boost_process=no ] ) +dnl Opt-in to Boost Process +if test "x$boost_process" != xno; then +AC_MSG_CHECKING(for Boost Process) +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <boost/process.hpp>]], + [[ boost::process::child* child = new boost::process::child; delete child; ]])], + [ AC_MSG_RESULT(yes); AC_DEFINE([HAVE_BOOST_PROCESS],,[define if Boost::Process is available])], + [ AC_MSG_ERROR([Boost::Process is not available!])] +) +fi if test x$suppress_external_warnings != xno; then BOOST_CPPFLAGS=SUPPRESS_WARNINGS($BOOST_CPPFLAGS) @@ -1385,12 +1409,14 @@ dnl counter implementations. 
In 1.63 and later the std::atomic approach is defau m4_pattern_allow(DBOOST_AC_USE_STD_ATOMIC) dnl otherwise it's treated like a macro BOOST_CPPFLAGS="-DBOOST_SP_USE_STD_ATOMIC -DBOOST_AC_USE_STD_ATOMIC $BOOST_CPPFLAGS" -BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB $BOOST_THREAD_LIB" +BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB" fi +dnl Check for reduced exports if test x$use_reduce_exports = xyes; then - CXXFLAGS="$CXXFLAGS $RE_CXXFLAGS" - AX_CHECK_LINK_FLAG([[-Wl,--exclude-libs,ALL]], [RELDFLAGS="-Wl,--exclude-libs,ALL"],, [[$LDFLAG_WERROR]]) + AX_CHECK_COMPILE_FLAG([-fvisibility=hidden],[CXXFLAGS="$CXXFLAGS -fvisibility=hidden"], + [AC_MSG_ERROR([Cannot set hidden symbol visibility. Use --disable-reduce-exports.])],[[$CXXFLAG_WERROR]]) + AX_CHECK_LINK_FLAG([[-Wl,--exclude-libs,ALL]],[RELDFLAGS="-Wl,--exclude-libs,ALL"],,[[$LDFLAG_WERROR]]) fi if test x$use_tests = xyes; then @@ -1714,6 +1740,7 @@ AM_CONDITIONAL([USE_BDB], [test "x$use_bdb" = "xyes"]) AM_CONDITIONAL([ENABLE_TRACING],[test x$have_sdt = xyes]) AM_CONDITIONAL([ENABLE_TESTS],[test x$BUILD_TEST = xyes]) AM_CONDITIONAL([ENABLE_FUZZ],[test x$enable_fuzz = xyes]) +AM_CONDITIONAL([ENABLE_FUZZ_BINARY],[test x$enable_fuzz_binary = xyes]) AM_CONDITIONAL([ENABLE_FUZZ_LINK_ALL],[test x$enable_danger_fuzz_link_all = xyes]) AM_CONDITIONAL([ENABLE_QT],[test x$bitcoin_enable_qt = xyes]) AM_CONDITIONAL([ENABLE_QT_TESTS],[test x$BUILD_TEST_QT = xyes]) @@ -1730,6 +1757,8 @@ AM_CONDITIONAL([ENABLE_SHANI],[test x$enable_shani = xyes]) AM_CONDITIONAL([ENABLE_ARM_CRC],[test x$enable_arm_crc = xyes]) AM_CONDITIONAL([USE_ASM],[test x$use_asm = xyes]) AM_CONDITIONAL([WORDS_BIGENDIAN],[test x$ac_cv_c_bigendian = xyes]) +AM_CONDITIONAL([USE_NATPMP],[test x$use_natpmp = xyes]) +AM_CONDITIONAL([USE_UPNP],[test x$use_upnp = xyes]) AC_DEFINE(CLIENT_VERSION_MAJOR, _CLIENT_VERSION_MAJOR, [Major version]) AC_DEFINE(CLIENT_VERSION_MINOR, _CLIENT_VERSION_MINOR, [Minor version]) @@ -1856,8 +1885,9 @@ esac echo echo "Options used to compile and link:" -echo " boost process = $ax_cv_boost_process" +echo " boost process = $with_boost_process" echo " multiprocess = $build_multiprocess" +echo " with libs = $build_bitcoin_libs" echo " with wallet = $enable_wallet" if test "x$enable_wallet" != "xno"; then echo " with sqlite = $use_sqlite" diff --git a/contrib/debian/copyright b/contrib/debian/copyright index a18c5bccc5..7ee7f056d9 100644 --- a/contrib/debian/copyright +++ b/contrib/debian/copyright @@ -87,6 +87,10 @@ Files: src/qt/res/icons/proxy.png Copyright: Cristian Mircea Messel License: public-domain +Files: src/qt/fonts/RobotoMono-Bold.ttf +License: Apache-2.0 +Comment: Site: https://fonts.google.com/specimen/Roboto+Mono + License: Expat Permission is hereby granted, free of charge, to any person obtaining a @@ -144,3 +148,14 @@ Comment: License: public-domain This work is in the public domain. + +License: Apache-2.0 + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + http://www.apache.org/licenses/LICENSE-2.0 + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py index 18ed7d61e0..ee7bfc9805 100755 --- a/contrib/devtools/test-symbol-check.py +++ b/contrib/devtools/test-symbol-check.py @@ -23,29 +23,26 @@ class TestSymbolChecks(unittest.TestCase): executable = 'test1' cc = 'gcc' - # there's no way to do this test for RISC-V at the moment; bionic's libc is 2.27 - # and we allow all symbols from 2.27. - if 'riscv' in get_machine(cc): - self.skipTest("test not available for RISC-V") - - # memfd_create was introduced in GLIBC 2.27, so is newer than the upper limit of - # all but RISC-V but still available on bionic + # renameat2 was introduced in GLIBC 2.28, so is newer than the upper limit + # of glibc for all platforms with open(source, 'w', encoding="utf8") as f: f.write(''' #define _GNU_SOURCE - #include <sys/mman.h> + #include <stdio.h> + #include <linux/fs.h> - int memfd_create(const char *name, unsigned int flags); + int renameat2(int olddirfd, const char *oldpath, + int newdirfd, const char *newpath, unsigned int flags); int main() { - memfd_create("test", 0); + renameat2(0, "test", 0, "test_", RENAME_EXCHANGE); return 0; } ''') self.assertEqual(call_symbol_check(cc, source, executable, []), - (1, executable + ': symbol memfd_create from unsupported version GLIBC_2.27\n' + + (1, executable + ': symbol renameat2 from unsupported version GLIBC_2.28\n' + executable + ': failed IMPORTED_SYMBOLS')) # -lutil is part of the libc6 package so a safe bet that it's installed diff --git a/contrib/gitian-build.py b/contrib/gitian-build.py index 06b15574a7..60acb0d593 100755 --- a/contrib/gitian-build.py +++ b/contrib/gitian-build.py @@ -35,14 +35,14 @@ def setup(): if not os.path.isdir('bitcoin'): subprocess.check_call(['git', 'clone', 'https://github.com/bitcoin/bitcoin.git']) os.chdir('gitian-builder') - make_image_prog = ['bin/make-base-vm', '--suite', 'bionic', '--arch', 'amd64'] + make_image_prog = ['bin/make-base-vm', '--suite', 'focal', '--arch', 'amd64'] if args.docker: make_image_prog += ['--docker'] elif not args.kvm: - make_image_prog += ['--lxc'] + make_image_prog += ['--lxc', '--disksize', '13000'] subprocess.check_call(make_image_prog) os.chdir(workdir) - if args.is_bionic and not args.kvm and not args.docker: + if args.is_focal and not args.kvm and not args.docker: subprocess.check_call(['sudo', 'sed', '-i', 's/lxcbr0/br0/', '/etc/default/lxc-net']) print('Reboot is required') sys.exit(0) @@ -176,7 +176,7 @@ def main(): args = parser.parse_args() workdir = os.getcwd() - args.is_bionic = b'bionic' in subprocess.check_output(['lsb_release', '-cs']) + args.is_focal = b'focal' in subprocess.check_output(['lsb_release', '-cs']) if args.kvm and args.docker: raise Exception('Error: cannot have both kvm and docker') diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml index a0ff87b531..b06fc782a3 100644 --- a/contrib/gitian-descriptors/gitian-linux.yml +++ b/contrib/gitian-descriptors/gitian-linux.yml @@ -3,7 +3,7 @@ name: "bitcoin-core-linux-22" enable_cache: true distro: "ubuntu" suites: -- "bionic" +- "focal" architectures: - "amd64" packages: @@ -15,6 +15,8 @@ packages: - "ca-certificates" - "curl" - "faketime" +- "g++-8" +- "gcc-8" - "git" - "libtool" - "patch" @@ -45,7 +47,7 @@ script: | WRAP_DIR=$HOME/wrapped HOSTS="x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu powerpc64-linux-gnu powerpc64le-linux-gnu riscv64-linux-gnu" - CONFIGFLAGS="--enable-glibc-back-compat --enable-reduce-exports 
--disable-bench --disable-gui-tests" + CONFIGFLAGS="--enable-glibc-back-compat --enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary" FAKETIME_HOST_PROGS="gcc g++" FAKETIME_PROGS="date ar ranlib nm" HOST_CFLAGS="-O2 -g" @@ -107,7 +109,7 @@ script: | BASEPREFIX="${PWD}/depends" # Build dependencies for each host for i in $HOSTS; do - make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" + make ${MAKEOPTS} -C ${BASEPREFIX} HOST="${i}" CC=${i}-gcc-8 CXX=${i}-g++-8 done # Faketime for binaries @@ -130,7 +132,7 @@ script: | # Extract the git archive into a dir for each host and build for i in ${HOSTS}; do export PATH=${BASEPREFIX}/${i}/native/bin:${ORIGPATH} - if [ "${i}" = "riscv64-linux-gnu" ] || [ "${i}" = "powerpc64-linux-gnu" ] || [ "${i}" = "powerpc64le-linux-gnu" ]; then + if [ "${i}" = "powerpc64-linux-gnu" ]; then # Workaround for https://bugs.launchpad.net/ubuntu/+source/gcc-8-cross-ports/+bug/1853740 # TODO: remove this when no longer needed HOST_LDFLAGS="${HOST_LDFLAGS_BASE} -Wl,-z,noexecstack" @@ -144,7 +146,7 @@ script: | tar --strip-components=1 -xf "${GIT_ARCHIVE}" ./autogen.sh - CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}" LDFLAGS="${HOST_LDFLAGS}" + CONFIG_SITE=${BASEPREFIX}/${i}/share/config.site ./configure --prefix=/ --disable-ccache --disable-maintainer-mode --disable-dependency-tracking ${CONFIGFLAGS} CFLAGS="${HOST_CFLAGS}" CXXFLAGS="${HOST_CXXFLAGS}" LDFLAGS="${HOST_LDFLAGS}" CC=${i}-gcc-8 CXX=${i}-g++-8 make ${MAKEOPTS} make ${MAKEOPTS} -C src check-security make ${MAKEOPTS} -C src check-symbols diff --git a/contrib/gitian-descriptors/gitian-osx-signer.yml b/contrib/gitian-descriptors/gitian-osx-signer.yml index 6fcb21f729..3f0c0c3332 100644 --- a/contrib/gitian-descriptors/gitian-osx-signer.yml +++ b/contrib/gitian-descriptors/gitian-osx-signer.yml @@ -2,7 +2,7 @@ name: "bitcoin-dmg-signer" distro: "ubuntu" suites: -- "bionic" +- "focal" architectures: - "amd64" packages: diff --git a/contrib/gitian-descriptors/gitian-osx.yml b/contrib/gitian-descriptors/gitian-osx.yml index 2a47e90e6e..0dc531df0e 100644 --- a/contrib/gitian-descriptors/gitian-osx.yml +++ b/contrib/gitian-descriptors/gitian-osx.yml @@ -3,7 +3,7 @@ name: "bitcoin-core-osx-22" enable_cache: true distro: "ubuntu" suites: -- "bionic" +- "focal" architectures: - "amd64" packages: @@ -29,6 +29,7 @@ packages: - "python3-setuptools" - "fonts-tuffy" - "xorriso" +- "libtinfo5" remotes: - "url": "https://github.com/bitcoin/bitcoin.git" "dir": "bitcoin" @@ -39,7 +40,7 @@ script: | WRAP_DIR=$HOME/wrapped HOSTS="x86_64-apple-darwin18" - CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests XORRISOFS=${WRAP_DIR}/xorrisofs DMG=${WRAP_DIR}/dmg" + CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary XORRISOFS=${WRAP_DIR}/xorrisofs DMG=${WRAP_DIR}/dmg" FAKETIME_HOST_PROGS="" FAKETIME_PROGS="ar ranlib date dmg xorrisofs" diff --git a/contrib/gitian-descriptors/gitian-win-signer.yml b/contrib/gitian-descriptors/gitian-win-signer.yml index 6bcd126662..c13c24c3cc 100644 --- a/contrib/gitian-descriptors/gitian-win-signer.yml +++ b/contrib/gitian-descriptors/gitian-win-signer.yml @@ -2,7 +2,7 @@ name: "bitcoin-win-signer" distro: "ubuntu" suites: -- "bionic" +- "focal" architectures: - "amd64" packages: diff --git a/contrib/gitian-descriptors/gitian-win.yml 
b/contrib/gitian-descriptors/gitian-win.yml index 1edd8b2e81..95cf0185e2 100644 --- a/contrib/gitian-descriptors/gitian-win.yml +++ b/contrib/gitian-descriptors/gitian-win.yml @@ -3,7 +3,7 @@ name: "bitcoin-core-win-22" enable_cache: true distro: "ubuntu" suites: -- "bionic" +- "focal" architectures: - "amd64" packages: @@ -31,7 +31,7 @@ script: | WRAP_DIR=$HOME/wrapped HOSTS="x86_64-w64-mingw32" - CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests" + CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary" FAKETIME_HOST_PROGS="ar ranlib nm windres strip objcopy" FAKETIME_PROGS="date makensis zip" HOST_CFLAGS="-O2 -g -fno-ident" diff --git a/contrib/guix/README.md b/contrib/guix/README.md index 5870deee39..e218ba89a0 100644 --- a/contrib/guix/README.md +++ b/contrib/guix/README.md @@ -13,11 +13,9 @@ We achieve bootstrappability by using Guix as a functional package manager. Conservatively, a x86_64 machine with: -- 4GB of free disk space on the partition that /gnu/store will reside in -- 24GB of free disk space on the partition that the Bitcoin Core git repository - resides in - -> Note: these requirements are slightly less onerous than those of Gitian builds +- 16GB of free disk space on the partition that /gnu/store will reside in +- 8GB of free disk space per platform triple you're planning on building (see + the `HOSTS` environment variable description) ## Setup @@ -114,6 +112,12 @@ find output/ -type f -print0 | sort -z | xargs -r0 sha256sum depends tree. Setting this to the same directory across multiple builds of the depends tree can eliminate unnecessary redownloading of package sources. +* _**BASE_CACHE**_ + + Set the depends tree cache for built packages. This is passed through to the + depends tree. Setting this to the same directory across multiple builds of the + depends tree can eliminate unnecessary building of packages. + * _**MAX_JOBS**_ Override the maximum number of jobs to run simultaneously, you might want to @@ -217,11 +221,11 @@ As mentioned at the bottom of [this manual page][guix/bin-install]: > > make guix-binary.x86_64-linux.tar.xz -### When will Guix be packaged in debian? +### Is Guix packaged in my operating system? -Thanks to Vagrant Cascadian's diligent work, Guix is now [in debian -experimental][debian/guix-experimental]! Hopefully it will make its way into a -release soon. +Guix is shipped starting with [Debian Bullseye][debian/guix-bullseye] and +[Ubuntu 21.04 "Hirsute Hippo"][ubuntu/guix-hirsute]. Other operating systems +are working on packaging Guix as well. [b17e]: http://bootstrappable.org/ [r12e/source-date-epoch]: https://reproducible-builds.org/docs/source-date-epoch/ @@ -233,5 +237,6 @@ release soon. 
[guix/substitute-server-auth]: https://www.gnu.org/software/guix/manual/en/html_node/Substitute-Server-Authorization.html [guix/time-machine]: https://guix.gnu.org/manual/en/html_node/Invoking-guix-time_002dmachine.html -[debian/guix-experimental]: https://packages.debian.org/experimental/guix +[debian/guix-bullseye]: https://packages.debian.org/bullseye/guix +[ubuntu/guix-hirsute]: https://packages.ubuntu.com/hirsute/guix [fanquake/guix-docker]: https://github.com/fanquake/core-review/tree/master/guix diff --git a/contrib/guix/guix-build.sh b/contrib/guix/guix-build.sh index da6bd13f6a..7af132b240 100755 --- a/contrib/guix/guix-build.sh +++ b/contrib/guix/guix-build.sh @@ -69,7 +69,7 @@ fi ################ # Default to building for all supported HOSTs (overridable by environment) -export HOSTS="${HOSTS:-x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu +export HOSTS="${HOSTS:-x86_64-linux-gnu arm-linux-gnueabihf aarch64-linux-gnu riscv64-linux-gnu powerpc64-linux-gnu powerpc64le-linux-gnu x86_64-w64-mingw32 x86_64-apple-darwin18}" @@ -136,9 +136,24 @@ done # environment) MAX_JOBS="${MAX_JOBS:-$(nproc)}" +# Usage: host_to_commonname HOST +# +# HOST: The current platform triple we're building for +# +host_to_commonname() { + case "$1" in + *darwin*) echo osx ;; + *mingw*) echo win ;; + *linux*) echo linux ;; + *) exit 1 ;; + esac +} + # Download the depends sources now as we won't have internet access in the build # container -make -C "${PWD}/depends" -j"$MAX_JOBS" download ${V:+V=1} ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} +for host in $HOSTS; do + make -C "${PWD}/depends" -j"$MAX_JOBS" download-"$(host_to_commonname "$host")" ${V:+V=1} ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} +done # Determine the reference time used for determinism (overridable by environment) SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git log --format=%at -1)}" @@ -148,8 +163,9 @@ SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git log --format=%at -1)}" time-machine() { # shellcheck disable=SC2086 guix time-machine --url=https://github.com/dongcarl/guix.git \ - --commit=b066c25026f21fb57677aa34692a5034338e7ee3 \ + --commit=6c9d16db962a6f7155571b36eced681fd2889e23 \ --max-jobs="$MAX_JOBS" \ + --keep-failed \ ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \ ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_TIMEMACHINE_FLAGS} \ -- "$@" @@ -180,6 +196,11 @@ and untracked files and directories will be wiped, allowing you to start anew. EOF } +# Create SOURCES_PATH and BASE_CACHE if they are non-empty so that we can map +# them into the container +[ -z "$SOURCES_PATH" ] || mkdir -p "$SOURCES_PATH" +[ -z "$BASE_CACHE" ] || mkdir -p "$BASE_CACHE" + # Deterministically build Bitcoin Core # shellcheck disable=SC2153 for host in $HOSTS; do @@ -254,6 +275,12 @@ EOF # make the downloaded depends sources available to it. The sources # should have been downloaded prior to this invocation. # + # --keep-failed keep build tree of failed builds + # + # When builds of the Guix environment itself (not Bitcoin Core) + # fail, it is useful for the build tree to be kept for debugging + # purposes. 
+ # # ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} # # fetch substitute from SUBSTITUTE_URLS if they are @@ -274,7 +301,9 @@ EOF --share="$OUTDIR"=/outdir \ --expose="$(git rev-parse --git-common-dir)" \ ${SOURCES_PATH:+--share="$SOURCES_PATH"} \ + ${BASE_CACHE:+--share="$BASE_CACHE"} \ --max-jobs="$MAX_JOBS" \ + --keep-failed \ ${SUBSTITUTE_URLS:+--substitute-urls="$SUBSTITUTE_URLS"} \ ${ADDITIONAL_GUIX_COMMON_FLAGS} ${ADDITIONAL_GUIX_ENVIRONMENT_FLAGS} \ -- env HOST="$host" \ @@ -282,6 +311,7 @@ EOF SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:?unable to determine value}" \ ${V:+V=1} \ ${SOURCES_PATH:+SOURCES_PATH="$SOURCES_PATH"} \ + ${BASE_CACHE:+BASE_CACHE="$BASE_CACHE"} \ DISTSRC="$(DISTSRC_BASE=/distsrc-base && distsrc_for_host "$HOST")" \ OUTDIR=/outdir \ bash -c "cd /bitcoin && bash contrib/guix/libexec/build.sh" diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh index 39d3cb9b50..72588c54a7 100644 --- a/contrib/guix/libexec/build.sh +++ b/contrib/guix/libexec/build.sh @@ -44,6 +44,8 @@ store_path() { NATIVE_GCC="$(store_path gcc-toolchain)" export LIBRARY_PATH="${NATIVE_GCC}/lib:${NATIVE_GCC}/lib64" export CPATH="${NATIVE_GCC}/include" +unset C_INCLUDE_PATH +unset CPLUS_INCLUDE_PATH case "$HOST" in *darwin*) # When targeting darwin, some native tools built by depends require @@ -66,7 +68,8 @@ case "$HOST" in # Determine output paths to use in CROSS_* environment variables CROSS_GLIBC="$(store_path "mingw-w64-x86_64-winpthreads")" CROSS_GCC="$(store_path "gcc-cross-${HOST}")" - CROSS_GCC_LIBS=( "${CROSS_GCC}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... + CROSS_GCC_LIB_STORE="$(store_path "gcc-cross-${HOST}" lib)" + CROSS_GCC_LIBS=( "${CROSS_GCC_LIB_STORE}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... CROSS_GCC_LIB="${CROSS_GCC_LIBS[0]}" # ...we just want the first one (there should only be one) # The search path ordering is generally: @@ -75,7 +78,7 @@ case "$HOST" in # 2. kernel-header-related search paths (not applicable to mingw-w64 hosts) export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GCC_LIB}/include-fixed:${CROSS_GLIBC}/include" export CROSS_CPLUS_INCLUDE_PATH="${CROSS_GCC}/include/c++:${CROSS_GCC}/include/c++/${HOST}:${CROSS_GCC}/include/c++/backward:${CROSS_C_INCLUDE_PATH}" - export CROSS_LIBRARY_PATH="${CROSS_GCC}/lib:${CROSS_GCC}/${HOST}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib" + export CROSS_LIBRARY_PATH="${CROSS_GCC_LIB_STORE}/lib:${CROSS_GCC}/${HOST}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib" ;; *darwin*) # The CROSS toolchain for darwin uses the SDK and ignores environment variables. @@ -86,12 +89,13 @@ case "$HOST" in CROSS_GLIBC_STATIC="$(store_path "glibc-cross-${HOST}" static)" CROSS_KERNEL="$(store_path "linux-libre-headers-cross-${HOST}")" CROSS_GCC="$(store_path "gcc-cross-${HOST}")" - CROSS_GCC_LIBS=( "${CROSS_GCC}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... + CROSS_GCC_LIB_STORE="$(store_path "gcc-cross-${HOST}" lib)" + CROSS_GCC_LIBS=( "${CROSS_GCC_LIB_STORE}/lib/gcc/${HOST}"/* ) # This expands to an array of directories... 
CROSS_GCC_LIB="${CROSS_GCC_LIBS[0]}" # ...we just want the first one (there should only be one) export CROSS_C_INCLUDE_PATH="${CROSS_GCC_LIB}/include:${CROSS_GCC_LIB}/include-fixed:${CROSS_GLIBC}/include:${CROSS_KERNEL}/include" export CROSS_CPLUS_INCLUDE_PATH="${CROSS_GCC}/include/c++:${CROSS_GCC}/include/c++/${HOST}:${CROSS_GCC}/include/c++/backward:${CROSS_C_INCLUDE_PATH}" - export CROSS_LIBRARY_PATH="${CROSS_GCC}/lib:${CROSS_GCC}/${HOST}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib:${CROSS_GLIBC_STATIC}/lib" + export CROSS_LIBRARY_PATH="${CROSS_GCC_LIB_STORE}/lib:${CROSS_GCC}/${HOST}/lib:${CROSS_GCC_LIB}:${CROSS_GLIBC}/lib:${CROSS_GLIBC_STATIC}/lib" ;; *) exit 1 ;; @@ -132,12 +136,14 @@ case "$HOST" in *linux*) glibc_dynamic_linker=$( case "$HOST" in - i686-linux-gnu) echo /lib/ld-linux.so.2 ;; - x86_64-linux-gnu) echo /lib64/ld-linux-x86-64.so.2 ;; - arm-linux-gnueabihf) echo /lib/ld-linux-armhf.so.3 ;; - aarch64-linux-gnu) echo /lib/ld-linux-aarch64.so.1 ;; - riscv64-linux-gnu) echo /lib/ld-linux-riscv64-lp64d.so.1 ;; - *) exit 1 ;; + i686-linux-gnu) echo /lib/ld-linux.so.2 ;; + x86_64-linux-gnu) echo /lib64/ld-linux-x86-64.so.2 ;; + arm-linux-gnueabihf) echo /lib/ld-linux-armhf.so.3 ;; + aarch64-linux-gnu) echo /lib/ld-linux-aarch64.so.1 ;; + riscv64-linux-gnu) echo /lib/ld-linux-riscv64-lp64d.so.1 ;; + powerpc64-linux-gnu) echo /lib/ld64.so.1;; + powerpc64le-linux-gnu) echo /lib/ld64.so.2;; + *) exit 1 ;; esac ) ;; @@ -167,6 +173,7 @@ esac make -C depends --jobs="$MAX_JOBS" HOST="$HOST" \ ${V:+V=1} \ ${SOURCES_PATH+SOURCES_PATH="$SOURCES_PATH"} \ + ${BASE_CACHE+BASE_CACHE="$BASE_CACHE"} \ i686_linux_CC=i686-linux-gnu-gcc \ i686_linux_CXX=i686-linux-gnu-g++ \ i686_linux_AR=i686-linux-gnu-ar \ @@ -204,7 +211,7 @@ fi ########################### # CONFIGFLAGS -CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests" +CONFIGFLAGS="--enable-reduce-exports --disable-bench --disable-gui-tests --disable-fuzz-binary" case "$HOST" in *linux*) CONFIGFLAGS+=" --enable-glibc-back-compat" ;; esac @@ -226,6 +233,10 @@ case "$HOST" in *mingw*) HOST_LDFLAGS="-Wl,--no-insert-timestamp" ;; esac +case "$HOST" in + powerpc64-linux-*) HOST_LDFLAGS="${HOST_LDFLAGS} -Wl,-z,noexecstack" ;; +esac + # Make $HOST-specific native binaries from depends available in $PATH export PATH="${BASEPREFIX}/${HOST}/native/bin:${PATH}" mkdir -p "$DISTSRC" diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index 3b89659263..fb585b7f25 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -115,7 +115,8 @@ http://www.linuxfromscratch.org/hlfs/view/development/chapter05/gcc-pass1.html" `(("binutils" ,xbinutils) ("libc" ,xlibc) ("libc:static" ,xlibc "static") - ("gcc" ,xgcc))) + ("gcc" ,xgcc) + ("gcc-lib" ,xgcc "lib"))) (synopsis (string-append "Complete GCC tool chain for " target)) (description (string-append "This package provides a complete GCC tool chain for " target " development.")) @@ -159,7 +160,8 @@ desirable for building Bitcoin Core release binaries." 
(propagated-inputs `(("binutils" ,xbinutils) ("libc" ,pthreads-xlibc) - ("gcc" ,pthreads-xgcc))) + ("gcc" ,pthreads-xgcc) + ("gcc-lib" ,pthreads-xgcc "lib"))) (synopsis (string-append "Complete GCC tool chain for " target)) (description (string-append "This package provides a complete GCC tool chain for " target " development.")) @@ -219,7 +221,7 @@ chain for " target " development.")) pkg-config ;; Scripting perl - python-3.7 + python-3 ;; Git git ;; Native gcc 7 toolchain @@ -231,10 +233,13 @@ chain for " target " development.")) (make-mingw-pthreads-cross-toolchain "x86_64-w64-mingw32") (make-nsis-with-sde-support nsis-x86_64))) ((string-contains target "riscv64-linux-") - (list (make-bitcoin-cross-toolchain "riscv64-linux-gnu" + (list (make-bitcoin-cross-toolchain target + #:base-gcc-for-libc gcc-7))) + ((string-contains target "powerpc64le-linux-") + (list (make-bitcoin-cross-toolchain target #:base-gcc-for-libc gcc-7))) ((string-contains target "-linux-") (list (make-bitcoin-cross-toolchain target))) ((string-contains target "darwin") - (list clang-8 libcap binutils imagemagick libtiff librsvg font-tuffy cmake-3.15.5 xorriso)) + (list clang-8 libcap binutils imagemagick libtiff librsvg font-tuffy cmake xorriso)) (else '()))))) diff --git a/contrib/message-capture/message-capture-docs.md b/contrib/message-capture/message-capture-docs.md new file mode 100644 index 0000000000..7301968461 --- /dev/null +++ b/contrib/message-capture/message-capture-docs.md @@ -0,0 +1,25 @@ +# Per-Peer Message Capture + +## Purpose + +This feature allows for message capture on a per-peer basis. It answers the simple question: "Can I see what messages my node is sending and receiving?" + +## Usage and Functionality + +* Run `bitcoind` with the `-capturemessages` option. +* Look in the `message_capture` folder in your datadir. + * Typically this will be `~/.bitcoin/message_capture`. + * See that there are many folders inside, one for each peer, named with its IP address and port. + * Inside each peer's folder there are two `.dat` files: one is for received messages (`msgs_recv.dat`) and the other is for sent messages (`msgs_sent.dat`). +* Run `contrib/message-capture/message-capture-parser.py` with the proper arguments. + * See the `-h` option for help. + * To see all messages, both sent and received, for all peers use: + ``` + ./contrib/message-capture/message-capture-parser.py -o out.json \ + ~/.bitcoin/message_capture/**/*.dat + ``` + * Note: The messages in the given `.dat` files will be interleaved in chronological order. So, giving both received and sent `.dat` files (as above with `*.dat`) will result in all messages being interleaved in chronological order. + * If an output file is not provided (i.e. the `-o` option is not used), then the output prints to `stdout`. +* View the resulting output. + * The output file is `JSON` formatted. + * Suggestion: use `jq` to view the output, with `jq . out.json` diff --git a/contrib/message-capture/message-capture-parser.py b/contrib/message-capture/message-capture-parser.py new file mode 100755 index 0000000000..9988478f1b --- /dev/null +++ b/contrib/message-capture/message-capture-parser.py @@ -0,0 +1,214 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Parse message capture binary files.
To be used in conjunction with -capturemessages.""" + +import argparse +import os +import shutil +import sys +from io import BytesIO +import json +from pathlib import Path +from typing import Any, List, Optional + +sys.path.append(os.path.join(os.path.dirname(__file__), '../../test/functional')) + +from test_framework.messages import ser_uint256 # noqa: E402 +from test_framework.p2p import MESSAGEMAP # noqa: E402 + +TIME_SIZE = 8 +LENGTH_SIZE = 4 +MSGTYPE_SIZE = 12 + +# The test framework classes stores hashes as large ints in many cases. +# These are variables of type uint256 in core. +# There isn't a way to distinguish between a large int and a large int that is actually a blob of bytes. +# As such, they are itemized here. +# Any variables with these names that are of type int are actually uint256 variables. +# (These can be easily found by looking for calls to deser_uint256, deser_uint256_vector, and uint256_from_str in messages.py) +HASH_INTS = [ + "blockhash", + "block_hash", + "hash", + "hashMerkleRoot", + "hashPrevBlock", + "hashstop", + "prev_header", + "sha256", + "stop_hash", +] + +HASH_INT_VECTORS = [ + "hashes", + "headers", + "vHave", + "vHash", +] + + +class ProgressBar: + def __init__(self, total: float): + self.total = total + self.running = 0 + + def set_progress(self, progress: float): + cols = shutil.get_terminal_size()[0] + if cols <= 12: + return + max_blocks = cols - 9 + num_blocks = int(max_blocks * progress) + print('\r[ {}{} ] {:3.0f}%' + .format('#' * num_blocks, + ' ' * (max_blocks - num_blocks), + progress * 100), + end ='') + + def update(self, more: float): + self.running += more + self.set_progress(self.running / self.total) + + +def to_jsonable(obj: Any) -> Any: + if hasattr(obj, "__dict__"): + return obj.__dict__ + elif hasattr(obj, "__slots__"): + ret = {} # type: Any + for slot in obj.__slots__: + val = getattr(obj, slot, None) + if slot in HASH_INTS and isinstance(val, int): + ret[slot] = ser_uint256(val).hex() + elif slot in HASH_INT_VECTORS and isinstance(val[0], int): + ret[slot] = [ser_uint256(a).hex() for a in val] + else: + ret[slot] = to_jsonable(val) + return ret + elif isinstance(obj, list): + return [to_jsonable(a) for a in obj] + elif isinstance(obj, bytes): + return obj.hex() + else: + return obj + + +def process_file(path: str, messages: List[Any], recv: bool, progress_bar: Optional[ProgressBar]) -> None: + with open(path, 'rb') as f_in: + if progress_bar: + bytes_read = 0 + + while True: + if progress_bar: + # Update progress bar + diff = f_in.tell() - bytes_read - 1 + progress_bar.update(diff) + bytes_read = f_in.tell() - 1 + + # Read the Header + tmp_header_raw = f_in.read(TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE) + if not tmp_header_raw: + break + tmp_header = BytesIO(tmp_header_raw) + time = int.from_bytes(tmp_header.read(TIME_SIZE), "little") # type: int + msgtype = tmp_header.read(MSGTYPE_SIZE).split(b'\x00', 1)[0] # type: bytes + length = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little") # type: int + + # Start converting the message to a dictionary + msg_dict = {} + msg_dict["direction"] = "recv" if recv else "sent" + msg_dict["time"] = time + msg_dict["size"] = length # "size" is less readable here, but more readable in the output + + msg_ser = BytesIO(f_in.read(length)) + + # Determine message type + if msgtype not in MESSAGEMAP: + # Unrecognized message type + try: + msgtype_tmp = msgtype.decode() + if not msgtype_tmp.isprintable(): + raise UnicodeDecodeError + msg_dict["msgtype"] = msgtype_tmp + except UnicodeDecodeError: + 
msg_dict["msgtype"] = "UNREADABLE" + msg_dict["body"] = msg_ser.read().hex() + msg_dict["error"] = "Unrecognized message type." + messages.append(msg_dict) + print(f"WARNING - Unrecognized message type {msgtype} in {path}", file=sys.stderr) + continue + + # Deserialize the message + msg = MESSAGEMAP[msgtype]() + msg_dict["msgtype"] = msgtype.decode() + + try: + msg.deserialize(msg_ser) + except KeyboardInterrupt: + raise + except Exception: + # Unable to deserialize message body + msg_ser.seek(0, os.SEEK_SET) + msg_dict["body"] = msg_ser.read().hex() + msg_dict["error"] = "Unable to deserialize message." + messages.append(msg_dict) + print(f"WARNING - Unable to deserialize message in {path}", file=sys.stderr) + continue + + # Convert body of message into a jsonable object + if length: + msg_dict["body"] = to_jsonable(msg) + messages.append(msg_dict) + + if progress_bar: + # Update the progress bar to the end of the current file + # in case we exited the loop early + f_in.seek(0, os.SEEK_END) # Go to end of file + diff = f_in.tell() - bytes_read - 1 + progress_bar.update(diff) + + +def main(): + parser = argparse.ArgumentParser( + description=__doc__, + epilog="EXAMPLE \n\t{0} -o out.json <data-dir>/message_capture/**/*.dat".format(sys.argv[0]), + formatter_class=argparse.RawTextHelpFormatter) + parser.add_argument( + "capturepaths", + nargs='+', + help="binary message capture files to parse.") + parser.add_argument( + "-o", "--output", + help="output file. If unset print to stdout") + parser.add_argument( + "-n", "--no-progress-bar", + action='store_true', + help="disable the progress bar. Automatically set if the output is not a terminal") + args = parser.parse_args() + capturepaths = [Path.cwd() / Path(capturepath) for capturepath in args.capturepaths] + output = Path.cwd() / Path(args.output) if args.output else False + use_progress_bar = (not args.no_progress_bar) and sys.stdout.isatty() + + messages = [] # type: List[Any] + if use_progress_bar: + total_size = sum(capture.stat().st_size for capture in capturepaths) + progress_bar = ProgressBar(total_size) + else: + progress_bar = None + + for capture in capturepaths: + process_file(str(capture), messages, "recv" in capture.stem, progress_bar) + + messages.sort(key=lambda msg: msg['time']) + + if use_progress_bar: + progress_bar.set_progress(1) + + jsonrep = json.dumps(messages) + if output: + with open(str(output), 'w+', encoding="utf8") as f_out: + f_out.write(jsonrep) + else: + print(jsonrep) + +if __name__ == "__main__": + main() diff --git a/depends/Makefile b/depends/Makefile index 596a46d4a2..4cd4d72fc2 100644 --- a/depends/Makefile +++ b/depends/Makefile @@ -2,7 +2,7 @@ # Pattern rule to print variables, e.g. make print-top_srcdir print-%: - @echo $* = $($*) + @echo '$*' = '$($*)' # When invoking a sub-make, keep only the command line variable definitions # matching the pattern in the filter function. @@ -112,19 +112,27 @@ include builders/$(build_os).mk include builders/default.mk include packages/packages.mk +full_env=$(shell printenv) + build_id_string:=$(BUILD_ID_SALT) -build_id_string+=$(shell $(build_CC) --version 2>/dev/null) -build_id_string+=$(shell $(build_AR) --version 2>/dev/null) -build_id_string+=$(shell $(build_CXX) --version 2>/dev/null) -build_id_string+=$(shell $(build_RANLIB) --version 2>/dev/null) -build_id_string+=$(shell $(build_STRIP) --version 2>/dev/null) + +# GCC only prints COLLECT_LTO_WRAPPER when invoked with just "-v", but we want +# the information from "-v -E -" as well, so just include both. 
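The `build_id_string` lines that follow capture only each tool's stderr: `3>&1 1>&2 2>&3` swaps stdout and stderr inside the `$(shell ...)` call, and the trailing `> /dev/null` throws away what the compiler prints to stdout, so only the `-v` toolchain information (which GCC writes to stderr) ends up in the id string that depends uses to detect toolchain changes. A minimal sketch of the same redirection in plain shell, assuming a `gcc` binary is on `PATH`:

```sh
# Capture only gcc's stderr (the -v toolchain info) into a variable.
# 3>&1 saves the capture destination, 1>&2 and 2>&3 swap stdout/stderr,
# and > /dev/null then discards everything gcc writes to stdout.
cc_info="$(gcc -v -E - < /dev/null 3>&1 1>&2 2>&3 > /dev/null)"
printf '%s\n' "$cc_info" | head -n 3
```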
+# +# '3>&1 1>&2 2>&3 > /dev/null' is supposed to swap stdout and stderr and silence +# stdout, since we only want the stderr output +build_id_string+=$(shell $(build_CC) -v < /dev/null 3>&1 1>&2 2>&3 > /dev/null) $(shell $(build_CC) -v -E - < /dev/null 3>&1 1>&2 2>&3 > /dev/null) +build_id_string+=$(shell $(build_AR) --version 2>/dev/null) $(filter AR_%,$(full_env)) ZERO_AR_DATE=$(ZERO_AR_DATE) +build_id_string+=$(shell $(build_CXX) -v < /dev/null 3>&1 1>&2 2>&3 > /dev/null) $(shell $(build_CXX) -v -E - < /dev/null 3>&1 1>&2 2>&3 > /dev/null) +build_id_string+=$(shell $(build_RANLIB) --version 2>/dev/null) $(filter RANLIB_%,$(full_env)) +build_id_string+=$(shell $(build_STRIP) --version 2>/dev/null) $(filter STRIP_%,$(full_env)) $(host_arch)_$(host_os)_id_string:=$(HOST_ID_SALT) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_CC) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_AR) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_CXX) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_RANLIB) --version 2>/dev/null) -$(host_arch)_$(host_os)_id_string+=$(shell $(host_STRIP) --version 2>/dev/null) +$(host_arch)_$(host_os)_id_string+=$(shell $(host_CC) -v < /dev/null 3>&1 1>&2 2>&3 > /dev/null) $(shell $(host_CC) -v -E - < /dev/null 3>&1 1>&2 2>&3 > /dev/null) +$(host_arch)_$(host_os)_id_string+=$(shell $(host_AR) --version 2>/dev/null) $(filter AR_%,$(full_env)) ZERO_AR_DATE=$(ZERO_AR_DATE) +$(host_arch)_$(host_os)_id_string+=$(shell $(host_CXX) -v < /dev/null 3>&1 1>&2 2>&3 > /dev/null) $(shell $(host_CXX) -v -E - < /dev/null 3>&1 1>&2 2>&3 > /dev/null) +$(host_arch)_$(host_os)_id_string+=$(shell $(host_RANLIB) --version 2>/dev/null) $(filter RANLIB_%,$(full_env)) +$(host_arch)_$(host_os)_id_string+=$(shell $(host_STRIP) --version 2>/dev/null) $(filter STRIP_%,$(full_env)) ifneq ($(strip $(FORCE_USE_SYSTEM_CLANG)),) # Make sure that cache is invalidated when switching between system and @@ -133,6 +141,9 @@ build_id_string+=system_clang $(host_arch)_$(host_os)_id_string+=system_clang endif +build_id_string+=GUIX_ENVIRONMENT=$(GUIX_ENVIRONMENT) +$(host_arch)_$(host_os)_id_string+=GUIX_ENVIRONMENT=$(GUIX_ENVIRONMENT) + qrencode_packages_$(NO_QR) = $(qrencode_packages) qt_packages_$(NO_QT) = $(qt_packages) $(qt_$(host_os)_packages) $(qt_$(host_arch)_$(host_os)_packages) $(qrencode_packages_) @@ -265,7 +276,7 @@ install: check-packages $(host_prefix)/share/config.site download-one: check-sources $(all_sources) download-osx: - @$(MAKE) -s HOST=x86_64-apple-darwin14 download-one + @$(MAKE) -s HOST=x86_64-apple-darwin download-one download-linux: @$(MAKE) -s HOST=x86_64-unknown-linux-gnu download-one download-win: diff --git a/depends/packages/boost.mk b/depends/packages/boost.mk index ff8a252db9..29a3efdfe6 100644 --- a/depends/packages/boost.mk +++ b/depends/packages/boost.mk @@ -22,7 +22,7 @@ $(package)_toolset_$(host_os)=clang else $(package)_toolset_$(host_os)=gcc endif -$(package)_config_libraries=filesystem,system,thread,test +$(package)_config_libraries=filesystem,system,test $(package)_cxxflags=-std=c++17 -fvisibility=hidden $(package)_cxxflags_linux=-fPIC $(package)_cxxflags_android=-fPIC diff --git a/doc/REST-interface.md b/doc/REST-interface.md index 6237734390..ea06952af4 100644 --- a/doc/REST-interface.md +++ b/doc/REST-interface.md @@ -111,12 +111,7 @@ $ curl localhost:18332/rest/getutxos/checkmempool/b2cdfd7b89def827ff8af7cd9bff76 Returns various information about the TX mempool.
Only supports JSON as output format. -* loaded : (boolean) if the mempool is fully loaded -* size : (numeric) the number of transactions in the TX mempool -* bytes : (numeric) size of the TX mempool in bytes -* usage : (numeric) total TX mempool memory usage -* maxmempool : (numeric) maximum memory usage for the mempool in bytes -* mempoolminfee : (numeric) minimum feerate (BTC per KB) for tx to be accepted +Refer to the `getmempoolinfo` RPC for documentation of the fields. `GET /rest/mempool/contents.json` diff --git a/doc/bips.md b/doc/bips.md index 8c20533c9b..a5e9a6c020 100644 --- a/doc/bips.md +++ b/doc/bips.md @@ -15,6 +15,9 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.21.0**): * [`BIP 35`](https://github.com/bitcoin/bips/blob/master/bip-0035.mediawiki): The 'mempool' protocol message (and the protocol version bump to 60002) has been implemented since **v0.7.0** ([PR #1641](https://github.com/bitcoin/bitcoin/pull/1641)). As of **v0.13.0**, this is only available for `NODE_BLOOM` (BIP 111) peers. * [`BIP 37`](https://github.com/bitcoin/bips/blob/master/bip-0037.mediawiki): The bloom filtering for transaction relaying, partial Merkle trees for blocks, and the protocol version bump to 70001 (enabling low-bandwidth SPV clients) has been implemented since **v0.8.0** ([PR #1795](https://github.com/bitcoin/bitcoin/pull/1795)). Disabled by default since **v0.19.0**, can be enabled by the `-peerbloomfilters` option. * [`BIP 42`](https://github.com/bitcoin/bips/blob/master/bip-0042.mediawiki): The bug that would have caused the subsidy schedule to resume after block 13440000 was fixed in **v0.9.2** ([PR #3842](https://github.com/bitcoin/bitcoin/pull/3842)). +* [`BIP 43`](https://github.com/bitcoin/bips/blob/master/bip-0043.mediawiki): The experimental descriptor wallets introduced in **v0.21.0** by default use the Hierarchical Deterministic Wallet derivation proposed by BIP 43. ([PR #16528](https://github.com/bitcoin/bitcoin/pull/16528)) +* [`BIP 44`](https://github.com/bitcoin/bips/blob/master/bip-0044.mediawiki): The experimental descriptor wallets introduced in **v0.21.0** by default use the Hierarchical Deterministic Wallet derivation proposed by BIP 44. ([PR #16528](https://github.com/bitcoin/bitcoin/pull/16528)) +* [`BIP 49`](https://github.com/bitcoin/bips/blob/master/bip-0049.mediawiki): The experimental descriptor wallets introduced in **v0.21.0** by default use the Hierarchical Deterministic Wallet derivation proposed by BIP 49. ([PR #16528](https://github.com/bitcoin/bitcoin/pull/16528)) * [`BIP 61`](https://github.com/bitcoin/bips/blob/master/bip-0061.mediawiki): The 'reject' protocol message (and the protocol version bump to 70002) was added in **v0.9.0** ([PR #3185](https://github.com/bitcoin/bitcoin/pull/3185)). Starting **v0.17.0**, whether to send reject messages can be configured with the `-enablebip61` option, and support is deprecated (disabled by default) as of **v0.18.0**. Support was removed in **v0.20.0** ([PR #15437](https://github.com/bitcoin/bitcoin/pull/15437)). * [`BIP 65`](https://github.com/bitcoin/bips/blob/master/bip-0065.mediawiki): The CHECKLOCKTIMEVERIFY softfork was merged in **v0.12.0** ([PR #6351](https://github.com/bitcoin/bitcoin/pull/6351)), and backported to **v0.11.2** and **v0.10.4**. Mempool-only CLTV was added in [PR #6124](https://github.com/bitcoin/bitcoin/pull/6124). 
* [`BIP 66`](https://github.com/bitcoin/bips/blob/master/bip-0066.mediawiki): The strict DER rules and associated version 3 blocks have been implemented since **v0.10.0** ([PR #5713](https://github.com/bitcoin/bitcoin/pull/5713)). @@ -24,6 +27,7 @@ BIPs that are implemented by Bitcoin Core (up-to-date up to **v0.21.0**): Support can be optionally disabled at build time since **v0.18.0** ([PR 14451](https://github.com/bitcoin/bitcoin/pull/14451)), and it is disabled by default at build time since **v0.19.0** ([PR #15584](https://github.com/bitcoin/bitcoin/pull/15584)). It has been removed as of **v0.20.0** ([PR 17165](https://github.com/bitcoin/bitcoin/pull/17165)). +* [`BIP 84`](https://github.com/bitcoin/bips/blob/master/bip-0084.mediawiki): The experimental descriptor wallets introduced in **v0.21.0** by default use the Hierarchical Deterministic Wallet derivation proposed by BIP 84. ([PR #16528](https://github.com/bitcoin/bitcoin/pull/16528)) * [`BIP 90`](https://github.com/bitcoin/bips/blob/master/bip-0090.mediawiki): Trigger mechanism for activation of BIPs 34, 65, and 66 has been simplified to block height checks since **v0.14.0** ([PR #8391](https://github.com/bitcoin/bitcoin/pull/8391)). * [`BIP 111`](https://github.com/bitcoin/bips/blob/master/bip-0111.mediawiki): `NODE_BLOOM` service bit added, and enforced for all peer versions as of **v0.13.0** ([PR #6579](https://github.com/bitcoin/bitcoin/pull/6579) and [PR #6641](https://github.com/bitcoin/bitcoin/pull/6641)). * [`BIP 112`](https://github.com/bitcoin/bips/blob/master/bip-0112.mediawiki): The CHECKSEQUENCEVERIFY opcode has been implemented since **v0.12.1** ([PR #7524](https://github.com/bitcoin/bitcoin/pull/7524)), and has been *buried* since **v0.19.0** ([PR #16060](https://github.com/bitcoin/bitcoin/pull/16060)). diff --git a/doc/build-unix.md b/doc/build-unix.md index 5c24886dbf..07fb9c453e 100644 --- a/doc/build-unix.md +++ b/doc/build-unix.md @@ -9,7 +9,7 @@ Note Always use absolute paths to configure and compile Bitcoin Core and the dependencies. For example, when specifying the path of the dependency: - ../dist/configure --enable-cxx --disable-shared --with-pic --prefix=$BDB_PREFIX + ../dist/configure --enable-cxx --disable-shared --with-pic --prefix=$BDB_PREFIX Here BDB_PREFIX must be an absolute path - it is defined using $(pwd) which ensures the usage of the absolute path. @@ -82,7 +82,7 @@ Build requirements: Now, you can either build from self-compiled [depends](/depends/README.md) or install the required dependencies: - sudo apt-get install libevent-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev libboost-thread-dev + sudo apt-get install libevent-dev libboost-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev BerkeleyDB is required for the wallet. @@ -166,9 +166,9 @@ miniupnpc https://miniupnp.tuxfamily.org/files/). UPnP support is compiled in and turned off by default. See the configure options for UPnP behavior desired: - --without-miniupnpc No UPnP support, miniupnp not required - --disable-upnp-default (the default) UPnP support turned off by default at runtime - --enable-upnp-default UPnP support turned on by default at runtime + --without-miniupnpc No UPnP support, miniupnp not required + --disable-upnp-default (the default) UPnP support turned off by default at runtime + --enable-upnp-default UPnP support turned on by default at runtime libnatpmp --------- @@ -177,9 +177,9 @@ libnatpmp from [here](https://miniupnp.tuxfamily.org/files/). 
NAT-PMP support is compiled in and turned off by default. See the configure options for NAT-PMP behavior desired: - --without-natpmp No NAT-PMP support, libnatpmp not required - --disable-natpmp-default (the default) NAT-PMP support turned off by default at runtime - --enable-natpmp-default NAT-PMP support turned on by default at runtime + --without-natpmp No NAT-PMP support, libnatpmp not required + --disable-natpmp-default (the default) NAT-PMP support turned off by default at runtime + --enable-natpmp-default NAT-PMP support turned on by default at runtime Berkeley DB ----------- @@ -199,9 +199,9 @@ Boost ----- If you need to build Boost yourself: - sudo su - ./bootstrap.sh - ./bjam install + sudo su + ./bootstrap.sh + ./bjam install Security @@ -212,8 +212,8 @@ This can be disabled with: Hardening Flags: - ./configure --enable-hardening - ./configure --disable-hardening + ./configure --enable-hardening + ./configure --disable-hardening Hardening enables the following features: @@ -228,7 +228,7 @@ Hardening enables the following features: To test that you have built PIE executable, install scanelf, part of paxutils, and use: - scanelf -e ./bitcoin + scanelf -e ./bitcoin The output should contain: @@ -245,8 +245,8 @@ Hardening enables the following features: `scanelf -e ./bitcoin` The output should contain: - STK/REL/PTL - RW- R-- RW- + STK/REL/PTL + RW- R-- RW- The STK RW- means that the stack is readable and writeable but not executable. diff --git a/doc/descriptors.md b/doc/descriptors.md index 63acb9167f..c4fc2a66bf 100644 --- a/doc/descriptors.md +++ b/doc/descriptors.md @@ -191,7 +191,7 @@ steps, or for dumping wallet descriptors including private key material. In order to easily represent the sets of scripts currently supported by existing Bitcoin Core wallets, a convenience function `combo` is provided, which takes as input a public key, and describes a set of P2PK, -P2PKH, P2WPKH, and P2SH-P2WPH scripts for that key. In case the key is +P2PKH, P2WPKH, and P2SH-P2WPKH scripts for that key. In case the key is uncompressed, the set only includes P2PK and P2PKH scripts. ### Checksums diff --git a/doc/developer-notes.md b/doc/developer-notes.md index 596f65cf10..8f2d7af089 100644 --- a/doc/developer-notes.md +++ b/doc/developer-notes.md @@ -75,6 +75,11 @@ tool to clean up patches automatically before submission. on the same line as the `if`, without braces. In every other case, braces are required, and the `then` and `else` clauses must appear correctly indented on a new line. + - There's no hard limit on line width, but prefer to keep lines to <100 + characters if doing so does not decrease readability. Break up long + function declarations over multiple lines using the Clang Format + [AlignAfterOpenBracket](https://clang.llvm.org/docs/ClangFormatStyleOptions.html) + style option. - **Symbol naming conventions**. These are preferred in new code, but are not required when doing so would need changes to significant pieces of existing @@ -780,6 +785,11 @@ Threads and synchronization get compile-time warnings about potential race conditions in code. Combine annotations in function declarations with run-time asserts in function definitions: + - In functions that are declared separately from where they are defined, the + thread safety annotations should be added exclusively to the function + declaration. Annotations on the definition could lead to false positives + (lack of compile failure) at call sites between the two. 
+ ```C++ // txmempool.h class CTxMemPool diff --git a/doc/fuzzing.md b/doc/fuzzing.md index 80ce821091..87df2bbbb9 100644 --- a/doc/fuzzing.md +++ b/doc/fuzzing.md @@ -157,3 +157,77 @@ $ FUZZ=process_message honggfuzz/honggfuzz -i inputs/ -- src/test/fuzz/fuzz ``` Read the [Honggfuzz documentation](https://github.com/google/honggfuzz/blob/master/docs/USAGE.md) for more information. + +## Fuzzing the Bitcoin Core P2P layer using Honggfuzz NetDriver + +Honggfuzz NetDriver allows for very easy fuzzing of TCP servers such as Bitcoin +Core without having to write any custom fuzzing harness. The `bitcoind` server +process is largely fuzzed without modification. + +This makes the fuzzing highly realistic: a bug reachable by the fuzzer is likely +also remotely triggerable by an untrusted peer. + +To quickly get started fuzzing the P2P layer using Honggfuzz NetDriver: + +```sh +$ mkdir bitcoin-honggfuzz-p2p/ +$ cd bitcoin-honggfuzz-p2p/ +$ git clone https://github.com/bitcoin/bitcoin +$ cd bitcoin/ +$ ./autogen.sh +$ git clone https://github.com/google/honggfuzz +$ cd honggfuzz/ +$ make +$ cd .. +$ CC=$(pwd)/honggfuzz/hfuzz_cc/hfuzz-clang \ + CXX=$(pwd)/honggfuzz/hfuzz_cc/hfuzz-clang++ \ + ./configure --disable-wallet --with-gui=no \ + --with-sanitizers=address,undefined +$ git apply << "EOF" +diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp +index 455a82e39..2faa3f80f 100644 +--- a/src/bitcoind.cpp ++++ b/src/bitcoind.cpp +@@ -158,7 +158,11 @@ static bool AppInit(int argc, char* argv[]) + return fRet; + } + ++#ifdef HFND_FUZZING_ENTRY_FUNCTION_CXX ++HFND_FUZZING_ENTRY_FUNCTION_CXX(int argc, char* argv[]) ++#else + int main(int argc, char* argv[]) ++#endif + { + #ifdef WIN32 + util::WinCmdLineArgs winArgs; +diff --git a/src/net.cpp b/src/net.cpp +index cf987b699..636a4176a 100644 +--- a/src/net.cpp ++++ b/src/net.cpp +@@ -709,7 +709,7 @@ int V1TransportDeserializer::readHeader(const char *pch, unsigned int nBytes) + } + + // Check start string, network magic +- if (memcmp(hdr.pchMessageStart, m_chain_params.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) { ++ if (false && memcmp(hdr.pchMessageStart, m_chain_params.MessageStart(), CMessageHeader::MESSAGE_START_SIZE) != 0) { // skip network magic checking + LogPrint(BCLog::NET, "HEADER ERROR - MESSAGESTART (%s, %u bytes), received %s, peer=%d\n", hdr.GetCommand(), hdr.nMessageSize, HexStr(hdr.pchMessageStart), m_node_id); + return -1; + } +@@ -768,7 +768,7 @@ Optional<CNetMessage> V1TransportDeserializer::GetMessage(const std::chrono::mic + RandAddEvent(ReadLE32(hash.begin())); + + // Check checksum and header command string +- if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) { ++ if (false && memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) { // skip checksum checking + LogPrint(BCLog::NET, "CHECKSUM ERROR (%s, %u bytes), expected %s was %s, peer=%d\n", + SanitizeString(msg->m_command), msg->m_message_size, + HexStr(Span<uint8_t>(hash.begin(), hash.begin() + CMessageHeader::CHECKSUM_SIZE)), +EOF +$ make -C src/ bitcoind +$ mkdir -p inputs/ +$ honggfuzz/honggfuzz --exit_upon_crash --quiet --timeout 4 -n 1 -Q \ + -E HFND_TCP_PORT=18444 -f inputs/ -- \ + src/bitcoind -regtest -discover=0 -dns=0 -dnsseed=0 -listenonion=0 \ + -nodebuglogfile -bind=127.0.0.1:18444 -logthreadnames \ + -debug +``` diff --git a/doc/init.md b/doc/init.md index 99aa0a0def..399b819bf4 100644 --- a/doc/init.md +++ b/doc/init.md @@ -53,11 +53,12 @@ Paths All three configurations assume several paths that might need 
to be adjusted. -Binary: `/usr/bin/bitcoind` -Configuration file: `/etc/bitcoin/bitcoin.conf` -Data directory: `/var/lib/bitcoind` -PID file: `/var/run/bitcoind/bitcoind.pid` (OpenRC and Upstart) or `/run/bitcoind/bitcoind.pid` (systemd) -Lock file: `/var/lock/subsys/bitcoind` (CentOS) + Binary: /usr/bin/bitcoind + Configuration file: /etc/bitcoin/bitcoin.conf + Data directory: /var/lib/bitcoind + PID file: /var/run/bitcoind/bitcoind.pid (OpenRC and Upstart) or + /run/bitcoind/bitcoind.pid (systemd) + Lock file: /var/lock/subsys/bitcoind (CentOS) The PID directory (if applicable) and data directory should both be owned by the bitcoin user and group. It is advised for security reasons to make the @@ -83,10 +84,10 @@ OpenRC). ### macOS -Binary: `/usr/local/bin/bitcoind` -Configuration file: `~/Library/Application Support/Bitcoin/bitcoin.conf` -Data directory: `~/Library/Application Support/Bitcoin` -Lock file: `~/Library/Application Support/Bitcoin/.lock` + Binary: /usr/local/bin/bitcoind + Configuration file: ~/Library/Application Support/Bitcoin/bitcoin.conf + Data directory: ~/Library/Application Support/Bitcoin + Lock file: ~/Library/Application Support/Bitcoin/.lock Installing Service Configuration ----------------------------------- diff --git a/doc/release-notes.md b/doc/release-notes.md index 8f1e03e16b..0f248494c7 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -93,6 +93,10 @@ Tools and Utilities Wallet ------ +- A new `listdescriptors` RPC is available to inspect the contents of descriptor-enabled wallets. + The RPC returns public versions of all imported descriptors, including their timestamp and flags. + For ranged descriptors, it also returns the range boundaries and the next index to generate addresses from. (#20226) + GUI changes ----------- diff --git a/doc/tor.md b/doc/tor.md index 8a2aef2d07..e38ada5bd6 100644 --- a/doc/tor.md +++ b/doc/tor.md @@ -21,39 +21,39 @@ information in the debug log about your Tor configuration. The first step is running Bitcoin Core behind a Tor proxy. This will already anonymize all outgoing connections, but more is possible. - -proxy=ip:port Set the proxy server. If SOCKS5 is selected (default), this proxy - server will be used to try to reach .onion addresses as well. - You need to use -noonion or -onion=0 to explicitly disable - outbound access to onion services. - - -onion=ip:port Set the proxy server to use for Tor onion services. You do not - need to set this if it's the same as -proxy. You can use -onion=0 - to explicitly disable access to onion services. - Note: Only the -proxy option sets the proxy for DNS requests; - with -onion they will not route over Tor, so use -proxy if you - have privacy concerns. - - -listen When using -proxy, listening is disabled by default. If you want - to manually configure an onion service (see section 3), you'll - need to enable it explicitly. - - -connect=X When behind a Tor proxy, you can specify .onion addresses instead - -addnode=X of IP addresses or hostnames in these parameters. It requires - -seednode=X SOCKS5. In Tor mode, such addresses can also be exchanged with - other P2P nodes. - - -onlynet=onion Make outgoing connections only to .onion addresses. Incoming - connections are not affected by this option. This option can be - specified multiple times to allow multiple network types, e.g. - ipv4, ipv6 or onion. If you use this option with values other - than onion you *cannot* disable onion connections; outgoing onion - connections will be enabled when you use -proxy or -onion. 
Use - -noonion or -onion=0 if you want to be sure there are no outbound - onion connections over the default proxy or your defined -proxy. + -proxy=ip:port Set the proxy server. If SOCKS5 is selected (default), this proxy + server will be used to try to reach .onion addresses as well. + You need to use -noonion or -onion=0 to explicitly disable + outbound access to onion services. + + -onion=ip:port Set the proxy server to use for Tor onion services. You do not + need to set this if it's the same as -proxy. You can use -onion=0 + to explicitly disable access to onion services. + Note: Only the -proxy option sets the proxy for DNS requests; + with -onion they will not route over Tor, so use -proxy if you + have privacy concerns. + + -listen When using -proxy, listening is disabled by default. If you want + to manually configure an onion service (see section 3), you'll + need to enable it explicitly. + + -connect=X When behind a Tor proxy, you can specify .onion addresses instead + -addnode=X of IP addresses or hostnames in these parameters. It requires + -seednode=X SOCKS5. In Tor mode, such addresses can also be exchanged with + other P2P nodes. + + -onlynet=onion Make outgoing connections only to .onion addresses. Incoming + connections are not affected by this option. This option can be + specified multiple times to allow multiple network types, e.g. + ipv4, ipv6 or onion. If you use this option with values other + than onion you *cannot* disable onion connections; outgoing onion + connections will be enabled when you use -proxy or -onion. Use + -noonion or -onion=0 if you want to be sure there are no outbound + onion connections over the default proxy or your defined -proxy. In a typical situation, this suffices to run behind a Tor proxy: - ./bitcoind -proxy=127.0.0.1:9050 + ./bitcoind -proxy=127.0.0.1:9050 ## 2. Automatically create a Bitcoin Core onion service @@ -152,57 +152,57 @@ details). You can also manually configure your node to be reachable from the Tor network. Add these lines to your `/etc/tor/torrc` (or equivalent config file): - HiddenServiceDir /var/lib/tor/bitcoin-service/ - HiddenServicePort 8333 127.0.0.1:8334 + HiddenServiceDir /var/lib/tor/bitcoin-service/ + HiddenServicePort 8333 127.0.0.1:8334 The directory can be different of course, but virtual port numbers should be equal to your bitcoind's P2P listen port (8333 by default), and target addresses and ports should be equal to binding address and port for inbound Tor connections (127.0.0.1:8334 by default). - -externalip=X You can tell bitcoin about its publicly reachable addresses using - this option, and this can be an onion address. Given the above - configuration, you can find your onion address in - /var/lib/tor/bitcoin-service/hostname. For connections - coming from unroutable addresses (such as 127.0.0.1, where the - Tor proxy typically runs), onion addresses are given - preference for your node to advertise itself with. - - You can set multiple local addresses with -externalip. The - one that will be rumoured to a particular peer is the most - compatible one and also using heuristics, e.g. the address - with the most incoming connections, etc. - - -listen You'll need to enable listening for incoming connections, as this - is off by default behind a proxy. - - -discover When -externalip is specified, no attempt is made to discover local - IPv4 or IPv6 addresses. 
If you want to run a dual stack, reachable - from both Tor and IPv4 (or IPv6), you'll need to either pass your - other addresses using -externalip, or explicitly enable -discover. - Note that both addresses of a dual-stack system may be easily - linkable using traffic analysis. + -externalip=X You can tell bitcoin about its publicly reachable addresses using + this option, and this can be an onion address. Given the above + configuration, you can find your onion address in + /var/lib/tor/bitcoin-service/hostname. For connections + coming from unroutable addresses (such as 127.0.0.1, where the + Tor proxy typically runs), onion addresses are given + preference for your node to advertise itself with. + + You can set multiple local addresses with -externalip. The + one that will be rumoured to a particular peer is the most + compatible one and also using heuristics, e.g. the address + with the most incoming connections, etc. + + -listen You'll need to enable listening for incoming connections, as this + is off by default behind a proxy. + + -discover When -externalip is specified, no attempt is made to discover local + IPv4 or IPv6 addresses. If you want to run a dual stack, reachable + from both Tor and IPv4 (or IPv6), you'll need to either pass your + other addresses using -externalip, or explicitly enable -discover. + Note that both addresses of a dual-stack system may be easily + linkable using traffic analysis. In a typical situation, where you're only reachable via Tor, this should suffice: - ./bitcoind -proxy=127.0.0.1:9050 -externalip=7zvj7a2imdgkdbg4f2dryd5rgtrn7upivr5eeij4cicjh65pooxeshid.onion -listen + ./bitcoind -proxy=127.0.0.1:9050 -externalip=7zvj7a2imdgkdbg4f2dryd5rgtrn7upivr5eeij4cicjh65pooxeshid.onion -listen (obviously, replace the .onion address with your own). It should be noted that you still listen on all devices and another node could establish a clearnet connection, when knowing your address. To mitigate this, additionally bind the address of your Tor proxy: - ./bitcoind ... -bind=127.0.0.1 + ./bitcoind ... -bind=127.0.0.1 If you don't care too much about hiding your node, and want to be reachable on IPv4 as well, use `discover` instead: - ./bitcoind ... -discover + ./bitcoind ... -discover and open port 8333 on your firewall (or use port mapping, i.e., `-upnp` or `-natpmp`). If you only want to use Tor to reach .onion addresses, but not use it as a proxy for normal IPv4/IPv6 communication, use: - ./bitcoind -onion=127.0.0.1:9050 -externalip=7zvj7a2imdgkdbg4f2dryd5rgtrn7upivr5eeij4cicjh65pooxeshid.onion -discover + ./bitcoind -onion=127.0.0.1:9050 -externalip=7zvj7a2imdgkdbg4f2dryd5rgtrn7upivr5eeij4cicjh65pooxeshid.onion -discover ## 4. Privacy recommendations diff --git a/src/.clang-format b/src/.clang-format index ef7a0ef5c7..a69c57f3e0 100644 --- a/src/.clang-format +++ b/src/.clang-format @@ -11,7 +11,8 @@ AllowShortIfStatementsOnASingleLine: true AllowShortLoopsOnASingleLine: false AlwaysBreakBeforeMultilineStrings: false AlwaysBreakTemplateDeclarations: true -BinPackParameters: false +BinPackArguments: true +BinPackParameters: true BreakBeforeBinaryOperators: false BreakBeforeBraces: Custom BraceWrapping: diff --git a/src/Makefile.am b/src/Makefile.am index 2616eb8638..67efbbeae4 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -4,7 +4,7 @@ # Pattern rule to print variables, e.g. 
make print-top_srcdir print-%: - @echo $* = $($*) + @echo '$*' = '$($*)' DIST_SUBDIRS = secp256k1 univalue @@ -233,6 +233,7 @@ BITCOIN_CORE_H = \ util/check.h \ util/error.h \ util/fees.h \ + util/getuniquepath.h \ util/golombrice.h \ util/hasher.h \ util/macros.h \ @@ -242,6 +243,7 @@ BITCOIN_CORE_H = \ util/rbf.h \ util/ref.h \ util/settings.h \ + util/sock.h \ util/spanparsing.h \ util/string.h \ util/system.h \ @@ -556,7 +558,9 @@ libbitcoin_util_a_SOURCES = \ util/bytevectorhash.cpp \ util/error.cpp \ util/fees.cpp \ + util/getuniquepath.cpp \ util/hasher.cpp \ + util/sock.cpp \ util/system.cpp \ util/message.cpp \ util/moneystr.cpp \ @@ -683,7 +687,7 @@ endif bitcoin_util_SOURCES = bitcoin-util.cpp bitcoin_util_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) bitcoin_util_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) -bitcoin_util_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) +bitcoin_util_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) $(PTHREAD_FLAGS) if TARGET_WINDOWS bitcoin_util_SOURCES += bitcoin-util-res.rc diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include index 969f0ca411..59cfdb9839 100644 --- a/src/Makefile.qt.include +++ b/src/Makefile.qt.include @@ -164,6 +164,9 @@ BITCOIN_QT_H = \ qt/walletview.h \ qt/winshutdownmonitor.h +RES_FONTS = \ + qt/res/fonts/RobotoMono-Bold.ttf + RES_ICONS = \ qt/res/icons/add.png \ qt/res/icons/address-book.png \ @@ -290,7 +293,7 @@ qt_libbitcoinqt_a_CXXFLAGS = $(AM_CXXFLAGS) $(QT_PIE_FLAGS) qt_libbitcoinqt_a_OBJCXXFLAGS = $(AM_OBJCXXFLAGS) $(QT_PIE_FLAGS) qt_libbitcoinqt_a_SOURCES = $(BITCOIN_QT_CPP) $(BITCOIN_QT_H) $(QT_FORMS_UI) \ - $(QT_QRC) $(QT_QRC_LOCALE) $(QT_TS) $(RES_ICONS) $(RES_ANIMATION) + $(QT_QRC) $(QT_QRC_LOCALE) $(QT_TS) $(RES_FONTS) $(RES_ICONS) $(RES_ANIMATION) if TARGET_DARWIN qt_libbitcoinqt_a_SOURCES += $(BITCOIN_MM) endif @@ -361,7 +364,7 @@ $(QT_QRC_LOCALE_CPP): $(QT_QRC_LOCALE) $(QT_QM) $(SED) -e '/^\*\*.*Created:/d' -e '/^\*\*.*by:/d' > $@ @rm $(@D)/temp_$(<F) -$(QT_QRC_CPP): $(QT_QRC) $(QT_FORMS_H) $(RES_ICONS) $(RES_ANIMATION) +$(QT_QRC_CPP): $(QT_QRC) $(QT_FORMS_H) $(RES_FONTS) $(RES_ICONS) $(RES_ANIMATION) @test -f $(RCC) $(AM_V_GEN) QT_SELECT=$(QT_SELECT) $(RCC) -name bitcoin $< | \ $(SED) -e '/^\*\*.*Created:/d' -e '/^\*\*.*by:/d' > $@ diff --git a/src/Makefile.test.include b/src/Makefile.test.include index e9f9b73abe..e817bb2ee2 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -2,9 +2,11 @@ # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. 
-if ENABLE_FUZZ +if ENABLE_FUZZ_BINARY noinst_PROGRAMS += test/fuzz/fuzz -else +endif + +if !ENABLE_FUZZ bin_PROGRAMS += test/test_bitcoin endif @@ -50,6 +52,14 @@ FUZZ_SUITE_LD_COMMON = \ $(EVENT_LIBS) \ $(EVENT_PTHREADS_LIBS) +if USE_UPNP +FUZZ_SUITE_LD_COMMON += $(MINIUPNPC_LIBS) +endif + +if USE_NATPMP +FUZZ_SUITE_LD_COMMON += $(NATPMP_LIBS) +endif + # test_bitcoin binary # BITCOIN_TESTS =\ test/arith_uint256_tests.cpp \ @@ -114,6 +124,7 @@ BITCOIN_TESTS =\ test/sighash_tests.cpp \ test/sigopcount_tests.cpp \ test/skiplist_tests.cpp \ + test/sock_tests.cpp \ test/streams_tests.cpp \ test/sync_tests.cpp \ test/system_tests.cpp \ @@ -145,10 +156,16 @@ BITCOIN_TESTS += \ wallet/test/ismine_tests.cpp \ wallet/test/scriptpubkeyman_tests.cpp +FUZZ_SUITE_LD_COMMON +=\ + $(LIBBITCOIN_WALLET) \ + $(SQLITE_LIBS) \ + $(BDB_LIBS) + if USE_BDB BITCOIN_TESTS += wallet/test/db_tests.cpp endif + BITCOIN_TEST_SUITE += \ wallet/test/wallet_test_fixture.cpp \ wallet/test/wallet_test_fixture.h \ @@ -172,12 +189,12 @@ test_test_bitcoin_LDFLAGS = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) $( if ENABLE_ZMQ test_test_bitcoin_LDADD += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS) +FUZZ_SUITE_LD_COMMON += $(LIBBITCOIN_ZMQ) $(ZMQ_LIBS) endif -if ENABLE_FUZZ - FUZZ_SUITE_LDFLAGS_COMMON = $(RELDFLAGS) $(AM_LDFLAGS) $(LIBTOOL_APP_LDFLAGS) $(PTHREAD_FLAGS) +if ENABLE_FUZZ_BINARY test_fuzz_fuzz_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) test_fuzz_fuzz_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS) test_fuzz_fuzz_LDADD = $(FUZZ_SUITE_LD_COMMON) @@ -278,8 +295,7 @@ test_fuzz_fuzz_SOURCES = \ test/fuzz/tx_in.cpp \ test/fuzz/tx_out.cpp \ test/fuzz/txrequest.cpp - -endif # ENABLE_FUZZ +endif # ENABLE_FUZZ_BINARY nodist_test_test_bitcoin_SOURCES = $(GENERATED_TEST_FILES) diff --git a/src/Makefile.test_util.include b/src/Makefile.test_util.include index 0621da8ddf..1abfb667a0 100644 --- a/src/Makefile.test_util.include +++ b/src/Makefile.test_util.include @@ -12,6 +12,7 @@ TEST_UTIL_H = \ test/util/logging.h \ test/util/mining.h \ test/util/net.h \ + test/util/script.h \ test/util/setup_common.h \ test/util/str.h \ test/util/transaction_utils.h \ diff --git a/src/addrman.h b/src/addrman.h index 9ac67b7af6..92a5570953 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -335,22 +335,20 @@ public: * * nNew * * nTried * * number of "new" buckets XOR 2**30 - * * all nNew addrinfos in vvNew - * * all nTried addrinfos in vvTried - * * for each bucket: + * * all new addresses (total count: nNew) + * * all tried addresses (total count: nTried) + * * for each new bucket: * * number of elements - * * for each element: index + * * for each element: index in the serialized "all new addresses" + * * asmap checksum * * 2**30 is xorred with the number of buckets to make addrman deserializer v0 detect it * as incompatible. This is necessary because it did not check the version number on * deserialization. * - * Notice that vvTried, mapAddr and vVector are never encoded explicitly; + * vvNew, vvTried, mapInfo, mapAddr and vRandom are never encoded explicitly; * they are instead reconstructed from the other information. * - * vvNew is serialized, but only used if ADDRMAN_UNKNOWN_BUCKET_COUNT didn't change, - * otherwise it is reconstructed as well. - * * This format is more complex, but significantly smaller (at most 1.5 MiB), and supports * changes to the ADDRMAN_ parameters without breaking the on-disk structure. 
* @@ -413,13 +411,13 @@ public: } } } - // Store asmap version after bucket entries so that it + // Store asmap checksum after bucket entries so that it // can be ignored by older clients for backward compatibility. - uint256 asmap_version; + uint256 asmap_checksum; if (m_asmap.size() != 0) { - asmap_version = SerializeHash(m_asmap); + asmap_checksum = SerializeHash(m_asmap); } - s << asmap_version; + s << asmap_checksum; } template <typename Stream> @@ -500,47 +498,63 @@ public: nTried -= nLost; // Store positions in the new table buckets to apply later (if possible). - std::map<int, int> entryToBucket; // Represents which entry belonged to which bucket when serializing - - for (int bucket = 0; bucket < nUBuckets; bucket++) { - int nSize = 0; - s >> nSize; - for (int n = 0; n < nSize; n++) { - int nIndex = 0; - s >> nIndex; - if (nIndex >= 0 && nIndex < nNew) { - entryToBucket[nIndex] = bucket; + // An entry may appear in up to ADDRMAN_NEW_BUCKETS_PER_ADDRESS buckets, + // so we store all bucket-entry_index pairs to iterate through later. + std::vector<std::pair<int, int>> bucket_entries; + + for (int bucket = 0; bucket < nUBuckets; ++bucket) { + int num_entries{0}; + s >> num_entries; + for (int n = 0; n < num_entries; ++n) { + int entry_index{0}; + s >> entry_index; + if (entry_index >= 0 && entry_index < nNew) { + bucket_entries.emplace_back(bucket, entry_index); } } } - uint256 supplied_asmap_version; + // If the bucket count and asmap checksum haven't changed, then attempt + // to restore the entries to the buckets/positions they were in before + // serialization. + uint256 supplied_asmap_checksum; if (m_asmap.size() != 0) { - supplied_asmap_version = SerializeHash(m_asmap); + supplied_asmap_checksum = SerializeHash(m_asmap); } - uint256 serialized_asmap_version; + uint256 serialized_asmap_checksum; if (format >= Format::V2_ASMAP) { - s >> serialized_asmap_version; + s >> serialized_asmap_checksum; } + const bool restore_bucketing{nUBuckets == ADDRMAN_NEW_BUCKET_COUNT && + serialized_asmap_checksum == supplied_asmap_checksum}; - for (int n = 0; n < nNew; n++) { - CAddrInfo &info = mapInfo[n]; - int bucket = entryToBucket[n]; - int nUBucketPos = info.GetBucketPosition(nKey, true, bucket); - if (format >= Format::V2_ASMAP && nUBuckets == ADDRMAN_NEW_BUCKET_COUNT && vvNew[bucket][nUBucketPos] == -1 && - info.nRefCount < ADDRMAN_NEW_BUCKETS_PER_ADDRESS && serialized_asmap_version == supplied_asmap_version) { + if (!restore_bucketing) { + LogPrint(BCLog::ADDRMAN, "Bucketing method was updated, re-bucketing addrman entries from disk\n"); + } + + for (auto bucket_entry : bucket_entries) { + int bucket{bucket_entry.first}; + const int entry_index{bucket_entry.second}; + CAddrInfo& info = mapInfo[entry_index]; + + // The entry shouldn't appear in more than + // ADDRMAN_NEW_BUCKETS_PER_ADDRESS. If it has already, just skip + // this bucket_entry. 
+ if (info.nRefCount >= ADDRMAN_NEW_BUCKETS_PER_ADDRESS) continue; + + int bucket_position = info.GetBucketPosition(nKey, true, bucket); + if (restore_bucketing && vvNew[bucket][bucket_position] == -1) { // Bucketing has not changed, using existing bucket positions for the new table - vvNew[bucket][nUBucketPos] = n; - info.nRefCount++; + vvNew[bucket][bucket_position] = entry_index; + ++info.nRefCount; } else { - // In case the new table data cannot be used (format unknown, bucket count wrong or new asmap), + // In case the new table data cannot be used (bucket count wrong or new asmap), // try to give them a reference based on their primary source address. - LogPrint(BCLog::ADDRMAN, "Bucketing method was updated, re-bucketing addrman entries from disk\n"); bucket = info.GetNewBucket(nKey, m_asmap); - nUBucketPos = info.GetBucketPosition(nKey, true, bucket); - if (vvNew[bucket][nUBucketPos] == -1) { - vvNew[bucket][nUBucketPos] = n; - info.nRefCount++; + bucket_position = info.GetBucketPosition(nKey, true, bucket); + if (vvNew[bucket][bucket_position] == -1) { + vvNew[bucket][bucket_position] = entry_index; + ++info.nRefCount; } } } diff --git a/src/base58.cpp b/src/base58.cpp index 65e373283c..fb04673c5c 100644 --- a/src/base58.cpp +++ b/src/base58.cpp @@ -52,7 +52,7 @@ static const int8_t mapBase58[256] = { int size = strlen(psz) * 733 /1000 + 1; // log(58) / log(256), rounded up. std::vector<unsigned char> b256(size); // Process the characters. - static_assert(sizeof(mapBase58)/sizeof(mapBase58[0]) == 256, "mapBase58.size() should be 256"); // guarantee not out of range + static_assert(std::size(mapBase58) == 256, "mapBase58.size() should be 256"); // guarantee not out of range while (*psz && !IsSpace(*psz)) { // Decode base58 character int carry = mapBase58[(uint8_t)*psz]; diff --git a/src/bench/bench.h b/src/bench/bench.h index bafc7f8716..22f06d8cb8 100644 --- a/src/bench/bench.h +++ b/src/bench/bench.h @@ -5,6 +5,8 @@ #ifndef BITCOIN_BENCH_BENCH_H #define BITCOIN_BENCH_BENCH_H +#include <util/macros.h> + #include <chrono> #include <functional> #include <map> @@ -12,8 +14,6 @@ #include <vector> #include <bench/nanobench.h> -#include <boost/preprocessor/cat.hpp> -#include <boost/preprocessor/stringize.hpp> /* * Usage: @@ -56,8 +56,8 @@ public: static void RunAll(const Args& args); }; } -// BENCHMARK(foo) expands to: benchmark::BenchRunner bench_11foo("foo"); +// BENCHMARK(foo) expands to: benchmark::BenchRunner bench_11foo("foo", foo); #define BENCHMARK(n) \ - benchmark::BenchRunner BOOST_PP_CAT(bench_, BOOST_PP_CAT(__LINE__, n))(BOOST_PP_STRINGIZE(n), n); + benchmark::BenchRunner PASTE2(bench_, PASTE2(__LINE__, n))(STRINGIZE(n), n); #endif // BITCOIN_BENCH_BENCH_H diff --git a/src/bench/block_assemble.cpp b/src/bench/block_assemble.cpp index af5a82f69f..8f656c44d9 100644 --- a/src/bench/block_assemble.cpp +++ b/src/bench/block_assemble.cpp @@ -48,9 +48,8 @@ static void AssembleBlock(benchmark::Bench& bench) LOCK(::cs_main); // Required for ::AcceptToMemoryPool. 
for (const auto& txr : txs) { - TxValidationState state; - bool ret{::AcceptToMemoryPool(*test_setup.m_node.mempool, state, txr, nullptr /* plTxnReplaced */, false /* bypass_limits */)}; - assert(ret); + const MempoolAcceptResult res = ::AcceptToMemoryPool(::ChainstateActive(), *test_setup.m_node.mempool, txr, false /* bypass_limits */); + assert(res.m_result_type == MempoolAcceptResult::ResultType::VALID); } } diff --git a/src/bench/data.cpp b/src/bench/data.cpp index 0ae4c7cad4..481e372105 100644 --- a/src/bench/data.cpp +++ b/src/bench/data.cpp @@ -8,7 +8,7 @@ namespace benchmark { namespace data { #include <bench/data/block413567.raw.h> -const std::vector<uint8_t> block413567{block413567_raw, block413567_raw + sizeof(block413567_raw) / sizeof(block413567_raw[0])}; +const std::vector<uint8_t> block413567{std::begin(block413567_raw), std::end(block413567_raw)}; } // namespace data } // namespace benchmark diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index fa41208a31..0830cb54cb 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -300,9 +300,13 @@ class NetinfoRequestHandler : public BaseRequestHandler { private: static constexpr int8_t UNKNOWN_NETWORK{-1}; - static constexpr uint8_t m_networks_size{3}; - const std::array<std::string, m_networks_size> m_networks{{"ipv4", "ipv6", "onion"}}; - std::array<std::array<uint16_t, m_networks_size + 2>, 3> m_counts{{{}}}; //!< Peer counts by (in/out/total, networks/total/block-relay) + static constexpr int8_t NET_I2P{3}; // pos of "i2p" in m_networks + static constexpr uint8_t m_networks_size{4}; + static constexpr uint8_t MAX_DETAIL_LEVEL{4}; + const std::array<std::string, m_networks_size> m_networks{{"ipv4", "ipv6", "onion", "i2p"}}; + std::array<std::array<uint16_t, m_networks_size + 1>, 3> m_counts{{{}}}; //!< Peer counts by (in/out/total, networks/total) + uint8_t m_block_relay_peers_count{0}; + uint8_t m_manual_peers_count{0}; int8_t NetworkStringToId(const std::string& str) const { for (uint8_t i = 0; i < m_networks_size; ++i) { @@ -316,12 +320,14 @@ private: bool IsAddressSelected() const { return m_details_level == 2 || m_details_level == 4; } bool IsVersionSelected() const { return m_details_level == 3 || m_details_level == 4; } bool m_is_asmap_on{false}; + bool m_is_i2p_on{false}; size_t m_max_addr_length{0}; - size_t m_max_age_length{4}; + size_t m_max_age_length{3}; size_t m_max_id_length{2}; struct Peer { std::string addr; std::string sub_version; + std::string conn_type; std::string network; std::string age; double min_ping; @@ -333,6 +339,8 @@ private: int id; int mapped_as; int version; + bool is_bip152_hb_from; + bool is_bip152_hb_to; bool is_block_relay; bool is_outbound; bool operator<(const Peer& rhs) const { return std::tie(is_outbound, min_ping) < std::tie(rhs.is_outbound, rhs.min_ping); } @@ -351,6 +359,14 @@ private: const double milliseconds{round(1000 * seconds)}; return milliseconds > 999999 ? 
"-" : ToString(milliseconds); } + std::string ConnectionTypeForNetinfo(const std::string& conn_type) const + { + if (conn_type == "outbound-full-relay") return "full"; + if (conn_type == "block-relay-only") return "block"; + if (conn_type == "manual" || conn_type == "feeler") return conn_type; + if (conn_type == "addr-fetch") return "addr"; + return ""; + } const UniValue NetinfoHelp() { return std::string{ @@ -379,6 +395,9 @@ private: " type Type of peer connection\n" " \"full\" - full relay, the default\n" " \"block\" - block relay; like full relay but does not relay transactions or addresses\n" + " \"manual\" - peer we manually added using RPC addnode or the -addnode/-connect config options\n" + " \"feeler\" - short-lived connection for testing addresses\n" + " \"addr\" - address fetch; short-lived connection for requesting addresses\n" " net Network the peer connected through (\"ipv4\", \"ipv6\", \"onion\", \"i2p\", or \"cjdns\")\n" " mping Minimum observed ping time, in milliseconds (ms)\n" " ping Last observed ping time, in milliseconds (ms)\n" @@ -386,6 +405,9 @@ private: " recv Time since last message received from the peer, in seconds\n" " txn Time since last novel transaction received from the peer and accepted into our mempool, in minutes\n" " blk Time since last novel block passing initial validity checks received from the peer, in minutes\n" + " hb High-bandwidth BIP152 compact block relay\n" + " \".\" (to) - we selected the peer as a high-bandwidth peer\n" + " \"*\" (from) - the peer selected us as a high-bandwidth peer\n" " age Duration of connection to the peer, in minutes\n" " asmap Mapped AS (Autonomous System) number in the BGP route to the peer, used for diversifying\n" " peer selection (only displayed if the -asmap config option is set)\n" @@ -393,7 +415,7 @@ private: " address IP address and port of the peer\n" " version Peer version and subversion concatenated, e.g. 
\"70016/Satoshi:21.0.0/\"\n\n" "* The connection counts table displays the number of peers by direction, network, and the totals\n" - " for each, as well as a column for block relay peers.\n\n" + " for each, as well as two special outbound columns for block relay peers and manual peers.\n\n" "* The local addresses table lists each local address broadcast by the node, the port, and the score.\n\n" "Examples:\n\n" "Connection counts and local addresses only\n" @@ -418,7 +440,7 @@ public: if (!args.empty()) { uint8_t n{0}; if (ParseUInt8(args.at(0), &n)) { - m_details_level = n; + m_details_level = std::min(n, MAX_DETAIL_LEVEL); } else if (args.at(0) == "help") { m_is_help_requested = true; } else { @@ -450,16 +472,16 @@ public: const std::string network{peer["network"].get_str()}; const int8_t network_id{NetworkStringToId(network)}; if (network_id == UNKNOWN_NETWORK) continue; + m_is_i2p_on |= (network_id == NET_I2P); const bool is_outbound{!peer["inbound"].get_bool()}; const bool is_block_relay{!peer["relaytxes"].get_bool()}; + const std::string conn_type{peer["connection_type"].get_str()}; ++m_counts.at(is_outbound).at(network_id); // in/out by network ++m_counts.at(is_outbound).at(m_networks_size); // in/out overall ++m_counts.at(2).at(network_id); // total by network ++m_counts.at(2).at(m_networks_size); // total overall - if (is_block_relay) { - ++m_counts.at(is_outbound).at(m_networks_size + 1); // in/out block-relay - ++m_counts.at(2).at(m_networks_size + 1); // total block-relay - } + if (conn_type == "block-relay-only") ++m_block_relay_peers_count; + if (conn_type == "manual") ++m_manual_peers_count; if (DetailsRequested()) { // Push data for this peer to the peers vector. const int peer_id{peer["id"].get_int()}; @@ -475,7 +497,9 @@ public: const std::string addr{peer["addr"].get_str()}; const std::string age{conn_time == 0 ? "" : ToString((m_time_now - conn_time) / 60)}; const std::string sub_version{peer["subver"].get_str()}; - m_peers.push_back({addr, sub_version, network, age, min_ping, ping, last_blck, last_recv, last_send, last_trxn, peer_id, mapped_as, version, is_block_relay, is_outbound}); + const bool is_bip152_hb_from{peer["bip152_hb_from"].get_bool()}; + const bool is_bip152_hb_to{peer["bip152_hb_to"].get_bool()}; + m_peers.push_back({addr, sub_version, conn_type, network, age, min_ping, ping, last_blck, last_recv, last_send, last_trxn, peer_id, mapped_as, version, is_bip152_hb_from, is_bip152_hb_to, is_block_relay, is_outbound}); m_max_addr_length = std::max(addr.length() + 1, m_max_addr_length); m_max_age_length = std::max(age.length(), m_max_age_length); m_max_id_length = std::max(ToString(peer_id).length(), m_max_id_length); @@ -489,15 +513,15 @@ public: // Report detailed peer connections list sorted by direction and minimum ping time. if (DetailsRequested() && !m_peers.empty()) { std::sort(m_peers.begin(), m_peers.end()); - result += strprintf("<-> relay net mping ping send recv txn blk %*s ", m_max_age_length, "age"); + result += strprintf("<-> type net mping ping send recv txn blk hb %*s ", m_max_age_length, "age"); if (m_is_asmap_on) result += " asmap "; result += strprintf("%*s %-*s%s\n", m_max_id_length, "id", IsAddressSelected() ? m_max_addr_length : 0, IsAddressSelected() ? "address" : "", IsVersionSelected() ? 
"version" : ""); for (const Peer& peer : m_peers) { std::string version{ToString(peer.version) + peer.sub_version}; result += strprintf( - "%3s %5s %5s%7s%7s%5s%5s%5s%5s %*s%*i %*s %-*s%s\n", + "%3s %6s %5s%7s%7s%5s%5s%5s%5s %2s %*s%*i %*s %-*s%s\n", peer.is_outbound ? "out" : "in", - peer.is_block_relay ? "block" : "full", + ConnectionTypeForNetinfo(peer.conn_type), peer.network, PingTimeToString(peer.min_ping), PingTimeToString(peer.ping), @@ -505,6 +529,7 @@ public: peer.last_recv == 0 ? "" : ToString(m_time_now - peer.last_recv), peer.last_trxn == 0 ? "" : ToString((m_time_now - peer.last_trxn) / 60), peer.last_blck == 0 ? "" : ToString((m_time_now - peer.last_blck) / 60), + strprintf("%s%s", peer.is_bip152_hb_to ? "." : " ", peer.is_bip152_hb_from ? "*" : " "), m_max_age_length, // variable spacing peer.age, m_is_asmap_on ? 7 : 0, // variable spacing @@ -515,18 +540,27 @@ public: IsAddressSelected() ? peer.addr : "", IsVersionSelected() && version != "0" ? version : ""); } - result += strprintf(" ms ms sec sec min min %*s\n\n", m_max_age_length, "min"); + result += strprintf(" ms ms sec sec min min %*s\n\n", m_max_age_length, "min"); } // Report peer connection totals by type. - result += " ipv4 ipv6 onion total block-relay\n"; + result += " ipv4 ipv6 onion"; + if (m_is_i2p_on) result += " i2p"; + result += " total block"; + if (m_manual_peers_count) result += " manual"; const std::array<std::string, 3> rows{{"in", "out", "total"}}; - for (uint8_t i = 0; i < m_networks_size; ++i) { - result += strprintf("%-5s %5i %5i %5i %5i %5i\n", rows.at(i), m_counts.at(i).at(0), m_counts.at(i).at(1), m_counts.at(i).at(2), m_counts.at(i).at(m_networks_size), m_counts.at(i).at(m_networks_size + 1)); + for (uint8_t i = 0; i < 3; ++i) { + result += strprintf("\n%-5s %5i %5i %5i", rows.at(i), m_counts.at(i).at(0), m_counts.at(i).at(1), m_counts.at(i).at(2)); // ipv4/ipv6/onion peers counts + if (m_is_i2p_on) result += strprintf(" %5i", m_counts.at(i).at(3)); // i2p peers count + result += strprintf(" %5i", m_counts.at(i).at(m_networks_size)); // total peers count + if (i == 1) { // the outbound row has two extra columns for block relay and manual peer counts + result += strprintf(" %5i", m_block_relay_peers_count); + if (m_manual_peers_count) result += strprintf(" %5i", m_manual_peers_count); + } } // Report local addresses, ports, and scores. - result += "\nLocal addresses"; + result += "\n\nLocal addresses"; const std::vector<UniValue>& local_addrs{networkinfo["localaddresses"].getValues()}; if (local_addrs.empty()) { result += ": n/a\n"; diff --git a/src/bitcoin-wallet.cpp b/src/bitcoin-wallet.cpp index 3e8e5fc7bc..b84d909b07 100644 --- a/src/bitcoin-wallet.cpp +++ b/src/bitcoin-wallet.cpp @@ -33,51 +33,52 @@ static void SetupWalletToolArgs(ArgsManager& argsman) argsman.AddArg("-format=<format>", "The format of the wallet file to create. Either \"bdb\" or \"sqlite\". Only used with 'createfromdump'", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -debug is true, 0 otherwise).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); - argsman.AddArg("info", "Get wallet info", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - argsman.AddArg("create", "Create new wallet file", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - argsman.AddArg("salvage", "Attempt to recover private keys from a corrupt wallet. 
Warning: 'salvage' is experimental.", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - argsman.AddArg("dump", "Print out all of the wallet key-value records", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); - argsman.AddArg("createfromdump", "Create new wallet file from dumped records", ArgsManager::ALLOW_ANY, OptionsCategory::COMMANDS); + argsman.AddCommand("info", "Get wallet info", OptionsCategory::COMMANDS); + argsman.AddCommand("create", "Create new wallet file", OptionsCategory::COMMANDS); + argsman.AddCommand("salvage", "Attempt to recover private keys from a corrupt wallet. Warning: 'salvage' is experimental.", OptionsCategory::COMMANDS); + argsman.AddCommand("dump", "Print out all of the wallet key-value records", OptionsCategory::COMMANDS); + argsman.AddCommand("createfromdump", "Create new wallet file from dumped records", OptionsCategory::COMMANDS); } -static bool WalletAppInit(int argc, char* argv[]) +static bool WalletAppInit(ArgsManager& args, int argc, char* argv[]) { - SetupWalletToolArgs(gArgs); + SetupWalletToolArgs(args); std::string error_message; - if (!gArgs.ParseParameters(argc, argv, error_message)) { + if (!args.ParseParameters(argc, argv, error_message)) { tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error_message); return false; } - if (argc < 2 || HelpRequested(gArgs) || gArgs.IsArgSet("-version")) { + if (argc < 2 || HelpRequested(args) || args.IsArgSet("-version")) { std::string strUsage = strprintf("%s bitcoin-wallet version", PACKAGE_NAME) + " " + FormatFullVersion() + "\n"; - if (!gArgs.IsArgSet("-version")) { - strUsage += "\n" - "bitcoin-wallet is an offline tool for creating and interacting with " PACKAGE_NAME " wallet files.\n" - "By default bitcoin-wallet will act on wallets in the default mainnet wallet directory in the datadir.\n" - "To change the target wallet, use the -datadir, -wallet and -testnet/-regtest arguments.\n\n" - "Usage:\n" - " bitcoin-wallet [options] <command>\n"; - strUsage += "\n" + gArgs.GetHelpMessage(); - } + if (!args.IsArgSet("-version")) { + strUsage += "\n" + "bitcoin-wallet is an offline tool for creating and interacting with " PACKAGE_NAME " wallet files.\n" + "By default bitcoin-wallet will act on wallets in the default mainnet wallet directory in the datadir.\n" + "To change the target wallet, use the -datadir, -wallet and -testnet/-regtest arguments.\n\n" + "Usage:\n" + " bitcoin-wallet [options] <command>\n"; + strUsage += "\n" + args.GetHelpMessage(); + } tfm::format(std::cout, "%s", strUsage); return false; } // check for printtoconsole, allow -debug - LogInstance().m_print_to_console = gArgs.GetBoolArg("-printtoconsole", gArgs.GetBoolArg("-debug", false)); + LogInstance().m_print_to_console = args.GetBoolArg("-printtoconsole", args.GetBoolArg("-debug", false)); if (!CheckDataDirOption()) { - tfm::format(std::cerr, "Error: Specified data directory \"%s\" does not exist.\n", gArgs.GetArg("-datadir", "")); + tfm::format(std::cerr, "Error: Specified data directory \"%s\" does not exist.\n", args.GetArg("-datadir", "")); return false; } // Check for chain settings (Params() calls are only valid after this clause) - SelectParams(gArgs.GetChainName()); + SelectParams(args.GetChainName()); return true; } int main(int argc, char* argv[]) { + ArgsManager& args = gArgs; #ifdef WIN32 util::WinCmdLineArgs winArgs; std::tie(argc, argv) = winArgs.get(); @@ -85,7 +86,7 @@ int main(int argc, char* argv[]) SetupEnvironment(); RandomInit(); try { - if (!WalletAppInit(argc, argv)) return EXIT_FAILURE; + 
if (!WalletAppInit(args, argc, argv)) return EXIT_FAILURE; } catch (const std::exception& e) { PrintExceptionContinue(&e, "WalletAppInit()"); return EXIT_FAILURE; @@ -94,33 +95,19 @@ int main(int argc, char* argv[]) return EXIT_FAILURE; } - std::string method {}; - for(int i = 1; i < argc; ++i) { - if (!IsSwitchChar(argv[i][0])) { - if (!method.empty()) { - tfm::format(std::cerr, "Error: two methods provided (%s and %s). Only one method should be provided.\n", method, argv[i]); - return EXIT_FAILURE; - } - method = argv[i]; - } - } - - if (method.empty()) { + const auto command = args.GetCommand(); + if (!command) { tfm::format(std::cerr, "No method provided. Run `bitcoin-wallet -help` for valid methods.\n"); return EXIT_FAILURE; } - - // A name must be provided when creating a file - if (method == "create" && !gArgs.IsArgSet("-wallet")) { - tfm::format(std::cerr, "Wallet name must be provided when creating a new wallet.\n"); + if (command->args.size() != 0) { + tfm::format(std::cerr, "Error: Additional arguments provided (%s). Methods do not take arguments. Please refer to `-help`.\n", Join(command->args, ", ")); return EXIT_FAILURE; } - std::string name = gArgs.GetArg("-wallet", ""); - ECCVerifyHandle globalVerifyHandle; ECC_Start(); - if (!WalletTool::ExecuteWalletToolFunc(gArgs, method, name)) { + if (!WalletTool::ExecuteWalletToolFunc(args, command->command)) { return EXIT_FAILURE; } ECC_Stop(); diff --git a/src/chain.h b/src/chain.h index 43e8a39f36..04a5db5a17 100644 --- a/src/chain.h +++ b/src/chain.h @@ -163,14 +163,27 @@ public: //! Number of transactions in this block. //! Note: in a potential headers-first mode, this number cannot be relied upon + //! Note: this value is faked during UTXO snapshot load to ensure that + //! LoadBlockIndex() will load index entries for blocks that we lack data for. + //! @sa ActivateSnapshot unsigned int nTx{0}; //! (memory only) Number of transactions in the chain up to and including this block. //! This value will be non-zero only if and only if transactions for this block and all its parents are available. //! Change to 64-bit type when necessary; won't happen before 2030 + //! + //! Note: this value is faked during use of a UTXO snapshot because we don't + //! have the underlying block data available during snapshot load. + //! @sa AssumeutxoData + //! @sa ActivateSnapshot unsigned int nChainTx{0}; //! Verification status of this block. See enum BlockStatus + //! + //! Note: this value is modified to show BLOCK_OPT_WITNESS during UTXO snapshot + //! load to avoid the block index being spuriously rewound. + //! @sa RewindBlockIndex + //! @sa ActivateSnapshot uint32_t nStatus{0}; //! block header diff --git a/src/chainparams.cpp b/src/chainparams.cpp index 88cf5ef0a8..16efffa6f0 100644 --- a/src/chainparams.cpp +++ b/src/chainparams.cpp @@ -8,9 +8,7 @@ #include <chainparamsseeds.h> #include <consensus/merkle.h> #include <hash.h> // for signet block challenge hash -#include <tinyformat.h> #include <util/system.h> -#include <util/strencodings.h> #include <versionbitsinfo.h> #include <assert.h> @@ -136,7 +134,7 @@ public: bech32_hrp = "bc"; - vFixedSeeds = std::vector<SeedSpec6>(pnSeed6_main, pnSeed6_main + ARRAYLEN(pnSeed6_main)); + vFixedSeeds = std::vector<SeedSpec6>(std::begin(pnSeed6_main), std::end(pnSeed6_main)); fDefaultConsistencyChecks = false; fRequireStandard = true; @@ -161,6 +159,10 @@ public: } }; + m_assumeutxo_data = MapAssumeutxo{ + // TODO to be specified in a future patch. 
+ }; + chainTxData = ChainTxData{ // Data from RPC: getchaintxstats 4096 0000000000000000000b9d2ec5a352ecba0592946514a92f14319dc2b367fc72 /* nTime */ 1603995752, @@ -237,7 +239,7 @@ public: bech32_hrp = "tb"; - vFixedSeeds = std::vector<SeedSpec6>(pnSeed6_test, pnSeed6_test + ARRAYLEN(pnSeed6_test)); + vFixedSeeds = std::vector<SeedSpec6>(std::begin(pnSeed6_test), std::end(pnSeed6_test)); fDefaultConsistencyChecks = false; fRequireStandard = false; @@ -250,6 +252,10 @@ public: } }; + m_assumeutxo_data = MapAssumeutxo{ + // TODO to be specified in a future patch. + }; + chainTxData = ChainTxData{ // Data from RPC: getchaintxstats 4096 000000000000006433d1efec504c53ca332b64963c425395515b01977bd7b3b0 /* nTime */ 1603359686, @@ -406,7 +412,7 @@ public: pchMessageStart[2] = 0xb5; pchMessageStart[3] = 0xda; nDefaultPort = 18444; - nPruneAfterHeight = 1000; + nPruneAfterHeight = gArgs.GetBoolArg("-fastprune", false) ? 100 : 1000; m_assumed_blockchain_size = 0; m_assumed_chain_state_size = 0; @@ -431,6 +437,17 @@ public: } }; + m_assumeutxo_data = MapAssumeutxo{ + { + 110, + {uint256S("0x76fd7334ac7c1baf57ddc0c626f073a655a35d98a4258cd1382c8cc2b8392e10"), 110}, + }, + { + 210, + {uint256S("0x9c5ed99ef98544b34f8920b6d1802f72ac28ae6e2bd2bd4c316ff10c230df3f2"), 210}, + }, + }; + chainTxData = ChainTxData{ 0, 0, @@ -526,3 +543,9 @@ void SelectParams(const std::string& network) SelectBaseParams(network); globalChainParams = CreateChainParams(gArgs, network); } + +std::ostream& operator<<(std::ostream& o, const AssumeutxoData& aud) +{ + o << strprintf("AssumeutxoData(%s, %s)", aud.hash_serialized.ToString(), aud.nChainTx); + return o; +} diff --git a/src/chainparams.h b/src/chainparams.h index d8b25c7220..4d24dcdb7c 100644 --- a/src/chainparams.h +++ b/src/chainparams.h @@ -31,6 +31,26 @@ struct CCheckpointData { }; /** + * Holds configuration for use during UTXO snapshot load and validation. The contents + * here are security critical, since they dictate which UTXO snapshots are recognized + * as valid. + */ +struct AssumeutxoData { + //! The expected hash of the deserialized UTXO set. + const uint256 hash_serialized; + + //! Used to populate the nChainTx value, which is used during BlockManager::LoadBlockIndex(). + //! + //! We need to hardcode the value here because this is computed cumulatively using block data, + //! which we do not necessarily have at the time of snapshot load. + const unsigned int nChainTx; +}; + +std::ostream& operator<<(std::ostream& o, const AssumeutxoData& aud); + +using MapAssumeutxo = std::map<int, const AssumeutxoData>; + +/** * Holds various statistics on transactions within a chain. Used to estimate * verification progress during chain sync. * @@ -90,6 +110,11 @@ public: const std::string& Bech32HRP() const { return bech32_hrp; } const std::vector<SeedSpec6>& FixedSeeds() const { return vFixedSeeds; } const CCheckpointData& Checkpoints() const { return checkpointData; } + + //! Get allowed assumeutxo configuration. + //! 
@see ChainstateManager + const MapAssumeutxo& Assumeutxo() const { return m_assumeutxo_data; } + const ChainTxData& TxData() const { return chainTxData; } protected: CChainParams() {} @@ -111,6 +136,7 @@ protected: bool m_is_test_chain; bool m_is_mockable_chain; CCheckpointData checkpointData; + MapAssumeutxo m_assumeutxo_data; ChainTxData chainTxData; }; diff --git a/src/clientversion.h b/src/clientversion.h index 2da909f829..0ed3f68094 100644 --- a/src/clientversion.h +++ b/src/clientversion.h @@ -5,6 +5,8 @@ #ifndef BITCOIN_CLIENTVERSION_H #define BITCOIN_CLIENTVERSION_H +#include <util/macros.h> + #if defined(HAVE_CONFIG_H) #include <config/bitcoin-config.h> #endif //HAVE_CONFIG_H @@ -14,13 +16,6 @@ #error Client version information missing: version is not defined by bitcoin-config.h or in any other way #endif -/** - * Converts the parameter X to a string after macro replacement on X has been performed. - * Don't merge these into one macro! - */ -#define STRINGIZE(X) DO_STRINGIZE(X) -#define DO_STRINGIZE(X) #X - //! Copyright string used in Windows .rc files #define COPYRIGHT_STR "2009-" STRINGIZE(COPYRIGHT_YEAR) " " COPYRIGHT_HOLDERS_FINAL diff --git a/src/coins.cpp b/src/coins.cpp index dd84e720e7..d52851cadd 100644 --- a/src/coins.cpp +++ b/src/coins.cpp @@ -97,6 +97,14 @@ void CCoinsViewCache::AddCoin(const COutPoint &outpoint, Coin&& coin, bool possi cachedCoinsUsage += it->second.coin.DynamicMemoryUsage(); } +void CCoinsViewCache::EmplaceCoinInternalDANGER(COutPoint&& outpoint, Coin&& coin) { + cachedCoinsUsage += coin.DynamicMemoryUsage(); + cacheCoins.emplace( + std::piecewise_construct, + std::forward_as_tuple(std::move(outpoint)), + std::forward_as_tuple(std::move(coin), CCoinsCacheEntry::DIRTY)); +} + void AddCoins(CCoinsViewCache& cache, const CTransaction &tx, int nHeight, bool check_for_overwrite) { bool fCoinbase = tx.IsCoinBase(); const uint256& txid = tx.GetHash(); diff --git a/src/coins.h b/src/coins.h index d2eb42d8cf..feb441fd6a 100644 --- a/src/coins.h +++ b/src/coins.h @@ -20,6 +20,8 @@ #include <functional> #include <unordered_map> +class ChainstateManager; + /** * A UTXO entry. * @@ -125,6 +127,7 @@ struct CCoinsCacheEntry CCoinsCacheEntry() : flags(0) {} explicit CCoinsCacheEntry(Coin&& coin_) : coin(std::move(coin_)), flags(0) {} + CCoinsCacheEntry(Coin&& coin_, unsigned char flag) : coin(std::move(coin_)), flags(flag) {} }; typedef std::unordered_map<COutPoint, CCoinsCacheEntry, SaltedOutpointHasher> CCoinsMap; @@ -263,6 +266,15 @@ public: void AddCoin(const COutPoint& outpoint, Coin&& coin, bool possible_overwrite); /** + * Emplace a coin into cacheCoins without performing any checks, marking + * the emplaced coin as dirty. + * + * NOT FOR GENERAL USE. Used only when loading coins from a UTXO snapshot. + * @sa ChainstateManager::PopulateAndValidateSnapshot() + */ + void EmplaceCoinInternalDANGER(COutPoint&& outpoint, Coin&& coin); + + /** * Spend a coin. Pass moveto in order to get the deleted data. * If no unspent output exists for the passed outpoint, this call * has no effect. diff --git a/src/crypto/muhash.cpp b/src/crypto/muhash.cpp index fbd14f9325..e5a0d4cb9c 100644 --- a/src/crypto/muhash.cpp +++ b/src/crypto/muhash.cpp @@ -17,7 +17,6 @@ namespace { using limb_t = Num3072::limb_t; using double_limb_t = Num3072::double_limb_t; constexpr int LIMB_SIZE = Num3072::LIMB_SIZE; -constexpr int LIMBS = Num3072::LIMBS; /** 2^3072 - 1103717, the largest 3072-bit safe prime number, is used as the modulus. 
*/ constexpr limb_t MAX_PRIME_DIFF = 1103717; @@ -123,7 +122,7 @@ inline void square_n_mul(Num3072& in_out, const int sq, const Num3072& mul) } // namespace -/** Indicates wether d is larger than the modulus. */ +/** Indicates whether d is larger than the modulus. */ bool Num3072::IsOverflow() const { if (this->limbs[0] <= std::numeric_limits<limb_t>::max() - MAX_PRIME_DIFF) return false; @@ -276,18 +275,33 @@ void Num3072::Divide(const Num3072& a) if (this->IsOverflow()) this->FullReduce(); } -Num3072 MuHash3072::ToNum3072(Span<const unsigned char> in) { - Num3072 out{}; - uint256 hashed_in = (CHashWriter(SER_DISK, 0) << in).GetSHA256(); - unsigned char tmp[BYTE_SIZE]; - ChaCha20(hashed_in.data(), hashed_in.size()).Keystream(tmp, BYTE_SIZE); +Num3072::Num3072(const unsigned char (&data)[BYTE_SIZE]) { + for (int i = 0; i < LIMBS; ++i) { + if (sizeof(limb_t) == 4) { + this->limbs[i] = ReadLE32(data + 4 * i); + } else if (sizeof(limb_t) == 8) { + this->limbs[i] = ReadLE64(data + 8 * i); + } + } +} + +void Num3072::ToBytes(unsigned char (&out)[BYTE_SIZE]) { for (int i = 0; i < LIMBS; ++i) { if (sizeof(limb_t) == 4) { - out.limbs[i] = ReadLE32(tmp + 4 * i); + WriteLE32(out + i * 4, this->limbs[i]); } else if (sizeof(limb_t) == 8) { - out.limbs[i] = ReadLE64(tmp + 8 * i); + WriteLE64(out + i * 8, this->limbs[i]); } } +} + +Num3072 MuHash3072::ToNum3072(Span<const unsigned char> in) { + unsigned char tmp[Num3072::BYTE_SIZE]; + + uint256 hashed_in = (CHashWriter(SER_DISK, 0) << in).GetSHA256(); + ChaCha20(hashed_in.data(), hashed_in.size()).Keystream(tmp, Num3072::BYTE_SIZE); + Num3072 out{tmp}; + return out; } @@ -301,14 +315,8 @@ void MuHash3072::Finalize(uint256& out) noexcept m_numerator.Divide(m_denominator); m_denominator.SetToOne(); // Needed to keep the MuHash object valid - unsigned char data[384]; - for (int i = 0; i < LIMBS; ++i) { - if (sizeof(limb_t) == 4) { - WriteLE32(data + i * 4, m_numerator.limbs[i]); - } else if (sizeof(limb_t) == 8) { - WriteLE64(data + i * 8, m_numerator.limbs[i]); - } - } + unsigned char data[Num3072::BYTE_SIZE]; + m_numerator.ToBytes(data); out = (CHashWriter(SER_DISK, 0) << data).GetSHA256(); } diff --git a/src/crypto/muhash.h b/src/crypto/muhash.h index 0c710007c4..c023a8b9d3 100644 --- a/src/crypto/muhash.h +++ b/src/crypto/muhash.h @@ -22,6 +22,7 @@ private: Num3072 GetInverse() const; public: + static constexpr size_t BYTE_SIZE = 384; #ifdef HAVE___INT128 typedef unsigned __int128 double_limb_t; @@ -48,8 +49,10 @@ public: void Divide(const Num3072& a); void SetToOne(); void Square(); + void ToBytes(unsigned char (&out)[BYTE_SIZE]); Num3072() { this->SetToOne(); }; + Num3072(const unsigned char (&data)[BYTE_SIZE]); SERIALIZE_METHODS(Num3072, obj) { @@ -78,7 +81,7 @@ public: * arbitrary subset of the update operations, allowing them to be * efficiently combined later. * - * Muhash does not support checking if an element is already part of the + * MuHash does not support checking if an element is already part of the * set. That is why this class does not enforce the use of a set as the * data it represents because there is no efficient way to do so. 
* It is possible to add elements more than once and also to remove @@ -91,8 +94,6 @@ public: class MuHash3072 { private: - static constexpr size_t BYTE_SIZE = 384; - Num3072 m_numerator; Num3072 m_denominator; diff --git a/src/flatfile.cpp b/src/flatfile.cpp index 11cf357f3d..151f1a38f1 100644 --- a/src/flatfile.cpp +++ b/src/flatfile.cpp @@ -66,7 +66,7 @@ size_t FlatFileSeq::Allocate(const FlatFilePos& pos, size_t add_size, bool& out_ if (CheckDiskSpace(m_dir, inc_size)) { FILE *file = Open(pos); if (file) { - LogPrintf("Pre-allocating up to position 0x%x in %s%05u.dat\n", new_size, m_prefix, pos.nFile); + LogPrint(BCLog::VALIDATION, "Pre-allocating up to position 0x%x in %s%05u.dat\n", new_size, m_prefix, pos.nFile); AllocateFileRange(file, pos.nPos, inc_size); fclose(file); return inc_size; diff --git a/src/index/base.cpp b/src/index/base.cpp index 3d3dda95b1..25644c3b41 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -65,6 +65,43 @@ bool BaseIndex::Init() m_best_block_index = g_chainman.m_blockman.FindForkInGlobalIndex(::ChainActive(), locator); } m_synced = m_best_block_index.load() == ::ChainActive().Tip(); + if (!m_synced) { + bool prune_violation = false; + if (!m_best_block_index) { + // index is not built yet + // make sure we have all block data back to the genesis + const CBlockIndex* block = ::ChainActive().Tip(); + while (block->pprev && (block->pprev->nStatus & BLOCK_HAVE_DATA)) { + block = block->pprev; + } + prune_violation = block != ::ChainActive().Genesis(); + } + // in case the index has a best block set and is not fully synced + // check if we have the required blocks to continue building the index + else { + const CBlockIndex* block_to_test = m_best_block_index.load(); + if (!ChainActive().Contains(block_to_test)) { + // if the bestblock is not part of the mainchain, find the fork + // and make sure we have all data down to the fork + block_to_test = ::ChainActive().FindFork(block_to_test); + } + const CBlockIndex* block = ::ChainActive().Tip(); + prune_violation = true; + // check backwards from the tip if we have all block data until we reach the indexes bestblock + while (block_to_test && block->pprev && (block->pprev->nStatus & BLOCK_HAVE_DATA)) { + if (block_to_test == block) { + prune_violation = false; + break; + } + block = block->pprev; + } + } + if (prune_violation) { + // throw error and graceful shutdown if we can't build the index + FatalError("%s: %s best block of the index goes beyond pruned data. Please disable the index or reindex (which will download the whole blockchain again)", __func__, GetName()); + return false; + } + } return true; } @@ -177,6 +214,10 @@ bool BaseIndex::Rewind(const CBlockIndex* current_tip, const CBlockIndex* new_ti assert(current_tip->GetAncestor(new_tip->nHeight) == new_tip); // In the case of a reorg, ensure persisted block locator is not stale. + // Pruning has a minimum of 288 blocks-to-keep and getting the index + // out of sync may be possible but a users fault. + // In case we reorg beyond the pruned depth, ReadBlockFromDisk would + // throw and lead to a graceful shutdown m_best_block_index = new_tip; if (!Commit()) { // If commit fails, revert the best block index to avoid corruption. @@ -325,6 +366,6 @@ IndexSummary BaseIndex::GetSummary() const IndexSummary summary{}; summary.name = GetName(); summary.synced = m_synced; - summary.best_block_height = m_best_block_index.load()->nHeight; + summary.best_block_height = m_best_block_index ? 
m_best_block_index.load()->nHeight : 0; return summary; } diff --git a/src/init.cpp b/src/init.cpp index 716c06cd3a..befba2eb2d 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -386,6 +386,7 @@ void SetupServerArgs(NodeContext& node) #endif argsman.AddArg("-assumevalid=<hex>", strprintf("If this block is in the chain assume that it and its ancestors are valid and potentially skip their script verification (0 to verify all, default: %s, testnet: %s, signet: %s)", defaultChainParams->GetConsensus().defaultAssumeValid.GetHex(), testnetChainParams->GetConsensus().defaultAssumeValid.GetHex(), signetChainParams->GetConsensus().defaultAssumeValid.GetHex()), ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-blocksdir=<dir>", "Specify directory to hold blocks subdirectory for *.dat files (default: <datadir>)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-fastprune", "Use smaller block files and lower minimum prune height for testing purposes", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); #if HAVE_SYSTEM argsman.AddArg("-blocknotify=<cmd>", "Execute command when the best block changes (%s in cmd is replaced by block hash)", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); #endif @@ -434,8 +435,9 @@ void SetupServerArgs(NodeContext& node) argsman.AddArg("-connect=<ip>", "Connect only to the specified node; -noconnect disables automatic connections (the rules for this peer are the same as for -addnode). This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::CONNECTION); argsman.AddArg("-discover", "Discover own IP addresses (default: 1 when listening and no -externalip or -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-dns", strprintf("Allow DNS lookups for -addnode, -seednode and -connect (default: %u)", DEFAULT_NAME_LOOKUP), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - argsman.AddArg("-dnsseed", "Query for peer addresses via DNS lookup, if low on addresses (default: 1 unless -connect used)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-dnsseed", strprintf("Query for peer addresses via DNS lookup, if low on addresses (default: %u unless -connect used)", DEFAULT_DNSSEED), ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION); argsman.AddArg("-externalip=<ip>", "Specify your own public address", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-fixedseeds", strprintf("Allow fixed seeds if DNS seeds don't provide peers (default: %u)", DEFAULT_FIXEDSEEDS), ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION); argsman.AddArg("-forcednsseed", strprintf("Always query for peer addresses via DNS lookup (default: %u)", DEFAULT_FORCEDNSSEED), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-listen", "Accept connections from outside (default: 1 if no -proxy or -connect)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-listenonion", strprintf("Automatically create Tor onion service (default: %d)", DEFAULT_LISTEN_ONION), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); @@ -445,7 +447,7 @@ void SetupServerArgs(NodeContext& node) argsman.AddArg("-maxtimeadjustment", strprintf("Maximum allowed median peer time offset adjustment. Local perspective of time may be influenced by peers forward or backward by this amount. 
(default: %u seconds)", DEFAULT_MAX_TIME_ADJUSTMENT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-maxuploadtarget=<n>", strprintf("Tries to keep outbound traffic under the given target (in MiB per 24h). Limit does not apply to peers with 'download' permission. 0 = no limit (default: %d)", DEFAULT_MAX_UPLOAD_TARGET), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-onion=<ip:port>", "Use separate SOCKS5 proxy to reach peers via Tor onion services, set -noonion to disable (default: -proxy)", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - argsman.AddArg("-onlynet=<net>", "Make outgoing connections only through network <net> (ipv4, ipv6 or onion). Incoming connections are not affected by this option. This option can be specified multiple times to allow multiple networks. Warning: if it is used with ipv4 or ipv6 but not onion and the -onion or -proxy option is set, then outbound onion connections will still be made; use -noonion or -onion=0 to disable outbound onion connections in this case.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-onlynet=<net>", "Make outgoing connections only through network <net> (" + Join(GetNetworkNames(), ", ") + "). Incoming connections are not affected by this option. This option can be specified multiple times to allow multiple networks. Warning: if it is used with ipv4 or ipv6 but not onion and the -onion or -proxy option is set, then outbound onion connections will still be made; use -noonion or -onion=0 to disable outbound onion connections in this case.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-peerbloomfilters", strprintf("Support filtering of blocks and transaction with bloom filters (default: %u)", DEFAULT_PEERBLOOMFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-peerblockfilters", strprintf("Serve compact block filters to peers per BIP 157 (default: %u)", DEFAULT_PEERBLOCKFILTERS), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-permitbaremultisig", strprintf("Relay non-P2SH multisig (default: %u)", DEFAULT_PERMIT_BAREMULTISIG), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); @@ -454,8 +456,8 @@ void SetupServerArgs(NodeContext& node) argsman.AddArg("-proxyrandomize", strprintf("Randomize credentials for every proxy connection. This enables Tor stream isolation (default: %u)", DEFAULT_PROXYRANDOMIZE), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-seednode=<ip>", "Connect to a node to retrieve peer addresses, and disconnect. This option can be specified multiple times to connect to multiple nodes.", ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-networkactive", "Enable all P2P network activity (default: 1). Can be changed by the setnetworkactive RPC command", ArgsManager::ALLOW_BOOL, OptionsCategory::CONNECTION); - argsman.AddArg("-timeout=<n>", strprintf("Specify connection timeout in milliseconds (minimum: 1, default: %d)", DEFAULT_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); - argsman.AddArg("-peertimeout=<n>", strprintf("Specify p2p connection timeout in seconds. This option determines the amount of time a peer may be inactive before the connection to it is dropped. (minimum: 1, default: %d)", DEFAULT_PEER_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION); + argsman.AddArg("-timeout=<n>", strprintf("Specify socket connection timeout in milliseconds. 
If an initial attempt to connect is unsuccessful after this amount of time, drop it (minimum: 1, default: %d)", DEFAULT_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); + argsman.AddArg("-peertimeout=<n>", strprintf("Specify a p2p connection timeout delay in seconds. After connecting to a peer, wait this amount of time before considering disconnection based on inactivity (minimum: 1, default: %d)", DEFAULT_PEER_CONNECT_TIMEOUT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::CONNECTION); argsman.AddArg("-torcontrol=<ip>:<port>", strprintf("Tor control port to use if onion listening enabled (default: %s)", DEFAULT_TOR_CONTROL), ArgsManager::ALLOW_ANY, OptionsCategory::CONNECTION); argsman.AddArg("-torpassword=<pass>", "Tor control port password (default: empty)", ArgsManager::ALLOW_ANY | ArgsManager::SENSITIVE, OptionsCategory::CONNECTION); #ifdef USE_UPNP @@ -519,10 +521,11 @@ void SetupServerArgs(NodeContext& node) argsman.AddArg("-limitdescendantcount=<n>", strprintf("Do not accept transactions if any ancestor would have <n> or more in-mempool descendants (default: %u)", DEFAULT_DESCENDANT_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-limitdescendantsize=<n>", strprintf("Do not accept transactions if any ancestor would have more than <n> kilobytes of in-mempool descendants (default: %u).", DEFAULT_DESCENDANT_SIZE_LIMIT), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-addrmantest", "Allows to test address relay on localhost", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-capturemessages", "Capture all P2P messages to disk", ArgsManager::ALLOW_BOOL | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-debug=<category>", "Output debugging information (default: -nodebug, supplying <category> is optional). " - "If <category> is not supplied or if <category> = 1, output all debugging information. <category> can be: " + LogInstance().LogCategoriesString() + ".", + "If <category> is not supplied or if <category> = 1, output all debugging information. <category> can be: " + LogInstance().LogCategoriesString() + ". This option can be specified multiple times to output multiple categories.", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); - argsman.AddArg("-debugexclude=<category>", strprintf("Exclude debugging information for a category. Can be used in conjunction with -debug=1 to output debug logs for all categories except one or more specified categories."), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); + argsman.AddArg("-debugexclude=<category>", strprintf("Exclude debugging information for a category. Can be used in conjunction with -debug=1 to output debug logs for all categories except the specified category. 
This option can be specified multiple times to exclude multiple categories."), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-logips", strprintf("Include IP addresses in debug output (default: %u)", DEFAULT_LOGIPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-logtimestamps", strprintf("Prepend debug output with timestamp (default: %u)", DEFAULT_LOGTIMESTAMPS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); #ifdef HAVE_THREAD_LOCAL @@ -530,6 +533,7 @@ void SetupServerArgs(NodeContext& node) #else hidden_args.emplace_back("-logthreadnames"); #endif + argsman.AddArg("-logsourcelocations", strprintf("Prepend debug output with name of the originating source location (source file, line number and function name) (default: %u)", DEFAULT_LOGSOURCELOCATIONS), ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-logtimemicros", strprintf("Add microsecond precision to debug timestamps (default: %u)", DEFAULT_LOGTIMEMICROS), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-mocktime=<n>", "Replace actual time with " + UNIX_EPOCH_TIME + " (default: 0)", ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); argsman.AddArg("-maxsigcachesize=<n>", strprintf("Limit sum of signature cache and script execution cache sizes to <n> MiB (default: %u)", DEFAULT_MAX_SIG_CACHE_SIZE), ArgsManager::ALLOW_ANY | ArgsManager::DEBUG_ONLY, OptionsCategory::DEBUG_TEST); @@ -771,6 +775,10 @@ static bool InitSanityCheck() return InitError(Untranslated("OS cryptographic RNG sanity check failure. Aborting.")); } + if (!ChronoSanityCheck()) { + return InitError(Untranslated("Clock epoch mismatch. Aborting.")); + } + return true; } @@ -876,6 +884,7 @@ void InitLogging(const ArgsManager& args) #ifdef HAVE_THREAD_LOCAL LogInstance().m_log_threadnames = args.GetBoolArg("-logthreadnames", DEFAULT_LOGTHREADNAMES); #endif + LogInstance().m_log_sourcelocations = args.GetBoolArg("-logsourcelocations", DEFAULT_LOGSOURCELOCATIONS); fLogIPs = args.GetBoolArg("-logips", DEFAULT_LOGIPS); @@ -1022,9 +1031,6 @@ bool AppInitParameterInteraction(const ArgsManager& args) if (args.GetArg("-prune", 0)) { if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) return InitError(_("Prune mode is incompatible with -txindex.")); - if (!g_enabled_filter_types.empty()) { - return InitError(_("Prune mode is incompatible with -blockfilterindex.")); - } } // -bind and -whitebind can't be set when not listening @@ -1040,16 +1046,17 @@ bool AppInitParameterInteraction(const ArgsManager& args) // Trim requested connection counts, to fit into system limitations // <int> in std::min<int>(...) 
to work around FreeBSD compilation issue described in #2695 - nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS + MAX_ADDNODE_CONNECTIONS + nBind); + nFD = RaiseFileDescriptorLimit(nMaxConnections + MIN_CORE_FILEDESCRIPTORS + MAX_ADDNODE_CONNECTIONS + nBind + NUM_FDS_MESSAGE_CAPTURE); + #ifdef USE_POLL int fd_max = nFD; #else int fd_max = FD_SETSIZE; #endif - nMaxConnections = std::max(std::min<int>(nMaxConnections, fd_max - nBind - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS), 0); + nMaxConnections = std::max(std::min<int>(nMaxConnections, fd_max - nBind - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS - NUM_FDS_MESSAGE_CAPTURE), 0); if (nFD < MIN_CORE_FILEDESCRIPTORS) return InitError(_("Not enough file descriptors available.")); - nMaxConnections = std::min(nFD - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS, nMaxConnections); + nMaxConnections = std::min(nFD - MIN_CORE_FILEDESCRIPTORS - MAX_ADDNODE_CONNECTIONS - NUM_FDS_MESSAGE_CAPTURE, nMaxConnections); if (nMaxConnections < nUserMaxConnections) InitWarning(strprintf(_("Reducing -maxconnections from %d to %d, because of system limitations."), nUserMaxConnections, nMaxConnections)); diff --git a/src/logging.cpp b/src/logging.cpp index 4ddcf1d930..e82f2c2810 100644 --- a/src/logging.cpp +++ b/src/logging.cpp @@ -5,6 +5,7 @@ #include <logging.h> #include <util/threadnames.h> +#include <util/string.h> #include <util/time.h> #include <mutex> @@ -203,9 +204,9 @@ std::string BCLog::Logger::LogTimestampStr(const std::string& str) strStamped.pop_back(); strStamped += strprintf(".%06dZ", nTimeMicros%1000000); } - int64_t mocktime = GetMockTime(); - if (mocktime) { - strStamped += " (mocktime: " + FormatISO8601DateTime(mocktime) + ")"; + std::chrono::seconds mocktime = GetMockTime(); + if (mocktime > 0s) { + strStamped += " (mocktime: " + FormatISO8601DateTime(count_seconds(mocktime)) + ")"; } strStamped += ' ' + str; } else @@ -236,11 +237,15 @@ namespace BCLog { } } -void BCLog::Logger::LogPrintStr(const std::string& str) +void BCLog::Logger::LogPrintStr(const std::string& str, const std::string& logging_function, const std::string& source_file, const int source_line) { StdLockGuard scoped_lock(m_cs); std::string str_prefixed = LogEscapeMessage(str); + if (m_log_sourcelocations && m_started_new_line) { + str_prefixed.insert(0, "[" + RemovePrefix(source_file, "./") + ":" + ToString(source_line) + "] [" + logging_function + "] "); + } + if (m_log_threadnames && m_started_new_line) { str_prefixed.insert(0, "[" + util::ThreadGetInternalName() + "] "); } diff --git a/src/logging.h b/src/logging.h index 9efecc7c12..4ece8f5e3a 100644 --- a/src/logging.h +++ b/src/logging.h @@ -22,6 +22,7 @@ static const bool DEFAULT_LOGTIMEMICROS = false; static const bool DEFAULT_LOGIPS = false; static const bool DEFAULT_LOGTIMESTAMPS = true; static const bool DEFAULT_LOGTHREADNAMES = false; +static const bool DEFAULT_LOGSOURCELOCATIONS = false; extern const char * const DEFAULT_DEBUGLOGFILE; extern bool fLogIPs; @@ -90,12 +91,13 @@ namespace BCLog { bool m_log_timestamps = DEFAULT_LOGTIMESTAMPS; bool m_log_time_micros = DEFAULT_LOGTIMEMICROS; bool m_log_threadnames = DEFAULT_LOGTHREADNAMES; + bool m_log_sourcelocations = DEFAULT_LOGSOURCELOCATIONS; fs::path m_file_path; std::atomic<bool> m_reopen_file{false}; /** Send a string to the log output */ - void LogPrintStr(const std::string& str); + void LogPrintStr(const std::string& str, const std::string& logging_function, const std::string& source_file, const int source_line); 
/** Returns whether logs will be written to any output */ bool Enabled() const @@ -163,7 +165,7 @@ bool GetLogCategory(BCLog::LogFlags& flag, const std::string& str); // peer can fill up a user's disk with debug.log entries. template <typename... Args> -static inline void LogPrintf(const char* fmt, const Args&... args) +static inline void LogPrintf_(const std::string& logging_function, const std::string& source_file, const int source_line, const char* fmt, const Args&... args) { if (LogInstance().Enabled()) { std::string log_msg; @@ -173,10 +175,12 @@ static inline void LogPrintf(const char* fmt, const Args&... args) /* Original format string will have newline so don't add one here */ log_msg = "Error \"" + std::string(fmterr.what()) + "\" while formatting log message: " + fmt; } - LogInstance().LogPrintStr(log_msg); + LogInstance().LogPrintStr(log_msg, logging_function, source_file, source_line); } } +#define LogPrintf(...) LogPrintf_(__func__, __FILE__, __LINE__, __VA_ARGS__) + // Use a macro instead of a function for conditional logging to prevent // evaluating arguments when logging for the category is not enabled. #define LogPrint(category, ...) \ diff --git a/src/net.cpp b/src/net.cpp index 76bf7effa4..533815b755 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -20,6 +20,7 @@ #include <protocol.h> #include <random.h> #include <scheduler.h> +#include <util/sock.h> #include <util/strencodings.h> #include <util/translation.h> @@ -200,31 +201,29 @@ bool IsPeerAddrLocalGood(CNode *pnode) IsReachable(addrLocal.GetNetwork()); } -// pushes our own address to a peer -void AdvertiseLocal(CNode *pnode) +Optional<CAddress> GetLocalAddrForPeer(CNode *pnode) { - if (fListen && pnode->fSuccessfullyConnected) + CAddress addrLocal = GetLocalAddress(&pnode->addr, pnode->GetLocalServices()); + if (gArgs.GetBoolArg("-addrmantest", false)) { + // use IPv4 loopback during addrmantest + addrLocal = CAddress(CService(LookupNumeric("127.0.0.1", GetListenPort())), pnode->GetLocalServices()); + } + // If discovery is enabled, sometimes give our peer the address it + // tells us that it sees us as in case it has a better idea of our + // address than we do. + FastRandomContext rng; + if (IsPeerAddrLocalGood(pnode) && (!addrLocal.IsRoutable() || + rng.randbits((GetnScore(addrLocal) > LOCAL_MANUAL) ? 3 : 1) == 0)) { - CAddress addrLocal = GetLocalAddress(&pnode->addr, pnode->GetLocalServices()); - if (gArgs.GetBoolArg("-addrmantest", false)) { - // use IPv4 loopback during addrmantest - addrLocal = CAddress(CService(LookupNumeric("127.0.0.1", GetListenPort())), pnode->GetLocalServices()); - } - // If discovery is enabled, sometimes give our peer the address it - // tells us that it sees us as in case it has a better idea of our - // address than we do. - FastRandomContext rng; - if (IsPeerAddrLocalGood(pnode) && (!addrLocal.IsRoutable() || - rng.randbits((GetnScore(addrLocal) > LOCAL_MANUAL) ? 3 : 1) == 0)) - { - addrLocal.SetIP(pnode->GetAddrLocal()); - } - if (addrLocal.IsRoutable() || gArgs.GetBoolArg("-addrmantest", false)) - { - LogPrint(BCLog::NET, "AdvertiseLocal: advertising address %s\n", addrLocal.ToString()); - pnode->PushAddress(addrLocal, rng); - } + addrLocal.SetIP(pnode->GetAddrLocal()); + } + if (addrLocal.IsRoutable() || gArgs.GetBoolArg("-addrmantest", false)) + { + LogPrint(BCLog::NET, "Advertising address %s to peer=%d\n", addrLocal.ToString(), pnode->GetId()); + return addrLocal; } + // Address is unroutable. Don't advertise. 
+ return nullopt; } // learn a new local address @@ -429,24 +428,26 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo // Connect bool connected = false; - SOCKET hSocket = INVALID_SOCKET; + std::unique_ptr<Sock> sock; proxyType proxy; if (addrConnect.IsValid()) { bool proxyConnectionFailed = false; if (GetProxy(addrConnect.GetNetwork(), proxy)) { - hSocket = CreateSocket(proxy.proxy); - if (hSocket == INVALID_SOCKET) { + sock = CreateSock(proxy.proxy); + if (!sock) { return nullptr; } - connected = ConnectThroughProxy(proxy, addrConnect.ToStringIP(), addrConnect.GetPort(), hSocket, nConnectTimeout, proxyConnectionFailed); + connected = ConnectThroughProxy(proxy, addrConnect.ToStringIP(), addrConnect.GetPort(), + *sock, nConnectTimeout, proxyConnectionFailed); } else { // no proxy needed (none set for target network) - hSocket = CreateSocket(addrConnect); - if (hSocket == INVALID_SOCKET) { + sock = CreateSock(addrConnect); + if (!sock) { return nullptr; } - connected = ConnectSocketDirectly(addrConnect, hSocket, nConnectTimeout, conn_type == ConnectionType::MANUAL); + connected = ConnectSocketDirectly(addrConnect, sock->Get(), nConnectTimeout, + conn_type == ConnectionType::MANUAL); } if (!proxyConnectionFailed) { // If a connection to the node was attempted, and failure (if any) is not caused by a problem connecting to @@ -454,26 +455,26 @@ CNode* CConnman::ConnectNode(CAddress addrConnect, const char *pszDest, bool fCo addrman.Attempt(addrConnect, fCountFailure); } } else if (pszDest && GetNameProxy(proxy)) { - hSocket = CreateSocket(proxy.proxy); - if (hSocket == INVALID_SOCKET) { + sock = CreateSock(proxy.proxy); + if (!sock) { return nullptr; } std::string host; int port = default_port; SplitHostPort(std::string(pszDest), port, host); bool proxyConnectionFailed; - connected = ConnectThroughProxy(proxy, host, port, hSocket, nConnectTimeout, proxyConnectionFailed); + connected = ConnectThroughProxy(proxy, host, port, *sock, nConnectTimeout, + proxyConnectionFailed); } if (!connected) { - CloseSocket(hSocket); return nullptr; } // Add node NodeId id = GetNewNodeId(); uint64_t nonce = GetDeterministicRandomizer(RANDOMIZER_ID_LOCALHOSTNONCE).Write(id).Finalize(); - CAddress addr_bind = GetBindAddress(hSocket); - CNode* pnode = new CNode(id, nLocalServices, hSocket, addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? pszDest : "", conn_type); + CAddress addr_bind = GetBindAddress(sock->Get()); + CNode* pnode = new CNode(id, nLocalServices, sock->Release(), addrConnect, CalculateKeyedNetGroup(addrConnect), nonce, addr_bind, pszDest ? pszDest : "", conn_type, /* inbound_onion */ false); pnode->AddRef(); // We're making a new connection, harvest entropy from the time (and our peer count) @@ -598,21 +599,8 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap) stats.minFeeFilter = 0; } - // It is common for nodes with good ping times to suddenly become lagged, - // due to a new block arriving or other large transfer. - // Merely reporting pingtime might fool the caller into thinking the node was still responsive, - // since pingtime does not update until the ping is complete, which might take a while. - // So, if a ping is taking an unusually long time in flight, - // the caller can immediately detect that this is happening. 
- std::chrono::microseconds ping_wait{0}; - if ((0 != nPingNonceSent) && (0 != m_ping_start.load().count())) { - ping_wait = GetTime<std::chrono::microseconds>() - m_ping_start.load(); - } - - // Raw ping time is in microseconds, but show it to user as whole seconds (Bitcoin users should be well used to small numbers with many decimal places by now :) - stats.m_ping_usec = nPingUsecTime; - stats.m_min_ping_usec = nMinPingUsecTime; - stats.m_ping_wait_usec = count_microseconds(ping_wait); + stats.m_ping_usec = m_last_ping_time; + stats.m_min_ping_usec = m_min_ping_time; // Leave string empty if addrLocal invalid (not filled in yet) CService addrLocalUnlocked = GetAddrLocal(); @@ -834,7 +822,7 @@ size_t CConnman::SocketSendData(CNode& node) const static bool ReverseCompareNodeMinPingTime(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { - return a.nMinPingUsecTime > b.nMinPingUsecTime; + return a.m_min_ping_time > b.m_min_ping_time; } static bool ReverseCompareNodeTimeConnected(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) @@ -989,7 +977,7 @@ bool CConnman::AttemptToEvictConnection() peer_relay_txes = node->m_tx_relay->fRelayTxes; peer_filter_not_null = node->m_tx_relay->pfilter != nullptr; } - NodeEvictionCandidate candidate = {node->GetId(), node->nTimeConnected, node->nMinPingUsecTime, + NodeEvictionCandidate candidate = {node->GetId(), node->nTimeConnected, node->m_min_ping_time, node->nLastBlockTime, node->nLastTXTime, HasAllDesirableServiceFlags(node->nServices), peer_relay_txes, peer_filter_not_null, node->nKeyedNetGroup, @@ -1218,18 +1206,17 @@ void CConnman::NotifyNumConnectionsChanged() } } +bool CConnman::RunInactivityChecks(const CNode& node) const +{ + return GetSystemTimeInSeconds() > node.nTimeConnected + m_peer_connect_timeout; +} + bool CConnman::InactivityCheck(const CNode& node) const { // Use non-mockable system time (otherwise these timers will pop when we // use setmocktime in the tests). int64_t now = GetSystemTimeInSeconds(); - if (now <= node.nTimeConnected + m_peer_connect_timeout) { - // Only run inactivity checks if the peer has been connected longer - // than m_peer_connect_timeout. - return false; - } - if (node.nLastRecv == 0 || node.nLastSend == 0) { LogPrint(BCLog::NET, "socket no message in first %i seconds, %d %d peer=%d\n", m_peer_connect_timeout, node.nLastRecv != 0, node.nLastSend != 0, node.GetId()); return true; @@ -1245,14 +1232,6 @@ bool CConnman::InactivityCheck(const CNode& node) const return true; } - if (node.nPingNonceSent && node.m_ping_start.load() + std::chrono::seconds{TIMEOUT_INTERVAL} < GetTime<std::chrono::microseconds>()) { - // We use mockable time for ping timeouts. This means that setmocktime - // may cause pings to time out for peers that have been connected for - // longer than m_peer_connect_timeout. 
- LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n", 0.000001 * count_microseconds(GetTime<std::chrono::microseconds>() - node.m_ping_start.load()), node.GetId()); - return true; - } - if (!node.fSuccessfullyConnected) { LogPrint(BCLog::NET, "version handshake timeout peer=%d\n", node.GetId()); return true; @@ -1534,7 +1513,7 @@ void CConnman::SocketHandler() if (bytes_sent) RecordBytesSent(bytes_sent); } - if (InactivityCheck(*pnode)) pnode->fDisconnect = true; + if (RunInactivityChecks(*pnode) && InactivityCheck(*pnode)) pnode->fDisconnect = true; } { LOCK(cs_vNodes); @@ -1769,11 +1748,19 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) } // Initiate network connections - int64_t nStart = GetTime(); + auto start = GetTime<std::chrono::seconds>(); // Minimum time before next feeler connection (in microseconds). - int64_t nNextFeeler = PoissonNextSend(nStart*1000*1000, FEELER_INTERVAL); - int64_t nNextExtraBlockRelay = PoissonNextSend(nStart*1000*1000, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); + + int64_t nNextFeeler = PoissonNextSend(count_microseconds(start), FEELER_INTERVAL); + int64_t nNextExtraBlockRelay = PoissonNextSend(count_microseconds(start), EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); + const bool dnsseed = gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED); + bool add_fixed_seeds = gArgs.GetBoolArg("-fixedseeds", DEFAULT_FIXEDSEEDS); + + if (!add_fixed_seeds) { + LogPrintf("Fixed seeds are disabled\n"); + } + while (!interruptNet) { ProcessAddrFetch(); @@ -1785,18 +1772,32 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) if (interruptNet) return; - // Add seed nodes if DNS seeds are all down (an infrastructure attack?). - // Note that we only do this if we started with an empty peers.dat, - // (in which case we will query DNS seeds immediately) *and* the DNS - // seeds have not returned any results. - if (addrman.size() == 0 && (GetTime() - nStart > 60)) { - static bool done = false; - if (!done) { - LogPrintf("Adding fixed seed nodes as DNS doesn't seem to be available.\n"); + if (add_fixed_seeds && addrman.size() == 0) { + // When the node starts with an empty peers.dat, there are a few other sources of peers before + // we fallback on to fixed seeds: -dnsseed, -seednode, -addnode + // If none of those are available, we fallback on to fixed seeds immediately, else we allow + // 60 seconds for any of those sources to populate addrman. + bool add_fixed_seeds_now = false; + // It is cheapest to check if enough time has passed first. + if (GetTime<std::chrono::seconds>() > start + std::chrono::minutes{1}) { + add_fixed_seeds_now = true; + LogPrintf("Adding fixed seeds as 60 seconds have passed and addrman is empty\n"); + } + + // Checking !dnsseed is cheaper before locking 2 mutexes. 
+ if (!add_fixed_seeds_now && !dnsseed) { + LOCK2(m_addr_fetches_mutex, cs_vAddedNodes); + if (m_addr_fetches.empty() && vAddedNodes.empty()) { + add_fixed_seeds_now = true; + LogPrintf("Adding fixed seeds as -dnsseed=0, -addnode is not provided and all -seednode(s) attempted\n"); + } + } + + if (add_fixed_seeds_now) { CNetAddr local; local.SetInternal("fixedseeds"); addrman.Add(convertSeed6(Params().FixedSeeds()), local); - done = true; + add_fixed_seeds = false; } } @@ -2188,9 +2189,8 @@ bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError, return false; } - SOCKET hListenSocket = CreateSocket(addrBind); - if (hListenSocket == INVALID_SOCKET) - { + std::unique_ptr<Sock> sock = CreateSock(addrBind); + if (!sock) { strError = strprintf(Untranslated("Error: Couldn't open socket for incoming connections (socket returned error %s)"), NetworkErrorString(WSAGetLastError())); LogPrintf("%s\n", strError.original); return false; @@ -2198,21 +2198,21 @@ bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError, // Allow binding if the port is still in TIME_WAIT state after // the program was closed and restarted. - setsockopt(hListenSocket, SOL_SOCKET, SO_REUSEADDR, (sockopt_arg_type)&nOne, sizeof(int)); + setsockopt(sock->Get(), SOL_SOCKET, SO_REUSEADDR, (sockopt_arg_type)&nOne, sizeof(int)); // some systems don't have IPV6_V6ONLY but are always v6only; others do have the option // and enable it by default or not. Try to enable it, if possible. if (addrBind.IsIPv6()) { #ifdef IPV6_V6ONLY - setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_V6ONLY, (sockopt_arg_type)&nOne, sizeof(int)); + setsockopt(sock->Get(), IPPROTO_IPV6, IPV6_V6ONLY, (sockopt_arg_type)&nOne, sizeof(int)); #endif #ifdef WIN32 int nProtLevel = PROTECTION_LEVEL_UNRESTRICTED; - setsockopt(hListenSocket, IPPROTO_IPV6, IPV6_PROTECTION_LEVEL, (const char*)&nProtLevel, sizeof(int)); + setsockopt(sock->Get(), IPPROTO_IPV6, IPV6_PROTECTION_LEVEL, (const char*)&nProtLevel, sizeof(int)); #endif } - if (::bind(hListenSocket, (struct sockaddr*)&sockaddr, len) == SOCKET_ERROR) + if (::bind(sock->Get(), (struct sockaddr*)&sockaddr, len) == SOCKET_ERROR) { int nErr = WSAGetLastError(); if (nErr == WSAEADDRINUSE) @@ -2220,21 +2220,19 @@ bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError, else strError = strprintf(_("Unable to bind to %s on this computer (bind returned error %s)"), addrBind.ToString(), NetworkErrorString(nErr)); LogPrintf("%s\n", strError.original); - CloseSocket(hListenSocket); return false; } LogPrintf("Bound to %s\n", addrBind.ToString()); // Listen for incoming connections - if (listen(hListenSocket, SOMAXCONN) == SOCKET_ERROR) + if (listen(sock->Get(), SOMAXCONN) == SOCKET_ERROR) { strError = strprintf(_("Error: Listening for incoming connections failed (listen returned error %s)"), NetworkErrorString(WSAGetLastError())); LogPrintf("%s\n", strError.original); - CloseSocket(hListenSocket); return false; } - vhListenSocket.push_back(ListenSocket(hListenSocket, permissions)); + vhListenSocket.push_back(ListenSocket(sock->Release(), permissions)); return true; } @@ -2434,7 +2432,7 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions) // Send and receive from sockets, accept connections threadSocketHandler = std::thread(&TraceThread<std::function<void()> >, "net", std::function<void()>(std::bind(&CConnman::ThreadSocketHandler, this))); - if (!gArgs.GetBoolArg("-dnsseed", true)) + if (!gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED)) 
LogPrintf("DNS seeding disabled\n"); else threadDNSAddressSeed = std::thread(&TraceThread<std::function<void()> >, "dnsseed", std::function<void()>(std::bind(&CConnman::ThreadDNSAddressSeed, this))); @@ -2833,12 +2831,12 @@ CNode::CNode(NodeId idIn, ServiceFlags nLocalServicesIn, SOCKET hSocketIn, const : nTimeConnected(GetSystemTimeInSeconds()), addr(addrIn), addrBind(addrBindIn), + m_inbound_onion(inbound_onion), nKeyedNetGroup(nKeyedNetGroupIn), id(idIn), nLocalHostNonce(nLocalHostNonceIn), m_conn_type(conn_type_in), - nLocalServices(nLocalServicesIn), - m_inbound_onion(inbound_onion) + nLocalServices(nLocalServicesIn) { if (inbound_onion) assert(conn_type_in == ConnectionType::INBOUND); hSocket = hSocketIn; @@ -2879,6 +2877,9 @@ void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg) { size_t nMessageSize = msg.data.size(); LogPrint(BCLog::NET, "sending %s (%d bytes) peer=%d\n", SanitizeString(msg.m_type), nMessageSize, pnode->GetId()); + if (gArgs.GetBoolArg("-capturemessages", false)) { + CaptureMessage(pnode->addr, msg.m_type, msg.data, /* incoming */ false); + } // make sure we use the appropriate network transport format std::vector<unsigned char> serializedHeader; @@ -2894,18 +2895,14 @@ void CConnman::PushMessage(CNode* pnode, CSerializedNetMsg&& msg) pnode->mapSendBytesPerMsgCmd[msg.m_type] += nTotalSize; pnode->nSendSize += nTotalSize; - if (pnode->nSendSize > nSendBufferMaxSize) - pnode->fPauseSend = true; + if (pnode->nSendSize > nSendBufferMaxSize) pnode->fPauseSend = true; pnode->vSendMsg.push_back(std::move(serializedHeader)); - if (nMessageSize) - pnode->vSendMsg.push_back(std::move(msg.data)); + if (nMessageSize) pnode->vSendMsg.push_back(std::move(msg.data)); // If write queue empty, attempt "optimistic write" - if (optimisticSend == true) - nBytesSent = SocketSendData(*pnode); + if (optimisticSend) nBytesSent = SocketSendData(*pnode); } - if (nBytesSent) - RecordBytesSent(nBytesSent); + if (nBytesSent) RecordBytesSent(nBytesSent); } bool CConnman::ForNode(NodeId id, std::function<bool(CNode* pnode)> func) @@ -2948,3 +2945,31 @@ uint64_t CConnman::CalculateKeyedNetGroup(const CAddress& ad) const return GetDeterministicRandomizer(RANDOMIZER_ID_NETGROUP).Write(vchNetGroup.data(), vchNetGroup.size()).Finalize(); } + +void CaptureMessage(const CAddress& addr, const std::string& msg_type, const Span<const unsigned char>& data, bool is_incoming) +{ + // Note: This function captures the message at the time of processing, + // not at socket receive/send time. + // This ensures that the messages are always in order from an application + // layer (processing) perspective. + auto now = GetTime<std::chrono::microseconds>(); + + // Windows folder names can not include a colon + std::string clean_addr = addr.ToString(); + std::replace(clean_addr.begin(), clean_addr.end(), ':', '_'); + + fs::path base_path = GetDataDir() / "message_capture" / clean_addr; + fs::create_directories(base_path); + + fs::path path = base_path / (is_incoming ? 
"msgs_recv.dat" : "msgs_sent.dat"); + CAutoFile f(fsbridge::fopen(path, "ab"), SER_DISK, CLIENT_VERSION); + + ser_writedata64(f, now.count()); + f.write(msg_type.data(), msg_type.length()); + for (auto i = msg_type.length(); i < CMessageHeader::COMMAND_SIZE; ++i) { + f << '\0'; + } + uint32_t size = data.size(); + ser_writedata32(f, size); + f.write((const char*)data.data(), data.size()); +} @@ -20,6 +20,7 @@ #include <policy/feerate.h> #include <protocol.h> #include <random.h> +#include <span.h> #include <streams.h> #include <sync.h> #include <threadinterrupt.h> @@ -75,8 +76,12 @@ static constexpr uint64_t DEFAULT_MAX_UPLOAD_TARGET = 0; static const bool DEFAULT_BLOCKSONLY = false; /** -peertimeout default */ static const int64_t DEFAULT_PEER_CONNECT_TIMEOUT = 60; +/** Number of file descriptors required for message capture **/ +static const int NUM_FDS_MESSAGE_CAPTURE = 1; static const bool DEFAULT_FORCEDNSSEED = false; +static const bool DEFAULT_DNSSEED = true; +static const bool DEFAULT_FIXEDSEEDS = true; static const size_t DEFAULT_MAXRECEIVEBUFFER = 5 * 1000; static const size_t DEFAULT_MAXSENDBUFFER = 1 * 1000; @@ -192,7 +197,8 @@ enum }; bool IsPeerAddrLocalGood(CNode *pnode); -void AdvertiseLocal(CNode *pnode); +/** Returns a local address that we should advertise to this peer */ +Optional<CAddress> GetLocalAddrForPeer(CNode *pnode); /** * Mark a network as reachable or unreachable (no automatic connects to it) @@ -255,7 +261,6 @@ public: mapMsgCmdSize mapRecvBytesPerMsgCmd; NetPermissionFlags m_permissionFlags; int64_t m_ping_usec; - int64_t m_ping_wait_usec; int64_t m_min_ping_usec; CAmount minFeeFilter; // Our address, as reported by the peer @@ -424,6 +429,8 @@ public: const CAddress addr; // Bind address of our side of the connection const CAddress addrBind; + //! Whether this peer is an inbound onion, i.e. connected via our Tor onion service. + const bool m_inbound_onion; std::atomic<int> nVersion{0}; RecursiveMutex cs_SubVer; /** @@ -442,6 +449,7 @@ public: * messages, implying a preference to receive ADDRv2 instead of ADDR ones. */ std::atomic_bool m_wants_addrv2{false}; + /** fSuccessfullyConnected is set to true on receiving VERACK from the peer. */ std::atomic_bool fSuccessfullyConnected{false}; // Setting fDisconnect to true will cause the node to be disconnected the // next time DisconnectNodes() runs @@ -583,19 +591,14 @@ public: * in CConnman::AttemptToEvictConnection. */ std::atomic<int64_t> nLastTXTime{0}; - // Ping time measurement: - // The pong reply we're expecting, or 0 if no pong expected. - std::atomic<uint64_t> nPingNonceSent{0}; - /** When the last ping was sent, or 0 if no ping was ever sent */ - std::atomic<std::chrono::microseconds> m_ping_start{0us}; - // Last measured round-trip time. - std::atomic<int64_t> nPingUsecTime{0}; - // Best measured round-trip time. - std::atomic<int64_t> nMinPingUsecTime{std::numeric_limits<int64_t>::max()}; - // Whether a ping is requested. - std::atomic<bool> fPingQueued{false}; - - CNode(NodeId id, ServiceFlags nLocalServicesIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress& addrBindIn, const std::string& addrNameIn, ConnectionType conn_type_in, bool inbound_onion = false); + /** Last measured round-trip time. Used only for RPC/GUI stats/debugging.*/ + std::atomic<int64_t> m_last_ping_time{0}; + + /** Lowest measured round-trip time. Used as an inbound peer eviction + * criterium in CConnman::AttemptToEvictConnection. 
*/ + std::atomic<int64_t> m_min_ping_time{std::numeric_limits<int64_t>::max()}; + + CNode(NodeId id, ServiceFlags nLocalServicesIn, SOCKET hSocketIn, const CAddress& addrIn, uint64_t nKeyedNetGroupIn, uint64_t nLocalHostNonceIn, const CAddress& addrBindIn, const std::string& addrNameIn, ConnectionType conn_type_in, bool inbound_onion); ~CNode(); CNode(const CNode&) = delete; CNode& operator=(const CNode&) = delete; @@ -713,8 +716,11 @@ public: std::string ConnectionTypeAsString() const { return ::ConnectionTypeAsString(m_conn_type); } - /** Whether this peer is an inbound onion, e.g. connected via our Tor onion service. */ - bool IsInboundOnion() const { return m_inbound_onion; } + /** A ping-pong round trip has completed successfully. Update latest and minimum ping times. */ + void PongReceived(std::chrono::microseconds ping_time) { + m_last_ping_time = count_microseconds(ping_time); + m_min_ping_time = std::min(m_min_ping_time.load(), count_microseconds(ping_time)); + } private: const NodeId id; @@ -748,9 +754,6 @@ private: CService addrLocal GUARDED_BY(cs_addrLocal); mutable RecursiveMutex cs_addrLocal; - //! Whether this peer is an inbound onion, e.g. connected via our Tor onion service. - const bool m_inbound_onion{false}; - mapMsgCmdSize mapSendBytesPerMsgCmd GUARDED_BY(cs_vSend); mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv); }; @@ -1020,6 +1023,9 @@ public: void SetAsmap(std::vector<bool> asmap) { addrman.m_asmap = std::move(asmap); } + /** Return true if the peer has been connected for long enough to do inactivity checks. */ + bool RunInactivityChecks(const CNode& node) const; + private: struct ListenSocket { public: @@ -1241,11 +1247,14 @@ inline std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, return std::chrono::microseconds{PoissonNextSend(now.count(), average_interval.count())}; } +/** Dump binary message to file, with timestamp */ +void CaptureMessage(const CAddress& addr, const std::string& msg_type, const Span<const unsigned char>& data, bool is_incoming); + struct NodeEvictionCandidate { NodeId id; int64_t nTimeConnected; - int64_t nMinPingUsecTime; + int64_t m_min_ping_time; int64_t nLastBlockTime; int64_t nLastTXTime; bool fRelevantServices; diff --git a/src/net_processing.cpp b/src/net_processing.cpp index b68453759a..c97f7ced46 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -169,6 +169,14 @@ void EraseOrphansFor(NodeId peer); // Internal stuff namespace { +/** Blocks that are in flight, and that are in the queue to be downloaded. */ +struct QueuedBlock { + uint256 hash; + const CBlockIndex* pindex; //!< Optional. + bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request. + std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Optional, used for CMPCTBLOCK downloads +}; + /** * Data structure for an individual peer. This struct is not protected by * cs_main since it does not contain validation-critical data. @@ -211,6 +219,13 @@ struct Peer { /** This peer's reported block height when we connected */ std::atomic<int> m_starting_height{-1}; + /** The pong reply we're expecting, or 0 if no pong expected. 
*/ + std::atomic<uint64_t> m_ping_nonce_sent{0}; + /** When the last ping was sent, or 0 if no ping was ever sent */ + std::atomic<std::chrono::microseconds> m_ping_start{0us}; + /** Whether a ping has been requested by the user */ + std::atomic<bool> m_ping_queued{false}; + /** Set of txids to reconsider once their parent transactions have been accepted **/ std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans); @@ -248,6 +263,7 @@ public: void CheckForStaleTipAndEvictPeers() override; bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) override; bool IgnoresIncomingTxs() override { return m_ignore_incoming_txs; } + void SendPings() override; void SetBestHeight(int height) override { m_best_height = height; }; void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message) override; void ProcessMessage(CNode& pfrom, const std::string& msg_type, CDataStream& vRecv, @@ -294,9 +310,10 @@ private: /** Maybe disconnect a peer and discourage future connections from its address. * * @param[in] pnode The node to check. + * @param[in] peer The peer object to check. * @return True if the peer was marked for disconnection in this function */ - bool MaybeDiscourageAndDisconnect(CNode& pnode); + bool MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer); void ProcessOrphanTx(std::set<uint256>& orphan_work_set) EXCLUSIVE_LOCKS_REQUIRED(cs_main, g_cs_orphans); /** Process a single headers message from a peer. */ @@ -315,6 +332,10 @@ private: /** Send a version message to a peer */ void PushNodeVersion(CNode& pnode, int64_t nTime); + /** Send a ping message every PING_INTERVAL or if requested via RPC. May + * mark the peer to be disconnected if a ping has timed out. */ + void MaybeSendPing(CNode& node_to, Peer& peer); + const CChainParams& m_chainparams; CConnman& m_connman; /** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */ @@ -345,10 +366,7 @@ private: * their own locks. */ std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex); -}; -} // namespace -namespace { /** Number of nodes with fSyncStarted. */ int nSyncStarted GUARDED_BY(cs_main) = 0; @@ -360,6 +378,14 @@ namespace { */ std::map<uint256, std::pair<NodeId, bool>> mapBlockSource GUARDED_BY(cs_main); + /** Number of peers with wtxid relay. */ + int m_wtxid_relay_peers GUARDED_BY(cs_main) = 0; + + /** Number of outbound peers with m_chain_sync.m_protect. */ + int m_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; + + bool AlreadyHaveTx(const GenTxid& gtxid) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + /** * Filter for transactions that were recently rejected by * AcceptToMemoryPool. These are not rerequested until the chain tip @@ -402,35 +428,36 @@ namespace { * We use this to avoid requesting transactions that have already been * confirnmed. */ - Mutex g_cs_recent_confirmed_transactions; - std::unique_ptr<CRollingBloomFilter> g_recent_confirmed_transactions GUARDED_BY(g_cs_recent_confirmed_transactions); - - /** Blocks that are in flight, and that are in the queue to be downloaded. */ - struct QueuedBlock { - uint256 hash; - const CBlockIndex* pindex; //!< Optional. - bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request. 
- std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Optional, used for CMPCTBLOCK downloads - }; - std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight GUARDED_BY(cs_main); + Mutex m_recent_confirmed_transactions_mutex; + std::unique_ptr<CRollingBloomFilter> m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex); - /** Stack of nodes which we have set to announce using compact blocks */ - std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main); + /* Returns a bool indicating whether we requested this block. + * Also used if a block was /not/ received and timed out or started with another peer + */ + bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - /** Number of preferable block download peers. */ - int nPreferredDownload GUARDED_BY(cs_main) = 0; + /* Mark a block as in flight + * Returns false, still setting pit, if the block was already in flight from the same peer + * pit will only be valid as long as the same cs_main lock is being held + */ + bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - /** Number of peers from which we're downloading blocks. */ - int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0; + bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main); - /** Number of peers with wtxid relay. */ - int g_wtxid_relay_peers GUARDED_BY(cs_main) = 0; + /** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has + * at most count entries. + */ + void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) EXCLUSIVE_LOCKS_REQUIRED(cs_main); - /** Number of outbound peers with m_chain_sync.m_protect. */ - int g_outbound_peers_with_protect_from_disconnect GUARDED_BY(cs_main) = 0; + std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> > mapBlocksInFlight GUARDED_BY(cs_main); /** When our tip was last updated. */ - std::atomic<int64_t> g_last_tip_update(0); + std::atomic<int64_t> m_last_tip_update{0}; + + /** Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). */ + CTransactionRef FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main); + + void ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(!cs_main, peer.m_getdata_requests_mutex); /** Relay map (txid or wtxid -> CTransactionRef) */ typedef std::map<uint256, CTransactionRef> MapRelay; @@ -438,6 +465,28 @@ namespace { /** Expiration-time ordered list of (expire time, relay map entry) pairs. */ std::deque<std::pair<int64_t, MapRelay::iterator>> vRelayExpiration GUARDED_BY(cs_main); + /** + * When a peer sends us a valid block, instruct it to announce blocks to us + * using CMPCTBLOCK if possible by adding its nodeid to the end of + * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by + * removing the first element if necessary. 
+ */ + void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + /** Stack of nodes which we have set to announce using compact blocks */ + std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main); + + /** Number of peers from which we're downloading blocks. */ + int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0; + +}; +} // namespace + +namespace { + + /** Number of preferable block download peers. */ + int nPreferredDownload GUARDED_BY(cs_main) = 0; + struct IteratorComparator { template<typename I> @@ -610,9 +659,8 @@ static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUS nPreferredDownload += state->fPreferredDownload; } -// Returns a bool indicating whether we requested this block. -// Also used if a block was /not/ received and timed out or started with another peer -static bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { +bool PeerManagerImpl::MarkBlockAsReceived(const uint256& hash) +{ std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash); if (itInFlight != mapBlocksInFlight.end()) { CNodeState *state = State(itInFlight->second.first); @@ -635,9 +683,8 @@ static bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs return false; } -// returns false, still setting pit, if the block was already in flight from the same peer -// pit will only be valid as long as the same cs_main lock is being held -static bool MarkBlockAsInFlight(CTxMemPool& mempool, NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { +bool PeerManagerImpl::MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex, std::list<QueuedBlock>::iterator** pit) +{ CNodeState *state = State(nodeid); assert(state != nullptr); @@ -654,7 +701,7 @@ static bool MarkBlockAsInFlight(CTxMemPool& mempool, NodeId nodeid, const uint25 MarkBlockAsReceived(hash); std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), - {hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&mempool) : nullptr)}); + {hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)}); state->nBlocksInFlight++; state->nBlocksInFlightValidHeaders += it->fValidatedHeaders; if (state->nBlocksInFlight == 1) { @@ -705,13 +752,7 @@ static void UpdateBlockAvailability(NodeId nodeid, const uint256 &hash) EXCLUSIV } } -/** - * When a peer sends us a valid block, instruct it to announce blocks to us - * using CMPCTBLOCK if possible by adding its nodeid to the end of - * lNodesAnnouncingHeaderAndIDs, and keeping that list under a certain size by - * removing the first element if necessary. 
- */ -static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman& connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) { AssertLockHeld(cs_main); CNodeState* nodestate = State(nodeid); @@ -727,21 +768,21 @@ static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman& connma return; } } - connman.ForNode(nodeid, [&connman](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { + m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { AssertLockHeld(::cs_main); uint64_t nCMPCTBLOCKVersion = (pfrom->GetLocalServices() & NODE_WITNESS) ? 2 : 1; if (lNodesAnnouncingHeaderAndIDs.size() >= 3) { // As per BIP152, we only get 3 of our peers to announce // blocks using compact encodings. - connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, nCMPCTBLOCKVersion](CNode* pnodeStop){ - connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion)); + m_connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [this, nCMPCTBLOCKVersion](CNode* pnodeStop){ + m_connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion)); // save BIP152 bandwidth state: we select peer to be low-bandwidth pnodeStop->m_bip152_highbandwidth_to = false; return true; }); lNodesAnnouncingHeaderAndIDs.pop_front(); } - connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion)); + m_connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion)); // save BIP152 bandwidth state: we select peer to be high-bandwidth pfrom->m_bip152_highbandwidth_to = true; lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId()); @@ -750,13 +791,14 @@ static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman& connma } } -static bool TipMayBeStale(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +bool PeerManagerImpl::TipMayBeStale() { AssertLockHeld(cs_main); - if (g_last_tip_update == 0) { - g_last_tip_update = GetTime(); + const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); + if (m_last_tip_update == 0) { + m_last_tip_update = GetTime(); } - return g_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty(); + return m_last_tip_update < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty(); } static bool CanDirectFetch(const Consensus::Params &consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) @@ -773,9 +815,7 @@ static bool PeerHasHeader(CNodeState *state, const CBlockIndex *pindex) EXCLUSIV return false; } -/** Update pindexLastCommonBlock and add not-in-flight missing successors to vBlocks, until it has - * at most count entries. 
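
The SENDCMPCT selection logic above keeps at most three peers announcing blocks with compact encodings (BIP 152): choosing a new high-bandwidth peer first demotes the oldest one and then drops it from the list. Reduced to the container pattern alone (names here are illustrative):

    #include <cstddef>
    #include <list>

    using NodeId = long long;

    constexpr std::size_t MAX_CMPCT_ANNOUNCERS{3};
    std::list<NodeId> announcers; // oldest selection at the front, newest at the back

    void SelectHighBandwidthPeer(NodeId id)
    {
        if (announcers.size() >= MAX_CMPCT_ANNOUNCERS) {
            // The real code sends SENDCMPCT(false) to the evicted peer before removing it.
            announcers.pop_front();
        }
        announcers.push_back(id);
    }
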
*/ -static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller, const Consensus::Params& consensusParams) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +void PeerManagerImpl::FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vector<const CBlockIndex*>& vBlocks, NodeId& nodeStaller) { if (count == 0) return; @@ -804,6 +844,7 @@ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vec if (state->pindexLastCommonBlock == state->pindexBestKnownBlock) return; + const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); std::vector<const CBlockIndex*> vToFetch; const CBlockIndex *pindexWalk = state->pindexLastCommonBlock; // Never fetch further than the best block we know the peer has, or more than BLOCK_DOWNLOAD_WINDOW + 1 beyond the last @@ -911,7 +952,7 @@ void PeerManagerImpl::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, auto delay = std::chrono::microseconds{0}; const bool preferred = state->fPreferredDownload; if (!preferred) delay += NONPREF_PEER_TX_DELAY; - if (!gtxid.IsWtxid() && g_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY; + if (!gtxid.IsWtxid() && m_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY; const bool overloaded = !node.HasPermission(PF_RELAY) && m_txrequest.CountInFlight(nodeid) >= MAX_PEER_TX_REQUEST_IN_FLIGHT; if (overloaded) delay += OVERLOADED_PEER_TX_DELAY; @@ -1004,10 +1045,10 @@ void PeerManagerImpl::FinalizeNode(const CNode& node, bool& fUpdateConnectionTim nPreferredDownload -= state->fPreferredDownload; nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0); assert(nPeersWithValidatedDownloads >= 0); - g_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; - assert(g_outbound_peers_with_protect_from_disconnect >= 0); - g_wtxid_relay_peers -= state->m_wtxid_relay; - assert(g_wtxid_relay_peers >= 0); + m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; + assert(m_outbound_peers_with_protect_from_disconnect >= 0); + m_wtxid_relay_peers -= state->m_wtxid_relay; + assert(m_wtxid_relay_peers >= 0); mapNodeState.erase(nodeid); @@ -1016,8 +1057,8 @@ void PeerManagerImpl::FinalizeNode(const CNode& node, bool& fUpdateConnectionTim assert(mapBlocksInFlight.empty()); assert(nPreferredDownload == 0); assert(nPeersWithValidatedDownloads == 0); - assert(g_outbound_peers_with_protect_from_disconnect == 0); - assert(g_wtxid_relay_peers == 0); + assert(m_outbound_peers_with_protect_from_disconnect == 0); + assert(m_wtxid_relay_peers == 0); assert(m_txrequest.Size() == 0); } LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid); @@ -1060,6 +1101,18 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) PeerRef peer = GetPeerRef(nodeid); if (peer == nullptr) return false; stats.m_starting_height = peer->m_starting_height; + // It is common for nodes with good ping times to suddenly become lagged, + // due to a new block arriving or other large transfer. + // Merely reporting pingtime might fool the caller into thinking the node was still responsive, + // since pingtime does not update until the ping is complete, which might take a while. + // So, if a ping is taking an unusually long time in flight, + // the caller can immediately detect that this is happening. 
+ std::chrono::microseconds ping_wait{0}; + if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())) { + ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load(); + } + + stats.m_ping_wait_usec = count_microseconds(ping_wait); return true; } @@ -1344,7 +1397,7 @@ PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& conn // The false positive rate of 1/1M should come out to less than 1 // transaction per day that would be inadvertently ignored (which is the // same probability that we have in the reject filter). - g_recent_confirmed_transactions.reset(new CRollingBloomFilter(48000, 0.000001)); + m_recent_confirmed_transactions.reset(new CRollingBloomFilter(48000, 0.000001)); // Stale tip checking and peer eviction are on two different timers, but we // don't want them to get out of sync due to drift in the scheduler, so we @@ -1394,14 +1447,14 @@ void PeerManagerImpl::BlockConnected(const std::shared_ptr<const CBlock>& pblock LogPrint(BCLog::MEMPOOL, "Erased %d orphan tx included or conflicted by block\n", nErased); } - g_last_tip_update = GetTime(); + m_last_tip_update = GetTime(); } { - LOCK(g_cs_recent_confirmed_transactions); + LOCK(m_recent_confirmed_transactions_mutex); for (const auto& ptx : pblock->vtx) { - g_recent_confirmed_transactions->insert(ptx->GetHash()); + m_recent_confirmed_transactions->insert(ptx->GetHash()); if (ptx->GetHash() != ptx->GetWitnessHash()) { - g_recent_confirmed_transactions->insert(ptx->GetWitnessHash()); + m_recent_confirmed_transactions->insert(ptx->GetWitnessHash()); } } } @@ -1424,8 +1477,8 @@ void PeerManagerImpl::BlockDisconnected(const std::shared_ptr<const CBlock> &blo // block's worth of transactions in it, but that should be fine, since // presumably the most common case of relaying a confirmed transaction // should be just after a new block containing it is found. 
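
The comment above justifies the 1/1,000,000 false-positive rate for the renamed m_recent_confirmed_transactions filter. As a back-of-the-envelope check, the expected number of wrongly-ignored transactions per day is simply the announcement volume multiplied by that rate; the 500,000/day volume below is an assumed figure for illustration, not a measured one:

    constexpr double FALSE_POSITIVE_RATE{0.000001};            // value passed to CRollingBloomFilter above
    constexpr double ASSUMED_ANNOUNCEMENTS_PER_DAY{500'000.0}; // illustrative daily volume
    constexpr double EXPECTED_FALSE_POSITIVES_PER_DAY{
        ASSUMED_ANNOUNCEMENTS_PER_DAY * FALSE_POSITIVE_RATE};  // 0.5, i.e. under one per day
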
- LOCK(g_cs_recent_confirmed_transactions); - g_recent_confirmed_transactions->reset(); + LOCK(m_recent_confirmed_transactions_mutex); + m_recent_confirmed_transactions->reset(); } // All of the following cache a recent block, and are protected by cs_most_recent_block @@ -1550,7 +1603,7 @@ void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationSta !::ChainstateActive().IsInitialBlockDownload() && mapBlocksInFlight.count(hash) == mapBlocksInFlight.size()) { if (it != mapBlockSource.end()) { - MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first, m_connman); + MaybeSetPeerAsAnnouncingHeaderAndIDs(it->second.first); } } if (it != mapBlockSource.end()) @@ -1563,7 +1616,7 @@ void PeerManagerImpl::BlockChecked(const CBlock& block, const BlockValidationSta // -bool static AlreadyHaveTx(const GenTxid& gtxid, const CTxMemPool& mempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +bool PeerManagerImpl::AlreadyHaveTx(const GenTxid& gtxid) { assert(recentRejects); if (::ChainActive().Tip()->GetBlockHash() != hashRecentRejectsChainTip) { @@ -1587,11 +1640,11 @@ bool static AlreadyHaveTx(const GenTxid& gtxid, const CTxMemPool& mempool) EXCLU } { - LOCK(g_cs_recent_confirmed_transactions); - if (g_recent_confirmed_transactions->contains(hash)) return true; + LOCK(m_recent_confirmed_transactions_mutex); + if (m_recent_confirmed_transactions->contains(hash)) return true; } - return recentRejects->contains(hash) || mempool.exists(gtxid); + return recentRejects->contains(hash) || m_mempool.exists(gtxid); } bool static AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) @@ -1599,6 +1652,12 @@ bool static AlreadyHaveBlock(const uint256& block_hash) EXCLUSIVE_LOCKS_REQUIRED return g_chainman.m_blockman.LookupBlockIndex(block_hash) != nullptr; } +void PeerManagerImpl::SendPings() +{ + LOCK(m_peer_mutex); + for(auto& it : m_peer_map) it.second->m_ping_queued = true; +} + void RelayTransaction(const uint256& txid, const uint256& wtxid, const CConnman& connman) { connman.ForEachNode([&txid, &wtxid](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { @@ -1825,10 +1884,9 @@ void static ProcessGetBlockData(CNode& pfrom, Peer& peer, const CChainParams& ch } } -//! Determine whether or not a peer can request a transaction, and return it (or nullptr if not found or not allowed). 
-static CTransactionRef FindTxForGetData(const CTxMemPool& mempool, const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) LOCKS_EXCLUDED(cs_main) +CTransactionRef PeerManagerImpl::FindTxForGetData(const CNode& peer, const GenTxid& gtxid, const std::chrono::seconds mempool_req, const std::chrono::seconds now) { - auto txinfo = mempool.info(gtxid); + auto txinfo = m_mempool.info(gtxid); if (txinfo.tx) { // If a TX could have been INVed in reply to a MEMPOOL request, // or is older than UNCONDITIONAL_RELAY_DELAY, permit the request @@ -1853,7 +1911,7 @@ static CTransactionRef FindTxForGetData(const CTxMemPool& mempool, const CNode& return {}; } -void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainparams, CConnman& connman, CTxMemPool& mempool, const std::atomic<bool>& interruptMsgProc) EXCLUSIVE_LOCKS_REQUIRED(!cs_main, peer.m_getdata_requests_mutex) +void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic<bool>& interruptMsgProc) { AssertLockNotHeld(cs_main); @@ -1882,17 +1940,17 @@ void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainpa continue; } - CTransactionRef tx = FindTxForGetData(mempool, pfrom, ToGenTxid(inv), mempool_req, now); + CTransactionRef tx = FindTxForGetData(pfrom, ToGenTxid(inv), mempool_req, now); if (tx) { // WTX and WITNESS_TX imply we serialize with witness int nSendFlags = (inv.IsMsgTx() ? SERIALIZE_TRANSACTION_NO_WITNESS : 0); - connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx)); - mempool.RemoveUnbroadcastTx(tx->GetHash()); + m_connman.PushMessage(&pfrom, msgMaker.Make(nSendFlags, NetMsgType::TX, *tx)); + m_mempool.RemoveUnbroadcastTx(tx->GetHash()); // As we're going to send tx, make sure its unconfirmed parents are made requestable. std::vector<uint256> parent_ids_to_add; { - LOCK(mempool.cs); - auto txiter = mempool.GetIter(tx->GetHash()); + LOCK(m_mempool.cs); + auto txiter = m_mempool.GetIter(tx->GetHash()); if (txiter) { const CTxMemPoolEntry::Parents& parents = (*txiter)->GetMemPoolParentsConst(); parent_ids_to_add.reserve(parents.size()); @@ -1920,7 +1978,7 @@ void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainpa if (it != peer.m_getdata_requests.end() && !pfrom.fPauseSend) { const CInv &inv = *it++; if (inv.IsGenBlkMsg()) { - ProcessGetBlockData(pfrom, peer, chainparams, inv, connman); + ProcessGetBlockData(pfrom, peer, m_chainparams, inv, m_connman); } // else: If the first item on the queue is an unknown type, we erase it // and continue processing the queue on the next call. @@ -1943,7 +2001,7 @@ void static ProcessGetData(CNode& pfrom, Peer& peer, const CChainParams& chainpa // In normal operation, we often send NOTFOUND messages for parents of // transactions that we relay; if a peer is missing a parent, they may // assume we have them and request the parents from us. 
- connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound)); + m_connman.PushMessage(&pfrom, msgMaker.Make(NetMsgType::NOTFOUND, vNotFound)); } } @@ -2102,7 +2160,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer, } uint32_t nFetchFlags = GetFetchFlags(pfrom); vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); - MarkBlockAsInFlight(m_mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex); + MarkBlockAsInFlight(pfrom.GetId(), pindex->GetBlockHash(), pindex); LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", pindex->GetBlockHash().ToString(), pfrom.GetId()); } @@ -2146,10 +2204,10 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer, // thus always subject to eviction under the bad/lagging chain logic. // See ChainSyncTimeoutState. if (!pfrom.fDisconnect && pfrom.IsFullOutboundConn() && nodestate->pindexBestKnownBlock != nullptr) { - if (g_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) { + if (m_outbound_peers_with_protect_from_disconnect < MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT && nodestate->pindexBestKnownBlock->nChainWork >= ::ChainActive().Tip()->nChainWork && !nodestate->m_chain_sync.m_protect) { LogPrint(BCLog::NET, "Protecting outbound peer=%d from eviction\n", pfrom.GetId()); nodestate->m_chain_sync.m_protect = true; - ++g_outbound_peers_with_protect_from_disconnect; + ++m_outbound_peers_with_protect_from_disconnect; } } } @@ -2178,10 +2236,10 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set) if (orphan_it == mapOrphanTransactions.end()) continue; const CTransactionRef porphanTx = orphan_it->second.tx; - TxValidationState state; - std::list<CTransactionRef> removed_txn; + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), m_mempool, porphanTx, false /* bypass_limits */); + const TxValidationState& state = result.m_state; - if (AcceptToMemoryPool(m_mempool, state, porphanTx, &removed_txn, false /* bypass_limits */)) { + if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { LogPrint(BCLog::MEMPOOL, " accepted orphan tx %s\n", orphanHash.ToString()); RelayTransaction(orphanHash, porphanTx->GetWitnessHash(), m_connman); for (unsigned int i = 0; i < porphanTx->vout.size(); i++) { @@ -2193,7 +2251,7 @@ void PeerManagerImpl::ProcessOrphanTx(std::set<uint256>& orphan_work_set) } } EraseOrphanTx(orphanHash); - for (const CTransactionRef& removedTx : removed_txn) { + for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) { AddToCompactExtraTransactions(removedTx); } break; @@ -2489,6 +2547,9 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, bool fRelay = true; vRecv >> nVersion >> nServiceInt >> nTime >> addrMe; + if (nTime < 0) { + nTime = 0; + } nServices = ServiceFlags(nServiceInt); if (!pfrom.IsInboundConn()) { @@ -2745,12 +2806,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } - // Feature negotiation of wtxidrelay must happen between VERSION and VERACK - // to avoid relay problems from switching after a connection is up. + // BIP339 defines feature negotiation of wtxidrelay, which must happen between + // VERSION and VERACK to avoid relay problems from switching after a connection is up. 
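
The two comments above pin both feature negotiations to the window between VERSION and VERACK. One possible ordering of the resulting handshake, simplified and not exhaustive, is sketched below; wtxidrelay (BIP 339) is only sent for sufficiently new protocol versions, and either message arriving after VERACK triggers the disconnect paths in the following hunks:

    // -> version
    // <- version
    // -> wtxidrelay     (BIP 339, if the negotiated protocol version supports it)
    // -> sendaddrv2     (BIP 155)
    // -> verack
    // <- wtxidrelay, sendaddrv2 (both optional), then verack
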
if (msg_type == NetMsgType::WTXIDRELAY) { if (pfrom.fSuccessfullyConnected) { - // Disconnect peers that send wtxidrelay message after VERACK; this - // must be negotiated between VERSION and VERACK. + // Disconnect peers that send a wtxidrelay message after VERACK. LogPrint(BCLog::NET, "wtxidrelay received after verack from peer=%d; disconnecting\n", pfrom.GetId()); pfrom.fDisconnect = true; return; @@ -2759,7 +2819,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, LOCK(cs_main); if (!State(pfrom.GetId())->m_wtxid_relay) { State(pfrom.GetId())->m_wtxid_relay = true; - g_wtxid_relay_peers++; + m_wtxid_relay_peers++; } else { LogPrint(BCLog::NET, "ignoring duplicate wtxidrelay from peer=%d\n", pfrom.GetId()); } @@ -2769,10 +2829,11 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } + // BIP155 defines feature negotiation of addrv2 and sendaddrv2, which must happen + // between VERSION and VERACK. if (msg_type == NetMsgType::SENDADDRV2) { if (pfrom.fSuccessfullyConnected) { - // Disconnect peers that send SENDADDRV2 message after VERACK; this - // must be negotiated between VERSION and VERACK. + // Disconnect peers that send a SENDADDRV2 message after VERACK. LogPrint(BCLog::NET, "sendaddrv2 received after verack from peer=%d; disconnecting\n", pfrom.GetId()); pfrom.fDisconnect = true; return; @@ -2901,7 +2962,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, } } else if (inv.IsGenTxMsg()) { const GenTxid gtxid = ToGenTxid(inv); - const bool fAlreadyHave = AlreadyHaveTx(gtxid, m_mempool); + const bool fAlreadyHave = AlreadyHaveTx(gtxid); LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); pfrom.AddKnownTx(inv.hash); @@ -2943,7 +3004,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, { LOCK(peer->m_getdata_requests_mutex); peer->m_getdata_requests.insert(peer->m_getdata_requests.end(), vInv.begin(), vInv.end()); - ProcessGetData(pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc); + ProcessGetData(pfrom, *peer, interruptMsgProc); } return; @@ -3182,7 +3243,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // already; and an adversary can already relay us old transactions // (older than our recency filter) if trying to DoS us, without any need // for witness malleation. - if (AlreadyHaveTx(GenTxid(/* is_wtxid=*/true, wtxid), m_mempool)) { + if (AlreadyHaveTx(GenTxid(/* is_wtxid=*/true, wtxid))) { if (pfrom.HasPermission(PF_FORCERELAY)) { // Always relay transactions received from peers with forcerelay // permission, even if they were already in the mempool, allowing @@ -3197,10 +3258,10 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } - TxValidationState state; - std::list<CTransactionRef> lRemovedTxn; + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), m_mempool, ptx, false /* bypass_limits */); + const TxValidationState& state = result.m_state; - if (AcceptToMemoryPool(m_mempool, state, ptx, &lRemovedTxn, false /* bypass_limits */)) { + if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) { m_mempool.check(&::ChainstateActive().CoinsTip()); // As this version of the transaction was acceptable, we can forget about any // requests for it. 
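
Both call sites above now consume the MempoolAcceptResult returned by AcceptToMemoryPool rather than filling a TxValidationState and a replaced-transaction list through out-parameters. The usage pattern, shown in isolation (it assumes the surrounding PeerManagerImpl context and elides everything but the result handling):

    const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), m_mempool, ptx,
                                                          false /* bypass_limits */);
    if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
        // Accepted: transactions it replaced become candidates for compact-block reconstruction.
        for (const CTransactionRef& replaced : result.m_replaced_transactions.value()) {
            AddToCompactExtraTransactions(replaced);
        }
    } else {
        // Rejected (or missing inputs): the reason travels in result.m_state.
        const TxValidationState& state = result.m_state;
        (void)state; // e.g. inspect state.GetResult() for the rejection class
    }
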
@@ -3223,7 +3284,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, tx.GetHash().ToString(), m_mempool.size(), m_mempool.DynamicMemoryUsage() / 1000); - for (const CTransactionRef& removedTx : lRemovedTxn) { + for (const CTransactionRef& removedTx : result.m_replaced_transactions.value()) { AddToCompactExtraTransactions(removedTx); } @@ -3261,7 +3322,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // protocol for getting all unconfirmed parents. const GenTxid gtxid{/* is_wtxid=*/false, parent_txid}; pfrom.AddKnownTx(parent_txid); - if (!AlreadyHaveTx(gtxid, m_mempool)) AddTxAnnouncement(pfrom, gtxid, current_time); + if (!AlreadyHaveTx(gtxid)) AddTxAnnouncement(pfrom, gtxid, current_time); } AddOrphanTx(ptx, pfrom.GetId()); @@ -3451,7 +3512,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || (fAlreadyInFlight && blockInFlightIt->second.first == pfrom.GetId())) { std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr; - if (!MarkBlockAsInFlight(m_mempool, pfrom.GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) { + if (!MarkBlockAsInFlight(pfrom.GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) { if (!(*queuedBlockIt)->partialBlock) (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool)); else { @@ -3807,15 +3868,14 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, vRecv >> nonce; // Only process pong message if there is an outstanding ping (old ping without nonce should never pong) - if (pfrom.nPingNonceSent != 0) { - if (nonce == pfrom.nPingNonceSent) { + if (peer->m_ping_nonce_sent != 0) { + if (nonce == peer->m_ping_nonce_sent) { // Matching pong received, this ping is no longer outstanding bPingFinished = true; - const auto ping_time = ping_end - pfrom.m_ping_start.load(); + const auto ping_time = ping_end - peer->m_ping_start.load(); if (ping_time.count() >= 0) { - // Successful ping time measurement, replace previous - pfrom.nPingUsecTime = count_microseconds(ping_time); - pfrom.nMinPingUsecTime = std::min(pfrom.nMinPingUsecTime.load(), count_microseconds(ping_time)); + // Let connman know about this successful ping-pong + pfrom.PongReceived(ping_time); } else { // This should never happen sProblem = "Timing mishap"; @@ -3842,12 +3902,12 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, LogPrint(BCLog::NET, "pong peer=%d: %s, %x expected, %x received, %u bytes\n", pfrom.GetId(), sProblem, - pfrom.nPingNonceSent, + peer->m_ping_nonce_sent, nonce, nAvail); } if (bPingFinished) { - pfrom.nPingNonceSent = 0; + peer->m_ping_nonce_sent = 0; } return; } @@ -3966,43 +4026,40 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, return; } -bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode) +bool PeerManagerImpl::MaybeDiscourageAndDisconnect(CNode& pnode, Peer& peer) { - const NodeId peer_id{pnode.GetId()}; - PeerRef peer = GetPeerRef(peer_id); - if (peer == nullptr) return false; - { - LOCK(peer->m_misbehavior_mutex); + LOCK(peer.m_misbehavior_mutex); // There's nothing to do if the m_should_discourage flag isn't set - if (!peer->m_should_discourage) return false; + if (!peer.m_should_discourage) return false; - peer->m_should_discourage = false; + peer.m_should_discourage = false; } // peer.m_misbehavior_mutex if (pnode.HasPermission(PF_NOBAN)) { // We 
never disconnect or discourage peers for bad behavior if they have the NOBAN permission flag - LogPrintf("Warning: not punishing noban peer %d!\n", peer_id); + LogPrintf("Warning: not punishing noban peer %d!\n", peer.m_id); return false; } if (pnode.IsManualConn()) { // We never disconnect or discourage manual peers for bad behavior - LogPrintf("Warning: not punishing manually connected peer %d!\n", peer_id); + LogPrintf("Warning: not punishing manually connected peer %d!\n", peer.m_id); return false; } if (pnode.addr.IsLocal()) { // We disconnect local peers for bad behavior but don't discourage (since that would discourage // all peers on the same local address) - LogPrintf("Warning: disconnecting but not discouraging local peer %d!\n", peer_id); + LogPrint(BCLog::NET, "Warning: disconnecting but not discouraging %s peer %d!\n", + pnode.m_inbound_onion ? "inbound onion" : "local", peer.m_id); pnode.fDisconnect = true; return true; } // Normal case: Disconnect the peer and discourage all nodes sharing the address - LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer_id); + LogPrint(BCLog::NET, "Disconnecting and discouraging peer %d!\n", peer.m_id); if (m_banman) m_banman->Discourage(pnode.addr); m_connman.DisconnectNode(pnode.addr); return true; @@ -4018,7 +4075,7 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt { LOCK(peer->m_getdata_requests_mutex); if (!peer->m_getdata_requests.empty()) { - ProcessGetData(*pfrom, *peer, m_chainparams, m_connman, m_mempool, interruptMsgProc); + ProcessGetData(*pfrom, *peer, interruptMsgProc); } } @@ -4045,14 +4102,12 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt } // Don't bother if send buffer is too full to respond anyway - if (pfrom->fPauseSend) - return false; + if (pfrom->fPauseSend) return false; std::list<CNetMessage> msgs; { LOCK(pfrom->cs_vProcessMsg); - if (pfrom->vProcessMsg.empty()) - return false; + if (pfrom->vProcessMsg.empty()) return false; // Just take one message msgs.splice(msgs.begin(), pfrom->vProcessMsg, pfrom->vProcessMsg.begin()); pfrom->nProcessQueueSize -= msgs.front().m_raw_message_size; @@ -4061,6 +4116,10 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt } CNetMessage& msg(msgs.front()); + if (gArgs.GetBoolArg("-capturemessages", false)) { + CaptureMessage(pfrom->addr, msg.m_command, MakeUCharSpan(msg.m_recv), /* incoming */ true); + } + msg.SetVersion(pfrom->GetCommonVersion()); const std::string& msg_type = msg.m_command; @@ -4248,8 +4307,8 @@ void PeerManagerImpl::CheckForStaleTipAndEvictPeers() if (time_in_seconds > m_stale_tip_check_time) { // Check whether our tip is stale, and if so, allow using an extra // outbound peer - if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale(m_chainparams.GetConsensus())) { - LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - g_last_tip_update); + if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) { + LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", time_in_seconds - m_last_tip_update); m_connman.SetTryNewOutboundPeer(true); } else if (m_connman.GetTryNewOutboundPeer()) { m_connman.SetTryNewOutboundPeer(false); @@ -4263,6 +4322,50 @@ void PeerManagerImpl::CheckForStaleTipAndEvictPeers() } 
} +void PeerManagerImpl::MaybeSendPing(CNode& node_to, Peer& peer) +{ + // Use mockable time for ping timeouts. + // This means that setmocktime may cause pings to time out. + auto now = GetTime<std::chrono::microseconds>(); + + if (m_connman.RunInactivityChecks(node_to) && peer.m_ping_nonce_sent && + now > peer.m_ping_start.load() + std::chrono::seconds{TIMEOUT_INTERVAL}) { + LogPrint(BCLog::NET, "ping timeout: %fs peer=%d\n", 0.000001 * count_microseconds(now - peer.m_ping_start.load()), peer.m_id); + node_to.fDisconnect = true; + return; + } + + const CNetMsgMaker msgMaker(node_to.GetCommonVersion()); + bool pingSend = false; + + if (peer.m_ping_queued) { + // RPC ping request by user + pingSend = true; + } + + if (peer.m_ping_nonce_sent == 0 && now > peer.m_ping_start.load() + PING_INTERVAL) { + // Ping automatically sent as a latency probe & keepalive. + pingSend = true; + } + + if (pingSend) { + uint64_t nonce = 0; + while (nonce == 0) { + GetRandBytes((unsigned char*)&nonce, sizeof(nonce)); + } + peer.m_ping_queued = false; + peer.m_ping_start = now; + if (node_to.GetCommonVersion() > BIP0031_VERSION) { + peer.m_ping_nonce_sent = nonce; + m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING, nonce)); + } else { + // Peer is too old to support ping command with nonce, pong will never arrive. + peer.m_ping_nonce_sent = 0; + m_connman.PushMessage(&node_to, msgMaker.Make(NetMsgType::PING)); + } + } +} + namespace { class CompareInvMempoolOrder { @@ -4287,11 +4390,12 @@ public: bool PeerManagerImpl::SendMessages(CNode* pto) { PeerRef peer = GetPeerRef(pto->GetId()); + if (!peer) return false; const Consensus::Params& consensusParams = m_chainparams.GetConsensus(); // We must call MaybeDiscourageAndDisconnect first, to ensure that we'll // disconnect misbehaving peers even before the version handshake is complete. - if (MaybeDiscourageAndDisconnect(*pto)) return true; + if (MaybeDiscourageAndDisconnect(*pto, *peer)) return true; // Don't send anything until the version handshake is complete if (!pto->fSuccessfullyConnected || pto->fDisconnect) @@ -4300,34 +4404,10 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // If we get here, the outgoing message serialization version is set and can't change. const CNetMsgMaker msgMaker(pto->GetCommonVersion()); - // - // Message: ping - // - bool pingSend = false; - if (pto->fPingQueued) { - // RPC ping request by user - pingSend = true; - } - if (pto->nPingNonceSent == 0 && pto->m_ping_start.load() + PING_INTERVAL < GetTime<std::chrono::microseconds>()) { - // Ping automatically sent as a latency probe & keepalive. - pingSend = true; - } - if (pingSend) { - uint64_t nonce = 0; - while (nonce == 0) { - GetRandBytes((unsigned char*)&nonce, sizeof(nonce)); - } - pto->fPingQueued = false; - pto->m_ping_start = GetTime<std::chrono::microseconds>(); - if (pto->GetCommonVersion() > BIP0031_VERSION) { - pto->nPingNonceSent = nonce; - m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING, nonce)); - } else { - // Peer is too old to support ping command with nonce, pong will never arrive. 
- pto->nPingNonceSent = 0; - m_connman.PushMessage(pto, msgMaker.Make(NetMsgType::PING)); - } - } + MaybeSendPing(*pto, *peer); + + // MaybeSendPing may have marked peer for disconnection + if (pto->fDisconnect) return true; { LOCK(cs_main); @@ -4337,7 +4417,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // Address refresh broadcast auto current_time = GetTime<std::chrono::microseconds>(); - if (pto->RelayAddrsWithConn() && !::ChainstateActive().IsInitialBlockDownload() && pto->m_next_local_addr_send < current_time) { + if (fListen && pto->RelayAddrsWithConn() && + !::ChainstateActive().IsInitialBlockDownload() && + pto->m_next_local_addr_send < current_time) { // If we've sent before, clear the bloom filter for the peer, so that our // self-announcement will actually go out. // This might be unnecessary if the bloom filter has already rolled @@ -4347,7 +4429,10 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (pto->m_next_local_addr_send != 0us) { pto->m_addr_known->reset(); } - AdvertiseLocal(pto); + if (Optional<CAddress> local_addr = GetLocalAddrForPeer(pto)) { + FastRandomContext insecure_rand; + pto->PushAddress(*local_addr, insecure_rand); + } pto->m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); } @@ -4776,11 +4861,11 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (!pto->fClient && ((fFetch && !pto->m_limited_node) || !::ChainstateActive().IsInitialBlockDownload()) && state.nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) { std::vector<const CBlockIndex*> vToDownload; NodeId staller = -1; - FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller, consensusParams); + FindNextBlocksToDownload(pto->GetId(), MAX_BLOCKS_IN_TRANSIT_PER_PEER - state.nBlocksInFlight, vToDownload, staller); for (const CBlockIndex *pindex : vToDownload) { uint32_t nFetchFlags = GetFetchFlags(*pto); vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); - MarkBlockAsInFlight(m_mempool, pto->GetId(), pindex->GetBlockHash(), pindex); + MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex); LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), pindex->nHeight, pto->GetId()); } @@ -4802,7 +4887,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) entry.second.GetHash().ToString(), entry.first); } for (const GenTxid& gtxid : requestable) { - if (!AlreadyHaveTx(gtxid, m_mempool)) { + if (!AlreadyHaveTx(gtxid)) { LogPrint(BCLog::NET, "Requesting %s %s peer=%d\n", gtxid.IsWtxid() ? "wtx" : "tx", gtxid.GetHash().ToString(), pto->GetId()); vGetData.emplace_back(gtxid.IsWtxid() ? MSG_WTX : (MSG_TX | GetFetchFlags(*pto)), gtxid.GetHash()); diff --git a/src/net_processing.h b/src/net_processing.h index eaa3b142a8..d7be453df5 100644 --- a/src/net_processing.h +++ b/src/net_processing.h @@ -30,6 +30,7 @@ struct CNodeStateStats { int nSyncHeight = -1; int nCommonHeight = -1; int m_starting_height = -1; + int64_t m_ping_wait_usec; std::vector<int> vHeightInFlight; }; @@ -47,6 +48,9 @@ public: /** Whether this node ignores txs received over p2p. 
*/ virtual bool IgnoresIncomingTxs() = 0; + /** Send ping message to all peers */ + virtual void SendPings() = 0; + /** Set the best height */ virtual void SetBestHeight(int height) = 0; diff --git a/src/netaddress.h b/src/netaddress.h index b9eade7fd5..d0986557f7 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -36,7 +36,7 @@ static constexpr int ADDRV2_FORMAT = 0x20000000; * @note An address may belong to more than one network, for example `10.0.0.1` * belongs to both `NET_UNROUTABLE` and `NET_IPV4`. * Keep these sequential starting from 0 and `NET_MAX` as the last entry. - * We have loops like `for (int i = 0; i < NET_MAX; i++)` that expect to iterate + * We have loops like `for (int i = 0; i < NET_MAX; ++i)` that expect to iterate * over all enum values and also `GetExtNetwork()` "extends" this enum by * introducing standalone constants starting from `NET_MAX`. */ diff --git a/src/netbase.cpp b/src/netbase.cpp index 264029d8a2..0c5b3a220e 100644 --- a/src/netbase.cpp +++ b/src/netbase.cpp @@ -7,13 +7,17 @@ #include <sync.h> #include <tinyformat.h> +#include <util/sock.h> #include <util/strencodings.h> #include <util/string.h> #include <util/system.h> +#include <util/time.h> #include <atomic> #include <cstdint> +#include <functional> #include <limits> +#include <memory> #ifndef WIN32 #include <fcntl.h> @@ -55,7 +59,7 @@ enum Network ParseNetwork(const std::string& net_in) { std::string GetNetworkName(enum Network net) { switch (net) { - case NET_UNROUTABLE: return "unroutable"; + case NET_UNROUTABLE: return "not_publicly_routable"; case NET_IPV4: return "ipv4"; case NET_IPV6: return "ipv6"; case NET_ONION: return "onion"; @@ -68,6 +72,20 @@ std::string GetNetworkName(enum Network net) assert(false); } +std::vector<std::string> GetNetworkNames(bool append_unroutable) +{ + std::vector<std::string> names; + for (int n = 0; n < NET_MAX; ++n) { + const enum Network network{static_cast<Network>(n)}; + if (network == NET_UNROUTABLE || network == NET_I2P || network == NET_CJDNS || network == NET_INTERNAL) continue; + names.emplace_back(GetNetworkName(network)); + } + if (append_unroutable) { + names.emplace_back(GetNetworkName(NET_UNROUTABLE)); + } + return names; +} + bool static LookupIntern(const std::string& name, std::vector<CNetAddr>& vIP, unsigned int nMaxSolutions, bool fAllowLookup) { vIP.clear(); @@ -271,14 +289,6 @@ CService LookupNumeric(const std::string& name, int portDefault) return addr; } -struct timeval MillisToTimeval(int64_t nTimeout) -{ - struct timeval timeout; - timeout.tv_sec = nTimeout / 1000; - timeout.tv_usec = (nTimeout % 1000) * 1000; - return timeout; -} - /** SOCKS version */ enum SOCKSVersion: uint8_t { SOCKS4 = 0x04, @@ -336,8 +346,7 @@ enum class IntrRecvError { * @param data The buffer where the read bytes should be stored. * @param len The number of bytes to read into the specified buffer. * @param timeout The total timeout in milliseconds for this read. - * @param hSocket The socket (has to be in non-blocking mode) from which to read - * bytes. + * @param sock The socket (has to be in non-blocking mode) from which to read bytes. * * @returns An IntrRecvError indicating the resulting status of this read. * IntrRecvError::OK only if all of the specified number of bytes were @@ -347,7 +356,7 @@ enum class IntrRecvError { * Sockets can be made non-blocking with SetSocketNonBlocking(const * SOCKET&, bool). 
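
GetNetworkNames() above returns the user-selectable network names in enum order, skipping the unroutable, I2P, CJDNS and internal pseudo-networks unless append_unroutable is set. A small usage sketch that builds a comma-separated list, e.g. for help text (ListNetworks is an illustrative helper; with the Network enum as of this change the result is "ipv4, ipv6, onion"):

    #include <string>
    #include <vector>

    std::string ListNetworks()
    {
        std::string out;
        for (const std::string& name : GetNetworkNames(/* append_unroutable */ false)) {
            if (!out.empty()) out += ", ";
            out += name;
        }
        return out;
    }
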
*/ -static IntrRecvError InterruptibleRecv(uint8_t* data, size_t len, int timeout, const SOCKET& hSocket) +static IntrRecvError InterruptibleRecv(uint8_t* data, size_t len, int timeout, const Sock& sock) { int64_t curTime = GetTimeMillis(); int64_t endTime = curTime + timeout; @@ -355,7 +364,7 @@ static IntrRecvError InterruptibleRecv(uint8_t* data, size_t len, int timeout, c // (in millis) to break off in case of an interruption. const int64_t maxWait = 1000; while (len > 0 && curTime < endTime) { - ssize_t ret = recv(hSocket, (char*)data, len, 0); // Optimistically try the recv first + ssize_t ret = sock.Recv(data, len, 0); // Optimistically try the recv first if (ret > 0) { len -= ret; data += ret; @@ -364,25 +373,10 @@ static IntrRecvError InterruptibleRecv(uint8_t* data, size_t len, int timeout, c } else { // Other error or blocking int nErr = WSAGetLastError(); if (nErr == WSAEINPROGRESS || nErr == WSAEWOULDBLOCK || nErr == WSAEINVAL) { - if (!IsSelectableSocket(hSocket)) { - return IntrRecvError::NetworkError; - } // Only wait at most maxWait milliseconds at a time, unless // we're approaching the end of the specified total timeout int timeout_ms = std::min(endTime - curTime, maxWait); -#ifdef USE_POLL - struct pollfd pollfd = {}; - pollfd.fd = hSocket; - pollfd.events = POLLIN; - int nRet = poll(&pollfd, 1, timeout_ms); -#else - struct timeval tval = MillisToTimeval(timeout_ms); - fd_set fdset; - FD_ZERO(&fdset); - FD_SET(hSocket, &fdset); - int nRet = select(hSocket + 1, &fdset, nullptr, nullptr, &tval); -#endif - if (nRet == SOCKET_ERROR) { + if (!sock.Wait(std::chrono::milliseconds{timeout_ms}, Sock::RECV)) { return IntrRecvError::NetworkError; } } else { @@ -436,7 +430,7 @@ static std::string Socks5ErrorString(uint8_t err) * @param port The destination port. * @param auth The credentials with which to authenticate with the specified * SOCKS5 proxy. - * @param hSocket The SOCKS5 proxy socket. + * @param sock The SOCKS5 proxy socket. * * @returns Whether or not the operation succeeded. * @@ -446,7 +440,7 @@ static std::string Socks5ErrorString(uint8_t err) * @see <a href="https://www.ietf.org/rfc/rfc1928.txt">RFC1928: SOCKS Protocol * Version 5</a> */ -static bool Socks5(const std::string& strDest, int port, const ProxyCredentials *auth, const SOCKET& hSocket) +static bool Socks5(const std::string& strDest, int port, const ProxyCredentials* auth, const Sock& sock) { IntrRecvError recvr; LogPrint(BCLog::NET, "SOCKS5 connecting %s\n", strDest); @@ -464,12 +458,12 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials vSocks5Init.push_back(0x01); // 1 method identifier follows... 
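
In the InterruptibleRecv() hunk above, the inline poll()/select() machinery is replaced by a single Sock::Wait() call with the same job: block for at most timeout_ms until the descriptor is readable, treating only an outright failure as a network error. For reference, a minimal poll()-based equivalent of what was removed (POSIX only; the helper name is illustrative):

    #include <poll.h>

    bool WaitForReadable(int fd, int timeout_ms)
    {
        struct pollfd pfd{};
        pfd.fd = fd;
        pfd.events = POLLIN;
        // poll() returns -1 on error, 0 on timeout, >0 when the fd is readable;
        // only the error case maps to IntrRecvError::NetworkError above.
        return poll(&pfd, 1, timeout_ms) != -1;
    }
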
vSocks5Init.push_back(SOCKS5Method::NOAUTH); } - ssize_t ret = send(hSocket, (const char*)vSocks5Init.data(), vSocks5Init.size(), MSG_NOSIGNAL); + ssize_t ret = sock.Send(vSocks5Init.data(), vSocks5Init.size(), MSG_NOSIGNAL); if (ret != (ssize_t)vSocks5Init.size()) { return error("Error sending to proxy"); } uint8_t pchRet1[2]; - if ((recvr = InterruptibleRecv(pchRet1, 2, SOCKS5_RECV_TIMEOUT, hSocket)) != IntrRecvError::OK) { + if ((recvr = InterruptibleRecv(pchRet1, 2, SOCKS5_RECV_TIMEOUT, sock)) != IntrRecvError::OK) { LogPrintf("Socks5() connect to %s:%d failed: InterruptibleRecv() timeout or other failure\n", strDest, port); return false; } @@ -486,13 +480,13 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials vAuth.insert(vAuth.end(), auth->username.begin(), auth->username.end()); vAuth.push_back(auth->password.size()); vAuth.insert(vAuth.end(), auth->password.begin(), auth->password.end()); - ret = send(hSocket, (const char*)vAuth.data(), vAuth.size(), MSG_NOSIGNAL); + ret = sock.Send(vAuth.data(), vAuth.size(), MSG_NOSIGNAL); if (ret != (ssize_t)vAuth.size()) { return error("Error sending authentication to proxy"); } LogPrint(BCLog::PROXY, "SOCKS5 sending proxy authentication %s:%s\n", auth->username, auth->password); uint8_t pchRetA[2]; - if ((recvr = InterruptibleRecv(pchRetA, 2, SOCKS5_RECV_TIMEOUT, hSocket)) != IntrRecvError::OK) { + if ((recvr = InterruptibleRecv(pchRetA, 2, SOCKS5_RECV_TIMEOUT, sock)) != IntrRecvError::OK) { return error("Error reading proxy authentication response"); } if (pchRetA[0] != 0x01 || pchRetA[1] != 0x00) { @@ -512,12 +506,12 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials vSocks5.insert(vSocks5.end(), strDest.begin(), strDest.end()); vSocks5.push_back((port >> 8) & 0xFF); vSocks5.push_back((port >> 0) & 0xFF); - ret = send(hSocket, (const char*)vSocks5.data(), vSocks5.size(), MSG_NOSIGNAL); + ret = sock.Send(vSocks5.data(), vSocks5.size(), MSG_NOSIGNAL); if (ret != (ssize_t)vSocks5.size()) { return error("Error sending to proxy"); } uint8_t pchRet2[4]; - if ((recvr = InterruptibleRecv(pchRet2, 4, SOCKS5_RECV_TIMEOUT, hSocket)) != IntrRecvError::OK) { + if ((recvr = InterruptibleRecv(pchRet2, 4, SOCKS5_RECV_TIMEOUT, sock)) != IntrRecvError::OK) { if (recvr == IntrRecvError::Timeout) { /* If a timeout happens here, this effectively means we timed out while connecting * to the remote node. 
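
The request assembled above follows the RFC 1928 CONNECT format with the domain-name address type (the VER/CMD/RSV/ATYP prefix is set in unchanged code just before this hunk). Rebuilt as a standalone sketch, assuming the destination length has already been checked against the 255-byte limit:

    #include <cstdint>
    #include <string>
    #include <vector>

    std::vector<uint8_t> BuildSocks5Connect(const std::string& dest, uint16_t port)
    {
        std::vector<uint8_t> req;
        req.push_back(0x05);                               // VER: SOCKS5
        req.push_back(0x01);                               // CMD: CONNECT
        req.push_back(0x00);                               // RSV
        req.push_back(0x03);                               // ATYP: domain name
        req.push_back(static_cast<uint8_t>(dest.size()));  // LEN (must be <= 255)
        req.insert(req.end(), dest.begin(), dest.end());   // DST.ADDR
        req.push_back((port >> 8) & 0xFF);                 // DST.PORT, network byte order
        req.push_back(port & 0xFF);
        return req;
    }
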
This is very common for Tor, so do not print an @@ -541,16 +535,16 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials uint8_t pchRet3[256]; switch (pchRet2[3]) { - case SOCKS5Atyp::IPV4: recvr = InterruptibleRecv(pchRet3, 4, SOCKS5_RECV_TIMEOUT, hSocket); break; - case SOCKS5Atyp::IPV6: recvr = InterruptibleRecv(pchRet3, 16, SOCKS5_RECV_TIMEOUT, hSocket); break; + case SOCKS5Atyp::IPV4: recvr = InterruptibleRecv(pchRet3, 4, SOCKS5_RECV_TIMEOUT, sock); break; + case SOCKS5Atyp::IPV6: recvr = InterruptibleRecv(pchRet3, 16, SOCKS5_RECV_TIMEOUT, sock); break; case SOCKS5Atyp::DOMAINNAME: { - recvr = InterruptibleRecv(pchRet3, 1, SOCKS5_RECV_TIMEOUT, hSocket); + recvr = InterruptibleRecv(pchRet3, 1, SOCKS5_RECV_TIMEOUT, sock); if (recvr != IntrRecvError::OK) { return error("Error reading from proxy"); } int nRecv = pchRet3[0]; - recvr = InterruptibleRecv(pchRet3, nRecv, SOCKS5_RECV_TIMEOUT, hSocket); + recvr = InterruptibleRecv(pchRet3, nRecv, SOCKS5_RECV_TIMEOUT, sock); break; } default: return error("Error: malformed proxy response"); @@ -558,41 +552,35 @@ static bool Socks5(const std::string& strDest, int port, const ProxyCredentials if (recvr != IntrRecvError::OK) { return error("Error reading from proxy"); } - if ((recvr = InterruptibleRecv(pchRet3, 2, SOCKS5_RECV_TIMEOUT, hSocket)) != IntrRecvError::OK) { + if ((recvr = InterruptibleRecv(pchRet3, 2, SOCKS5_RECV_TIMEOUT, sock)) != IntrRecvError::OK) { return error("Error reading from proxy"); } LogPrint(BCLog::NET, "SOCKS5 connected %s\n", strDest); return true; } -/** - * Try to create a socket file descriptor with specific properties in the - * communications domain (address family) of the specified service. - * - * For details on the desired properties, see the inline comments in the source - * code. - */ -SOCKET CreateSocket(const CService &addrConnect) +std::unique_ptr<Sock> CreateSockTCP(const CService& address_family) { // Create a sockaddr from the specified service. struct sockaddr_storage sockaddr; socklen_t len = sizeof(sockaddr); - if (!addrConnect.GetSockAddr((struct sockaddr*)&sockaddr, &len)) { - LogPrintf("Cannot create socket for %s: unsupported network\n", addrConnect.ToString()); - return INVALID_SOCKET; + if (!address_family.GetSockAddr((struct sockaddr*)&sockaddr, &len)) { + LogPrintf("Cannot create socket for %s: unsupported network\n", address_family.ToString()); + return nullptr; } // Create a TCP socket in the address family of the specified service. SOCKET hSocket = socket(((struct sockaddr*)&sockaddr)->sa_family, SOCK_STREAM, IPPROTO_TCP); - if (hSocket == INVALID_SOCKET) - return INVALID_SOCKET; + if (hSocket == INVALID_SOCKET) { + return nullptr; + } // Ensure that waiting for I/O on this socket won't result in undefined // behavior. if (!IsSelectableSocket(hSocket)) { CloseSocket(hSocket); LogPrintf("Cannot create connection: non-selectable socket created (fd >= FD_SETSIZE ?)\n"); - return INVALID_SOCKET; + return nullptr; } #ifdef SO_NOSIGPIPE @@ -608,11 +596,14 @@ SOCKET CreateSocket(const CService &addrConnect) // Set the non-blocking option on the socket. 
if (!SetSocketNonBlocking(hSocket, true)) { CloseSocket(hSocket); - LogPrintf("CreateSocket: Setting socket to non-blocking failed, error %s\n", NetworkErrorString(WSAGetLastError())); + LogPrintf("Error setting socket to non-blocking: %s\n", NetworkErrorString(WSAGetLastError())); + return nullptr; } - return hSocket; + return std::make_unique<Sock>(hSocket); } +std::function<std::unique_ptr<Sock>(const CService&)> CreateSock = CreateSockTCP; + template<typename... Args> static void LogConnectFailure(bool manual_connection, const char* fmt, const Args&... args) { std::string error_message = tfm::format(fmt, args...); @@ -786,7 +777,7 @@ bool IsProxy(const CNetAddr &addr) { * @param proxy The SOCKS5 proxy. * @param strDest The destination service to which to connect. * @param port The destination port. - * @param hSocket The socket on which to connect to the SOCKS5 proxy. + * @param sock The socket on which to connect to the SOCKS5 proxy. * @param nTimeout Wait this many milliseconds for the connection to the SOCKS5 * proxy to be established. * @param[out] outProxyConnectionFailed Whether or not the connection to the @@ -794,10 +785,10 @@ bool IsProxy(const CNetAddr &addr) { * * @returns Whether or not the operation succeeded. */ -bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int port, const SOCKET& hSocket, int nTimeout, bool& outProxyConnectionFailed) +bool ConnectThroughProxy(const proxyType& proxy, const std::string& strDest, int port, const Sock& sock, int nTimeout, bool& outProxyConnectionFailed) { // first connect to proxy server - if (!ConnectSocketDirectly(proxy.proxy, hSocket, nTimeout, true)) { + if (!ConnectSocketDirectly(proxy.proxy, sock.Get(), nTimeout, true)) { outProxyConnectionFailed = true; return false; } @@ -806,11 +797,11 @@ bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int ProxyCredentials random_auth; static std::atomic_int counter(0); random_auth.username = random_auth.password = strprintf("%i", counter++); - if (!Socks5(strDest, (uint16_t)port, &random_auth, hSocket)) { + if (!Socks5(strDest, (uint16_t)port, &random_auth, sock)) { return false; } } else { - if (!Socks5(strDest, (uint16_t)port, 0, hSocket)) { + if (!Socks5(strDest, (uint16_t)port, 0, sock)) { return false; } } @@ -869,57 +860,6 @@ bool LookupSubNet(const std::string& strSubnet, CSubNet& ret) return false; } -#ifdef WIN32 -std::string NetworkErrorString(int err) -{ - wchar_t buf[256]; - buf[0] = 0; - if(FormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_MAX_WIDTH_MASK, - nullptr, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), - buf, ARRAYSIZE(buf), nullptr)) - { - return strprintf("%s (%d)", std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>,wchar_t>().to_bytes(buf), err); - } - else - { - return strprintf("Unknown error (%d)", err); - } -} -#else -std::string NetworkErrorString(int err) -{ - char buf[256]; - buf[0] = 0; - /* Too bad there are two incompatible implementations of the - * thread-safe strerror. 
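With the CreateSock factory defined above, callers get RAII-style ownership back instead of a raw descriptor they must remember to close. A hedged sketch of the resulting calling pattern; the helper name is hypothetical, and the assumption that Sock releases its descriptor on destruction is not shown in this diff:

#include <netbase.h>

#include <memory>

// Hypothetical helper, for illustration only.
bool TryDirectConnect(const CService& dest, int timeout_ms)
{
    const std::unique_ptr<Sock> sock = CreateSock(dest);
    if (!sock) {
        return false; // unsupported network or socket creation failed
    }
    if (!ConnectSocketDirectly(dest, sock->Get(), timeout_ms, /* manual_connection */ false)) {
        return false; // descriptor is assumed to be released when `sock` goes out of scope
    }
    // ... hand *sock to Socks5()/ConnectThroughProxy(), or use sock->Send()/sock->Recv() ...
    return true;
}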
*/ - const char *s; -#ifdef STRERROR_R_CHAR_P /* GNU variant can return a pointer outside the passed buffer */ - s = strerror_r(err, buf, sizeof(buf)); -#else /* POSIX variant always returns message in buffer */ - s = buf; - if (strerror_r(err, buf, sizeof(buf))) - buf[0] = 0; -#endif - return strprintf("%s (%d)", s, err); -} -#endif - -bool CloseSocket(SOCKET& hSocket) -{ - if (hSocket == INVALID_SOCKET) - return false; -#ifdef WIN32 - int ret = closesocket(hSocket); -#else - int ret = close(hSocket); -#endif - if (ret) { - LogPrintf("Socket close failed: %d. Error: %s\n", hSocket, NetworkErrorString(WSAGetLastError())); - } - hSocket = INVALID_SOCKET; - return ret != SOCKET_ERROR; -} - bool SetSocketNonBlocking(const SOCKET& hSocket, bool fNonBlocking) { if (fNonBlocking) { diff --git a/src/netbase.h b/src/netbase.h index ac4cd97673..847a72ca8e 100644 --- a/src/netbase.h +++ b/src/netbase.h @@ -12,7 +12,10 @@ #include <compat.h> #include <netaddress.h> #include <serialize.h> +#include <util/sock.h> +#include <functional> +#include <memory> #include <stdint.h> #include <string> #include <vector> @@ -39,6 +42,8 @@ public: enum Network ParseNetwork(const std::string& net); std::string GetNetworkName(enum Network net); +/** Return a vector of publicly routable Network names; optionally append NET_UNROUTABLE. */ +std::vector<std::string> GetNetworkNames(bool append_unroutable = false); bool SetProxy(enum Network net, const proxyType &addrProxy); bool GetProxy(enum Network net, proxyType &proxyInfoOut); bool IsProxy(const CNetAddr &addr); @@ -51,21 +56,25 @@ bool Lookup(const std::string& name, CService& addr, int portDefault, bool fAllo bool Lookup(const std::string& name, std::vector<CService>& vAddr, int portDefault, bool fAllowLookup, unsigned int nMaxSolutions); CService LookupNumeric(const std::string& name, int portDefault = 0); bool LookupSubNet(const std::string& strSubnet, CSubNet& subnet); -SOCKET CreateSocket(const CService &addrConnect); + +/** + * Create a TCP socket in the given address family. + * @param[in] address_family The socket is created in the same address family as this address. + * @return pointer to the created Sock object or unique_ptr that owns nothing in case of failure + */ +std::unique_ptr<Sock> CreateSockTCP(const CService& address_family); + +/** + * Socket factory. Defaults to `CreateSockTCP()`, but can be overridden by unit tests. + */ +extern std::function<std::unique_ptr<Sock>(const CService&)> CreateSock; + bool ConnectSocketDirectly(const CService &addrConnect, const SOCKET& hSocketRet, int nTimeout, bool manual_connection); -bool ConnectThroughProxy(const proxyType &proxy, const std::string& strDest, int port, const SOCKET& hSocketRet, int nTimeout, bool& outProxyConnectionFailed); -/** Return readable error string for a network error code */ -std::string NetworkErrorString(int err); -/** Close socket and set hSocket to INVALID_SOCKET */ -bool CloseSocket(SOCKET& hSocket); +bool ConnectThroughProxy(const proxyType& proxy, const std::string& strDest, int port, const Sock& sock, int nTimeout, bool& outProxyConnectionFailed); /** Disable or enable blocking-mode for a socket */ bool SetSocketNonBlocking(const SOCKET& hSocket, bool fNonBlocking); /** Set the TCP_NODELAY flag on a socket */ bool SetSocketNoDelay(const SOCKET& hSocket); -/** - * Convert milliseconds to a struct timeval for e.g. select. 
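The header comment above notes that the CreateSock factory defaults to CreateSockTCP() but can be overridden by unit tests. A minimal sketch of what such an override could look like; the test body and the choice of descriptor are illustrative assumptions, only the CreateSock/Sock names come from this diff:

#include <netbase.h>

#include <memory>

static std::unique_ptr<Sock> CreateDummySock(const CService&)
{
    // Hand back a Sock that will fail fast; a real test might instead wrap
    // one end of a socketpair() it controls.
    return std::make_unique<Sock>(INVALID_SOCKET);
}

void SomeNetbaseTest() // hypothetical test body
{
    const auto default_factory = CreateSock; // remember the real factory
    CreateSock = CreateDummySock;            // no real TCP sockets from here on
    // ... exercise code that calls CreateSock(addr) ...
    CreateSock = default_factory;            // restore for subsequent tests
}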
- */ -struct timeval MillisToTimeval(int64_t nTimeout); void InterruptSocks5(bool interrupt); #endif // BITCOIN_NETBASE_H diff --git a/src/node/coinstats.cpp b/src/node/coinstats.cpp index 56113cb731..06fcc33725 100644 --- a/src/node/coinstats.cpp +++ b/src/node/coinstats.cpp @@ -6,6 +6,7 @@ #include <node/coinstats.h> #include <coins.h> +#include <crypto/muhash.h> #include <hash.h> #include <serialize.h> #include <uint256.h> @@ -24,31 +25,59 @@ static uint64_t GetBogoSize(const CScript& scriptPubKey) scriptPubKey.size() /* scriptPubKey */; } -static void ApplyStats(CCoinsStats& stats, CHashWriter& ss, const uint256& hash, const std::map<uint32_t, Coin>& outputs) +static void ApplyHash(CCoinsStats& stats, CHashWriter& ss, const uint256& hash, const std::map<uint32_t, Coin>& outputs, std::map<uint32_t, Coin>::const_iterator it) { - assert(!outputs.empty()); - ss << hash; - ss << VARINT(outputs.begin()->second.nHeight * 2 + outputs.begin()->second.fCoinBase ? 1u : 0u); - stats.nTransactions++; - for (const auto& output : outputs) { - ss << VARINT(output.first + 1); - ss << output.second.out.scriptPubKey; - ss << VARINT_MODE(output.second.out.nValue, VarIntMode::NONNEGATIVE_SIGNED); - stats.nTransactionOutputs++; - stats.nTotalAmount += output.second.out.nValue; - stats.nBogoSize += GetBogoSize(output.second.out.scriptPubKey); + if (it == outputs.begin()) { + ss << hash; + ss << VARINT(it->second.nHeight * 2 + it->second.fCoinBase ? 1u : 0u); + } + + ss << VARINT(it->first + 1); + ss << it->second.out.scriptPubKey; + ss << VARINT_MODE(it->second.out.nValue, VarIntMode::NONNEGATIVE_SIGNED); + + if (it == std::prev(outputs.end())) { + ss << VARINT(0u); } - ss << VARINT(0u); } -static void ApplyStats(CCoinsStats& stats, std::nullptr_t, const uint256& hash, const std::map<uint32_t, Coin>& outputs) +static void ApplyHash(CCoinsStats& stats, std::nullptr_t, const uint256& hash, const std::map<uint32_t, Coin>& outputs, std::map<uint32_t, Coin>::const_iterator it) {} + +static void ApplyHash(CCoinsStats& stats, MuHash3072& muhash, const uint256& hash, const std::map<uint32_t, Coin>& outputs, std::map<uint32_t, Coin>::const_iterator it) +{ + COutPoint outpoint = COutPoint(hash, it->first); + Coin coin = it->second; + + CDataStream ss(SER_DISK, PROTOCOL_VERSION); + ss << outpoint; + ss << static_cast<uint32_t>(coin.nHeight * 2 + coin.fCoinBase); + ss << coin.out; + muhash.Insert(MakeUCharSpan(ss)); +} + +//! Warning: be very careful when changing this! assumeutxo and UTXO snapshot +//! validation commitments are reliant on the hash constructed by this +//! function. +//! +//! If the construction of this hash is changed, it will invalidate +//! existing UTXO snapshots. This will not result in any kind of consensus +//! failure, but it will force clients that were expecting to make use of +//! assumeutxo to do traditional IBD instead. +//! +//! It is also possible, though very unlikely, that a change in this +//! construction could cause a previously invalid (and potentially malicious) +//! UTXO snapshot to be considered valid. 
+template <typename T> +static void ApplyStats(CCoinsStats& stats, T& hash_obj, const uint256& hash, const std::map<uint32_t, Coin>& outputs) { assert(!outputs.empty()); stats.nTransactions++; - for (const auto& output : outputs) { + for (auto it = outputs.begin(); it != outputs.end(); ++it) { + ApplyHash(stats, hash_obj, hash, outputs, it); + stats.nTransactionOutputs++; - stats.nTotalAmount += output.second.out.nValue; - stats.nBogoSize += GetBogoSize(output.second.out.scriptPubKey); + stats.nTotalAmount += it->second.out.nValue; + stats.nBogoSize += GetBogoSize(it->second.out.scriptPubKey); } } @@ -104,6 +133,10 @@ bool GetUTXOStats(CCoinsView* view, CCoinsStats& stats, CoinStatsHashType hash_t CHashWriter ss(SER_GETHASH, PROTOCOL_VERSION); return GetUTXOStats(view, stats, ss, interruption_point); } + case(CoinStatsHashType::MUHASH): { + MuHash3072 muhash; + return GetUTXOStats(view, stats, muhash, interruption_point); + } case(CoinStatsHashType::NONE): { return GetUTXOStats(view, stats, nullptr, interruption_point); } @@ -116,10 +149,18 @@ static void PrepareHash(CHashWriter& ss, const CCoinsStats& stats) { ss << stats.hashBlock; } +// MuHash does not need the prepare step +static void PrepareHash(MuHash3072& muhash, CCoinsStats& stats) {} static void PrepareHash(std::nullptr_t, CCoinsStats& stats) {} static void FinalizeHash(CHashWriter& ss, CCoinsStats& stats) { stats.hashSerialized = ss.GetHash(); } +static void FinalizeHash(MuHash3072& muhash, CCoinsStats& stats) +{ + uint256 out; + muhash.Finalize(out); + stats.hashSerialized = out; +} static void FinalizeHash(std::nullptr_t, CCoinsStats& stats) {} diff --git a/src/node/coinstats.h b/src/node/coinstats.h index 7c56bfc2ad..f02b95235f 100644 --- a/src/node/coinstats.h +++ b/src/node/coinstats.h @@ -16,6 +16,7 @@ class CCoinsView; enum class CoinStatsHashType { HASH_SERIALIZED, + MUHASH, NONE, }; diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index b7efd68cfd..ec976fe9bf 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -441,7 +441,7 @@ public: bool checkFinalTx(const CTransaction& tx) override { LOCK(cs_main); - return CheckFinalTx(tx); + return CheckFinalTx(::ChainActive().Tip(), tx); } Optional<int> findLocatorFork(const CBlockLocator& locator) override { diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp index d3bb9687a8..3b3fab7b6b 100644 --- a/src/node/transaction.cpp +++ b/src/node/transaction.cpp @@ -50,22 +50,22 @@ TransactionError BroadcastTransaction(NodeContext& node, const CTransactionRef t } if (!node.mempool->exists(hashTx)) { // Transaction is not already in the mempool. - TxValidationState state; if (max_tx_fee > 0) { // First, call ATMP with test_accept and check the fee. If ATMP // fails here, return error immediately. - CAmount fee{0}; - if (!AcceptToMemoryPool(*node.mempool, state, tx, - nullptr /* plTxnReplaced */, false /* bypass_limits */, /* test_accept */ true, &fee)) { - return HandleATMPError(state, err_string); - } else if (fee > max_tx_fee) { + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *node.mempool, tx, false /* bypass_limits */, + true /* test_accept */); + if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) { + return HandleATMPError(result.m_state, err_string); + } else if (result.m_base_fees.value() > max_tx_fee) { return TransactionError::MAX_FEE_EXCEEDED; } } // Try to submit the transaction to the mempool. 
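The MuHash-based hash type added above is order-independent, so the same digest comes out no matter how the UTXO set is traversed, which is what makes incremental maintenance of the hash possible. A small hedged sketch of the calls this code path relies on; only Insert() and Finalize() appear in this diff, and anything else about the muhash API would be an assumption:

#include <crypto/muhash.h>
#include <streams.h>
#include <uint256.h>
#include <version.h>

uint256 OrderIndependentHash()
{
    MuHash3072 acc;

    CDataStream leaf_a(SER_DISK, PROTOCOL_VERSION);
    leaf_a << uint8_t{1};
    CDataStream leaf_b(SER_DISK, PROTOCOL_VERSION);
    leaf_b << uint8_t{2};

    // Inserting in either order yields the same digest.
    acc.Insert(MakeUCharSpan(leaf_a));
    acc.Insert(MakeUCharSpan(leaf_b));

    uint256 out;
    acc.Finalize(out);
    return out;
}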
- if (!AcceptToMemoryPool(*node.mempool, state, tx, - nullptr /* plTxnReplaced */, false /* bypass_limits */)) { - return HandleATMPError(state, err_string); + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *node.mempool, tx, false /* bypass_limits */, + false /* test_accept */); + if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) { + return HandleATMPError(result.m_state, err_string); } // Transaction was accepted to the mempool. diff --git a/src/protocol.cpp b/src/protocol.cpp index 56e738eaa8..0b893b9272 100644 --- a/src/protocol.cpp +++ b/src/protocol.cpp @@ -5,7 +5,6 @@ #include <protocol.h> -#include <util/strencodings.h> #include <util/system.h> static std::atomic<bool> g_initial_block_download_completed(false); @@ -86,7 +85,7 @@ const static std::string allNetMessageTypes[] = { NetMsgType::CFCHECKPT, NetMsgType::WTXIDRELAY, }; -const static std::vector<std::string> allNetMessageTypesVec(allNetMessageTypes, allNetMessageTypes+ARRAYLEN(allNetMessageTypes)); +const static std::vector<std::string> allNetMessageTypesVec(std::begin(allNetMessageTypes), std::end(allNetMessageTypes)); CMessageHeader::CMessageHeader() { diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp index d6d5ba6968..bad74fcbcc 100644 --- a/src/qt/bitcoin.cpp +++ b/src/qt/bitcoin.cpp @@ -44,6 +44,7 @@ #include <QApplication> #include <QDebug> +#include <QFontDatabase> #include <QLibraryInfo> #include <QLocale> #include <QMessageBox> @@ -475,6 +476,7 @@ int GuiMain(int argc, char* argv[]) #endif BitcoinApplication app; + QFontDatabase::addApplicationFont(":/fonts/monospace"); /// 2. Parse command-line options. We do this after qt in order to show an error if there are problems parsing these // Command-line options take precedence: diff --git a/src/qt/bitcoin.qrc b/src/qt/bitcoin.qrc index 7115459808..fed373e551 100644 --- a/src/qt/bitcoin.qrc +++ b/src/qt/bitcoin.qrc @@ -83,4 +83,7 @@ <file alias="spinner-034">res/animation/spinner-034.png</file> <file alias="spinner-035">res/animation/spinner-035.png</file> </qresource> + <qresource prefix="/fonts"> + <file alias="monospace">res/fonts/RobotoMono-Bold.ttf</file> + </qresource> </RCC> diff --git a/src/qt/forms/debugwindow.ui b/src/qt/forms/debugwindow.ui index 3831852185..9e828ce0a6 100644 --- a/src/qt/forms/debugwindow.ui +++ b/src/qt/forms/debugwindow.ui @@ -988,7 +988,7 @@ </item> </layout> </widget> - <widget class="QWidget" name="widget_2" native="true"> + <widget class="QWidget" name="peersTabRightPanel" native="true"> <property name="sizePolicy"> <sizepolicy hsizetype="Minimum" vsizetype="Preferred"> <horstretch>0</horstretch> @@ -1079,10 +1079,10 @@ <item row="1" column="0"> <widget class="QLabel" name="peerConnectionTypeLabel"> <property name="toolTip"> - <string>The type of peer connection: %1</string> + <string>The direction and type of peer connection: %1</string> </property> <property name="text"> - <string>Connection Type</string> + <string>Direction/Type</string> </property> </widget> </item> @@ -1198,13 +1198,65 @@ </widget> </item> <item row="6" column="0"> + <widget class="QLabel" name="peerRelayTxesLabel"> + <property name="toolTip"> + <string>Whether the peer requested us to relay transactions.</string> + </property> + <property name="text"> + <string>Wants Tx Relay</string> + </property> + </widget> + </item> + <item row="6" column="1"> + <widget class="QLabel" name="peerRelayTxes"> + <property name="cursor"> + <cursorShape>IBeamCursor</cursorShape> + </property> + <property name="text"> + 
<string>N/A</string> + </property> + <property name="textFormat"> + <enum>Qt::PlainText</enum> + </property> + <property name="textInteractionFlags"> + <set>Qt::LinksAccessibleByMouse|Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse</set> + </property> + </widget> + </item> + <item row="7" column="0"> + <widget class="QLabel" name="peerHighBandwidthLabel"> + <property name="toolTip"> + <string>High bandwidth BIP152 compact block relay: %1</string> + </property> + <property name="text"> + <string>High Bandwidth</string> + </property> + </widget> + </item> + <item row="7" column="1"> + <widget class="QLabel" name="peerHighBandwidth"> + <property name="cursor"> + <cursorShape>IBeamCursor</cursorShape> + </property> + <property name="text"> + <string>N/A</string> + </property> + <property name="textFormat"> + <enum>Qt::PlainText</enum> + </property> + <property name="textInteractionFlags"> + <set>Qt::LinksAccessibleByMouse|Qt::TextSelectableByKeyboard|Qt::TextSelectableByMouse</set> + </property> + </widget> + </item> + <item row="8" column="0"> <widget class="QLabel" name="label_29"> <property name="text"> <string>Starting Block</string> </property> </widget> </item> - <item row="6" column="1"> + <item row="8" column="1"> <widget class="QLabel" name="peerHeight"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1220,14 +1272,14 @@ </property> </widget> </item> - <item row="7" column="0"> + <item row="9" column="0"> <widget class="QLabel" name="label_27"> <property name="text"> <string>Synced Headers</string> </property> </widget> </item> - <item row="7" column="1"> + <item row="9" column="1"> <widget class="QLabel" name="peerSyncHeight"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1243,14 +1295,14 @@ </property> </widget> </item> - <item row="8" column="0"> + <item row="10" column="0"> <widget class="QLabel" name="label_25"> <property name="text"> <string>Synced Blocks</string> </property> </widget> </item> - <item row="8" column="1"> + <item row="10" column="1"> <widget class="QLabel" name="peerCommonHeight"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1266,14 +1318,14 @@ </property> </widget> </item> - <item row="9" column="0"> + <item row="11" column="0"> <widget class="QLabel" name="label_22"> <property name="text"> <string>Connection Time</string> </property> </widget> </item> - <item row="9" column="1"> + <item row="11" column="1"> <widget class="QLabel" name="peerConnTime"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1289,14 +1341,14 @@ </property> </widget> </item> - <item row="10" column="0"> + <item row="12" column="0"> <widget class="QLabel" name="label_15"> <property name="text"> <string>Last Send</string> </property> </widget> </item> - <item row="10" column="1"> + <item row="12" column="1"> <widget class="QLabel" name="peerLastSend"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1312,14 +1364,14 @@ </property> </widget> </item> - <item row="11" column="0"> + <item row="13" column="0"> <widget class="QLabel" name="label_19"> <property name="text"> <string>Last Receive</string> </property> </widget> </item> - <item row="11" column="1"> + <item row="13" column="1"> <widget class="QLabel" name="peerLastRecv"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1335,14 +1387,14 @@ </property> </widget> </item> - <item row="12" column="0"> + <item row="14" column="0"> <widget class="QLabel" name="label_18"> <property name="text"> <string>Sent</string> 
</property> </widget> </item> - <item row="12" column="1"> + <item row="14" column="1"> <widget class="QLabel" name="peerBytesSent"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1358,14 +1410,14 @@ </property> </widget> </item> - <item row="13" column="0"> + <item row="15" column="0"> <widget class="QLabel" name="label_20"> <property name="text"> <string>Received</string> </property> </widget> </item> - <item row="13" column="1"> + <item row="15" column="1"> <widget class="QLabel" name="peerBytesRecv"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1381,14 +1433,14 @@ </property> </widget> </item> - <item row="14" column="0"> + <item row="16" column="0"> <widget class="QLabel" name="label_26"> <property name="text"> <string>Ping Time</string> </property> </widget> </item> - <item row="14" column="1"> + <item row="16" column="1"> <widget class="QLabel" name="peerPingTime"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1404,7 +1456,7 @@ </property> </widget> </item> - <item row="15" column="0"> + <item row="17" column="0"> <widget class="QLabel" name="peerPingWaitLabel"> <property name="toolTip"> <string>The duration of a currently outstanding ping.</string> @@ -1414,7 +1466,7 @@ </property> </widget> </item> - <item row="15" column="1"> + <item row="17" column="1"> <widget class="QLabel" name="peerPingWait"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1430,14 +1482,14 @@ </property> </widget> </item> - <item row="16" column="0"> + <item row="18" column="0"> <widget class="QLabel" name="peerMinPingLabel"> <property name="text"> <string>Min Ping</string> </property> </widget> </item> - <item row="16" column="1"> + <item row="18" column="1"> <widget class="QLabel" name="peerMinPing"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1453,14 +1505,14 @@ </property> </widget> </item> - <item row="17" column="0"> + <item row="19" column="0"> <widget class="QLabel" name="label_timeoffset"> <property name="text"> <string>Time Offset</string> </property> </widget> </item> - <item row="17" column="1"> + <item row="19" column="1"> <widget class="QLabel" name="timeoffset"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1476,7 +1528,7 @@ </property> </widget> </item> - <item row="18" column="0"> + <item row="20" column="0"> <widget class="QLabel" name="peerMappedASLabel"> <property name="toolTip"> <string>The mapped Autonomous System used for diversifying peer selection.</string> @@ -1486,7 +1538,7 @@ </property> </widget> </item> - <item row="18" column="1"> + <item row="20" column="1"> <widget class="QLabel" name="peerMappedAS"> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> @@ -1502,7 +1554,7 @@ </property> </widget> </item> - <item row="19" column="0"> + <item row="21" column="0"> <spacer name="verticalSpacer_3"> <property name="orientation"> <enum>Qt::Vertical</enum> diff --git a/src/qt/forms/optionsdialog.ui b/src/qt/forms/optionsdialog.ui index 8181cc47e2..6d279540e9 100644 --- a/src/qt/forms/optionsdialog.ui +++ b/src/qt/forms/optionsdialog.ui @@ -706,6 +706,106 @@ </layout> </item> <item> + <widget class="QGroupBox" name="font_groupBox"> + <property name="title"> + <string>Monospaced font in the Overview tab:</string> + </property> + <layout class="QVBoxLayout" name="font_verticalLayout"> + <item> + <layout class="QHBoxLayout" name="embeddedFont_horizontalLayout"> + <item> + <widget class="QRadioButton" name="embeddedFont_radioButton"> + <property 
name="text"> + <string>embedded "%1"</string> + </property> + </widget> + </item> + <item> + <spacer name="embeddedFont_horizontalSpacer"> + <property name="orientation"> + <enum>Qt::Horizontal</enum> + </property> + <property name="sizeHint" stdset="0"> + <size> + <width>40</width> + <height>20</height> + </size> + </property> + </spacer> + </item> + <item> + <layout class="QVBoxLayout" name="embeddedFont_verticalLayout"> + <item> + <widget class="QLabel" name="embeddedFont_label_1"> + <property name="text"> + <string>111.11111111 BTC</string> + </property> + </widget> + </item> + <item> + <widget class="QLabel" name="embeddedFont_label_9"> + <property name="text"> + <string>909.09090909 BTC</string> + </property> + </widget> + </item> + </layout> + </item> + </layout> + </item> + <item> + <widget class="Line" name="font_line"> + <property name="orientation"> + <enum>Qt::Horizontal</enum> + </property> + </widget> + </item> + <item> + <layout class="QHBoxLayout" name="systemFont_horizontalLayout"> + <item> + <widget class="QRadioButton" name="systemFont_radioButton"> + <property name="text"> + <string>closest matching "%1"</string> + </property> + </widget> + </item> + <item> + <spacer name="systemFont_horizontalSpacer"> + <property name="orientation"> + <enum>Qt::Horizontal</enum> + </property> + <property name="sizeHint" stdset="0"> + <size> + <width>40</width> + <height>20</height> + </size> + </property> + </spacer> + </item> + <item> + <layout class="QVBoxLayout" name="systemFont_verticalLayout"> + <item> + <widget class="QLabel" name="systemFont_label_1"> + <property name="text"> + <string>111.11111111 BTC</string> + </property> + </widget> + </item> + <item> + <widget class="QLabel" name="systemFont_label_9"> + <property name="text"> + <string>909.09090909 BTC</string> + </property> + </widget> + </item> + </layout> + </item> + </layout> + </item> + </layout> + </widget> + </item> + <item> <spacer name="verticalSpacer_Display"> <property name="orientation"> <enum>Qt::Vertical</enum> diff --git a/src/qt/forms/overviewpage.ui b/src/qt/forms/overviewpage.ui index ee9d4a113c..b82143e1ba 100644 --- a/src/qt/forms/overviewpage.ui +++ b/src/qt/forms/overviewpage.ui @@ -116,13 +116,6 @@ </property> <item row="2" column="2"> <widget class="QLabel" name="labelWatchPending"> - <property name="font"> - <font> - <family>Monospace</family> - <weight>75</weight> - <bold>true</bold> - </font> - </property> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> </property> @@ -142,13 +135,6 @@ </item> <item row="2" column="1"> <widget class="QLabel" name="labelUnconfirmed"> - <property name="font"> - <font> - <family>Monospace</family> - <weight>75</weight> - <bold>true</bold> - </font> - </property> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> </property> @@ -168,13 +154,6 @@ </item> <item row="3" column="2"> <widget class="QLabel" name="labelWatchImmature"> - <property name="font"> - <font> - <family>Monospace</family> - <weight>75</weight> - <bold>true</bold> - </font> - </property> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> </property> @@ -227,13 +206,6 @@ </item> <item row="3" column="1"> <widget class="QLabel" name="labelImmature"> - <property name="font"> - <font> - <family>Monospace</family> - <weight>75</weight> - <bold>true</bold> - </font> - </property> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> </property> @@ -273,13 +245,6 @@ </item> <item row="5" column="1"> <widget class="QLabel" name="labelTotal"> - <property 
name="font"> - <font> - <family>Monospace</family> - <weight>75</weight> - <bold>true</bold> - </font> - </property> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> </property> @@ -299,13 +264,6 @@ </item> <item row="5" column="2"> <widget class="QLabel" name="labelWatchTotal"> - <property name="font"> - <font> - <family>Monospace</family> - <weight>75</weight> - <bold>true</bold> - </font> - </property> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> </property> @@ -342,13 +300,6 @@ </item> <item row="1" column="1"> <widget class="QLabel" name="labelBalance"> - <property name="font"> - <font> - <family>Monospace</family> - <weight>75</weight> - <bold>true</bold> - </font> - </property> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> </property> @@ -368,13 +319,6 @@ </item> <item row="1" column="2"> <widget class="QLabel" name="labelWatchAvailable"> - <property name="font"> - <font> - <family>Monospace</family> - <weight>75</weight> - <bold>true</bold> - </font> - </property> <property name="cursor"> <cursorShape>IBeamCursor</cursorShape> </property> diff --git a/src/qt/guiutil.cpp b/src/qt/guiutil.cpp index 376acf0963..c70bd9f418 100644 --- a/src/qt/guiutil.cpp +++ b/src/qt/guiutil.cpp @@ -78,8 +78,11 @@ QString dateTimeStr(qint64 nTime) return dateTimeStr(QDateTime::fromTime_t((qint32)nTime)); } -QFont fixedPitchFont() +QFont fixedPitchFont(bool use_embedded_font) { + if (use_embedded_font) { + return {"Roboto Mono"}; + } return QFontDatabase::systemFont(QFontDatabase::FixedFont); } @@ -470,120 +473,6 @@ bool LabelOutOfFocusEventFilter::eventFilter(QObject* watched, QEvent* event) return QObject::eventFilter(watched, event); } -void TableViewLastColumnResizingFixer::connectViewHeadersSignals() -{ - connect(tableView->horizontalHeader(), &QHeaderView::sectionResized, this, &TableViewLastColumnResizingFixer::on_sectionResized); - connect(tableView->horizontalHeader(), &QHeaderView::geometriesChanged, this, &TableViewLastColumnResizingFixer::on_geometriesChanged); -} - -// We need to disconnect these while handling the resize events, otherwise we can enter infinite loops. -void TableViewLastColumnResizingFixer::disconnectViewHeadersSignals() -{ - disconnect(tableView->horizontalHeader(), &QHeaderView::sectionResized, this, &TableViewLastColumnResizingFixer::on_sectionResized); - disconnect(tableView->horizontalHeader(), &QHeaderView::geometriesChanged, this, &TableViewLastColumnResizingFixer::on_geometriesChanged); -} - -// Setup the resize mode, handles compatibility for Qt5 and below as the method signatures changed. -// Refactored here for readability. 
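The fixedPitchFont() change above, together with the QFontDatabase::addApplicationFont(":/fonts/monospace") call in bitcoin.cpp and the new qrc alias, is what makes the bundled Roboto Mono selectable instead of the hard-coded per-label Monospace font removed from the .ui files. A hedged sketch of that pattern in isolation; the helper name is illustrative:

#include <QFont>
#include <QFontDatabase>

// Stand-alone version of the embedded/system monospace switch, for illustration.
QFont MonospacedFont(bool use_embedded)
{
    if (use_embedded) {
        // ":/fonts/monospace" resolves to res/fonts/RobotoMono-Bold.ttf via the
        // <qresource prefix="/fonts"> alias added in bitcoin.qrc; registering it
        // makes the "Roboto Mono" family available by name.
        QFontDatabase::addApplicationFont(":/fonts/monospace");
        return QFont{"Roboto Mono"};
    }
    return QFontDatabase::systemFont(QFontDatabase::FixedFont);
}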
-void TableViewLastColumnResizingFixer::setViewHeaderResizeMode(int logicalIndex, QHeaderView::ResizeMode resizeMode) -{ - tableView->horizontalHeader()->setSectionResizeMode(logicalIndex, resizeMode); -} - -void TableViewLastColumnResizingFixer::resizeColumn(int nColumnIndex, int width) -{ - tableView->setColumnWidth(nColumnIndex, width); - tableView->horizontalHeader()->resizeSection(nColumnIndex, width); -} - -int TableViewLastColumnResizingFixer::getColumnsWidth() -{ - int nColumnsWidthSum = 0; - for (int i = 0; i < columnCount; i++) - { - nColumnsWidthSum += tableView->horizontalHeader()->sectionSize(i); - } - return nColumnsWidthSum; -} - -int TableViewLastColumnResizingFixer::getAvailableWidthForColumn(int column) -{ - int nResult = lastColumnMinimumWidth; - int nTableWidth = tableView->horizontalHeader()->width(); - - if (nTableWidth > 0) - { - int nOtherColsWidth = getColumnsWidth() - tableView->horizontalHeader()->sectionSize(column); - nResult = std::max(nResult, nTableWidth - nOtherColsWidth); - } - - return nResult; -} - -// Make sure we don't make the columns wider than the table's viewport width. -void TableViewLastColumnResizingFixer::adjustTableColumnsWidth() -{ - disconnectViewHeadersSignals(); - resizeColumn(lastColumnIndex, getAvailableWidthForColumn(lastColumnIndex)); - connectViewHeadersSignals(); - - int nTableWidth = tableView->horizontalHeader()->width(); - int nColsWidth = getColumnsWidth(); - if (nColsWidth > nTableWidth) - { - resizeColumn(secondToLastColumnIndex,getAvailableWidthForColumn(secondToLastColumnIndex)); - } -} - -// Make column use all the space available, useful during window resizing. -void TableViewLastColumnResizingFixer::stretchColumnWidth(int column) -{ - disconnectViewHeadersSignals(); - resizeColumn(column, getAvailableWidthForColumn(column)); - connectViewHeadersSignals(); -} - -// When a section is resized this is a slot-proxy for ajustAmountColumnWidth(). -void TableViewLastColumnResizingFixer::on_sectionResized(int logicalIndex, int oldSize, int newSize) -{ - adjustTableColumnsWidth(); - int remainingWidth = getAvailableWidthForColumn(logicalIndex); - if (newSize > remainingWidth) - { - resizeColumn(logicalIndex, remainingWidth); - } -} - -// When the table's geometry is ready, we manually perform the stretch of the "Message" column, -// as the "Stretch" resize mode does not allow for interactive resizing. 
-void TableViewLastColumnResizingFixer::on_geometriesChanged() -{ - if ((getColumnsWidth() - this->tableView->horizontalHeader()->width()) != 0) - { - disconnectViewHeadersSignals(); - resizeColumn(secondToLastColumnIndex, getAvailableWidthForColumn(secondToLastColumnIndex)); - connectViewHeadersSignals(); - } -} - -/** - * Initializes all internal variables and prepares the - * the resize modes of the last 2 columns of the table and - */ -TableViewLastColumnResizingFixer::TableViewLastColumnResizingFixer(QTableView* table, int lastColMinimumWidth, int allColsMinimumWidth, QObject *parent) : - QObject(parent), - tableView(table), - lastColumnMinimumWidth(lastColMinimumWidth), - allColumnsMinimumWidth(allColsMinimumWidth) -{ - columnCount = tableView->horizontalHeader()->count(); - lastColumnIndex = columnCount - 1; - secondToLastColumnIndex = columnCount - 2; - tableView->horizontalHeader()->setMinimumSectionSize(allColumnsMinimumWidth); - setViewHeaderResizeMode(secondToLastColumnIndex, QHeaderView::Interactive); - setViewHeaderResizeMode(lastColumnIndex, QHeaderView::Interactive); -} - #ifdef WIN32 fs::path static StartupShortcutPath() { @@ -766,15 +655,19 @@ QString NetworkToQString(Network net) assert(false); } -QString ConnectionTypeToQString(ConnectionType conn_type, bool relay_txes) +QString ConnectionTypeToQString(ConnectionType conn_type, bool prepend_direction) { + QString prefix; + if (prepend_direction) { + prefix = (conn_type == ConnectionType::INBOUND) ? QObject::tr("Inbound") : QObject::tr("Outbound") + " "; + } switch (conn_type) { - case ConnectionType::INBOUND: return relay_txes ? QObject::tr("Inbound Full Relay") : QObject::tr("Inbound Block Relay"); - case ConnectionType::OUTBOUND_FULL_RELAY: return QObject::tr("Outbound Full Relay"); - case ConnectionType::BLOCK_RELAY: return QObject::tr("Outbound Block Relay"); - case ConnectionType::MANUAL: return QObject::tr("Outbound Manual"); - case ConnectionType::FEELER: return QObject::tr("Outbound Feeler"); - case ConnectionType::ADDR_FETCH: return QObject::tr("Outbound Address Fetch"); + case ConnectionType::INBOUND: return prefix; + case ConnectionType::OUTBOUND_FULL_RELAY: return prefix + QObject::tr("Full Relay"); + case ConnectionType::BLOCK_RELAY: return prefix + QObject::tr("Block Relay"); + case ConnectionType::MANUAL: return prefix + QObject::tr("Manual"); + case ConnectionType::FEELER: return prefix + QObject::tr("Feeler"); + case ConnectionType::ADDR_FETCH: return prefix + QObject::tr("Address Fetch"); } // no default case, so the compiler can warn about missing cases assert(false); } diff --git a/src/qt/guiutil.h b/src/qt/guiutil.h index edfb5b13a2..7984aa1141 100644 --- a/src/qt/guiutil.h +++ b/src/qt/guiutil.h @@ -53,7 +53,7 @@ namespace GUIUtil QString dateTimeStr(qint64 nTime); // Return a monospace font - QFont fixedPitchFont(); + QFont fixedPitchFont(bool use_embedded_font = false); // Set up widget for address void setupAddressWidget(QValidatedLineEdit *widget, QWidget *parent); @@ -181,45 +181,6 @@ namespace GUIUtil bool eventFilter(QObject* watched, QEvent* event) override; }; - /** - * Makes a QTableView last column feel as if it was being resized from its left border. - * Also makes sure the column widths are never larger than the table's viewport. - * In Qt, all columns are resizable from the right, but it's not intuitive resizing the last column from the right. 
- * Usually our second to last columns behave as if stretched, and when on stretch mode, columns aren't resizable - * interactively or programmatically. - * - * This helper object takes care of this issue. - * - */ - class TableViewLastColumnResizingFixer: public QObject - { - Q_OBJECT - - public: - TableViewLastColumnResizingFixer(QTableView* table, int lastColMinimumWidth, int allColsMinimumWidth, QObject *parent); - void stretchColumnWidth(int column); - - private: - QTableView* tableView; - int lastColumnMinimumWidth; - int allColumnsMinimumWidth; - int lastColumnIndex; - int columnCount; - int secondToLastColumnIndex; - - void adjustTableColumnsWidth(); - int getAvailableWidthForColumn(int column); - int getColumnsWidth(); - void connectViewHeadersSignals(); - void disconnectViewHeadersSignals(); - void setViewHeaderResizeMode(int logicalIndex, QHeaderView::ResizeMode resizeMode); - void resizeColumn(int nColumnIndex, int width); - - private Q_SLOTS: - void on_sectionResized(int logicalIndex, int oldSize, int newSize); - void on_geometriesChanged(); - }; - bool GetStartOnSystemStartup(); bool SetStartOnSystemStartup(bool fAutoStart); @@ -233,7 +194,7 @@ namespace GUIUtil QString NetworkToQString(Network net); /** Convert enum ConnectionType to QString */ - QString ConnectionTypeToQString(ConnectionType conn_type, bool relay_txes); + QString ConnectionTypeToQString(ConnectionType conn_type, bool prepend_direction); /** Convert seconds into a QString with days, hours, mins, secs */ QString formatDurationStr(int secs); diff --git a/src/qt/networkstyle.cpp b/src/qt/networkstyle.cpp index b1081f6aee..ee70c1bc30 100644 --- a/src/qt/networkstyle.cpp +++ b/src/qt/networkstyle.cpp @@ -22,7 +22,6 @@ static const struct { {"signet", QAPP_APP_NAME_SIGNET, 35, 15}, {"regtest", QAPP_APP_NAME_REGTEST, 160, 30}, }; -static const unsigned network_styles_count = sizeof(network_styles)/sizeof(*network_styles); // titleAddText needs to be const char* for tr() NetworkStyle::NetworkStyle(const QString &_appName, const int iconColorHueShift, const int iconColorSaturationReduction, const char *_titleAddText): @@ -81,14 +80,12 @@ NetworkStyle::NetworkStyle(const QString &_appName, const int iconColorHueShift, const NetworkStyle* NetworkStyle::instantiate(const std::string& networkId) { std::string titleAddText = networkId == CBaseChainParams::MAIN ? 
"" : strprintf("[%s]", networkId); - for (unsigned x=0; x<network_styles_count; ++x) - { - if (networkId == network_styles[x].networkId) - { + for (const auto& network_style : network_styles) { + if (networkId == network_style.networkId) { return new NetworkStyle( - network_styles[x].appName, - network_styles[x].iconColorHueShift, - network_styles[x].iconColorSaturationReduction, + network_style.appName, + network_style.iconColorHueShift, + network_style.iconColorSaturationReduction, titleAddText.c_str()); } } diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp index 7b8d7871ec..e6b9488344 100644 --- a/src/qt/optionsdialog.cpp +++ b/src/qt/optionsdialog.cpp @@ -144,6 +144,20 @@ OptionsDialog::OptionsDialog(QWidget *parent, bool enableWallet) : ui->minimizeToTray->setEnabled(false); } + QFont embedded_font{GUIUtil::fixedPitchFont(true)}; + ui->embeddedFont_radioButton->setText(ui->embeddedFont_radioButton->text().arg(QFontInfo(embedded_font).family())); + embedded_font.setWeight(QFont::Bold); + ui->embeddedFont_label_1->setFont(embedded_font); + ui->embeddedFont_label_9->setFont(embedded_font); + + QFont system_font{GUIUtil::fixedPitchFont(false)}; + ui->systemFont_radioButton->setText(ui->systemFont_radioButton->text().arg(QFontInfo(system_font).family())); + system_font.setWeight(QFont::Bold); + ui->systemFont_label_1->setFont(system_font); + ui->systemFont_label_9->setFont(system_font); + // Checking the embeddedFont_radioButton automatically unchecks the systemFont_radioButton. + ui->systemFont_radioButton->setChecked(true); + GUIUtil::handleCloseWindowShortcut(this); } @@ -246,6 +260,7 @@ void OptionsDialog::setMapper() mapper->addMapping(ui->lang, OptionsModel::Language); mapper->addMapping(ui->unit, OptionsModel::DisplayUnit); mapper->addMapping(ui->thirdPartyTxUrls, OptionsModel::ThirdPartyTxUrls); + mapper->addMapping(ui->embeddedFont_radioButton, OptionsModel::UseEmbeddedMonospacedFont); } void OptionsDialog::setOkButtonState(bool fState) diff --git a/src/qt/optionsmodel.cpp b/src/qt/optionsmodel.cpp index 1e0391a35c..d51a5b06ff 100644 --- a/src/qt/optionsmodel.cpp +++ b/src/qt/optionsmodel.cpp @@ -163,6 +163,12 @@ void OptionsModel::Init(bool resetSettings) addOverriddenOption("-lang"); language = settings.value("language").toString(); + + if (!settings.contains("UseEmbeddedMonospacedFont")) { + settings.setValue("UseEmbeddedMonospacedFont", "true"); + } + m_use_embedded_monospaced_font = settings.value("UseEmbeddedMonospacedFont").toBool(); + Q_EMIT useEmbeddedMonospacedFontChanged(m_use_embedded_monospaced_font); } /** Helper function to copy contents from one QSettings to another. 
@@ -326,6 +332,8 @@ QVariant OptionsModel::data(const QModelIndex & index, int role) const return strThirdPartyTxUrls; case Language: return settings.value("language"); + case UseEmbeddedMonospacedFont: + return m_use_embedded_monospaced_font; case CoinControlFeatures: return fCoinControlFeatures; case Prune: @@ -453,6 +461,11 @@ bool OptionsModel::setData(const QModelIndex & index, const QVariant & value, in setRestartRequired(true); } break; + case UseEmbeddedMonospacedFont: + m_use_embedded_monospaced_font = value.toBool(); + settings.setValue("UseEmbeddedMonospacedFont", m_use_embedded_monospaced_font); + Q_EMIT useEmbeddedMonospacedFontChanged(m_use_embedded_monospaced_font); + break; case CoinControlFeatures: fCoinControlFeatures = value.toBool(); settings.setValue("fCoinControlFeatures", fCoinControlFeatures); diff --git a/src/qt/optionsmodel.h b/src/qt/optionsmodel.h index f7171951a1..4d012a9b8f 100644 --- a/src/qt/optionsmodel.h +++ b/src/qt/optionsmodel.h @@ -59,6 +59,7 @@ public: DisplayUnit, // BitcoinUnits::Unit ThirdPartyTxUrls, // QString Language, // QString + UseEmbeddedMonospacedFont, // bool CoinControlFeatures, // bool ThreadsScriptVerif, // int Prune, // bool @@ -84,6 +85,7 @@ public: bool getMinimizeOnClose() const { return fMinimizeOnClose; } int getDisplayUnit() const { return nDisplayUnit; } QString getThirdPartyTxUrls() const { return strThirdPartyTxUrls; } + bool getUseEmbeddedMonospacedFont() const { return m_use_embedded_monospaced_font; } bool getCoinControlFeatures() const { return fCoinControlFeatures; } const QString& getOverriddenByCommandLine() { return strOverriddenByCommandLine; } @@ -107,6 +109,7 @@ private: QString language; int nDisplayUnit; QString strThirdPartyTxUrls; + bool m_use_embedded_monospaced_font; bool fCoinControlFeatures; /* settings that were overridden by command-line */ QString strOverriddenByCommandLine; @@ -120,6 +123,7 @@ Q_SIGNALS: void displayUnitChanged(int unit); void coinControlFeaturesChanged(bool); void showTrayIconChanged(bool); + void useEmbeddedMonospacedFontChanged(bool); }; #endif // BITCOIN_QT_OPTIONSMODEL_H diff --git a/src/qt/overviewpage.cpp b/src/qt/overviewpage.cpp index bc542a0833..7f12b1d2b5 100644 --- a/src/qt/overviewpage.cpp +++ b/src/qt/overviewpage.cpp @@ -257,6 +257,9 @@ void OverviewPage::setClientModel(ClientModel *model) // Show warning, for example if this is a prerelease version connect(model, &ClientModel::alertsChanged, this, &OverviewPage::updateAlerts); updateAlerts(model->getStatusBarWarnings()); + + connect(model->getOptionsModel(), &OptionsModel::useEmbeddedMonospacedFontChanged, this, &OverviewPage::setMonospacedFont); + setMonospacedFont(model->getOptionsModel()->getUseEmbeddedMonospacedFont()); } } @@ -321,3 +324,17 @@ void OverviewPage::showOutOfSyncWarning(bool fShow) ui->labelWalletStatus->setVisible(fShow); ui->labelTransactionsStatus->setVisible(fShow); } + +void OverviewPage::setMonospacedFont(bool use_embedded_font) +{ + QFont f = GUIUtil::fixedPitchFont(use_embedded_font); + f.setWeight(QFont::Bold); + ui->labelBalance->setFont(f); + ui->labelUnconfirmed->setFont(f); + ui->labelImmature->setFont(f); + ui->labelTotal->setFont(f); + ui->labelWatchAvailable->setFont(f); + ui->labelWatchPending->setFont(f); + ui->labelWatchImmature->setFont(f); + ui->labelWatchTotal->setFont(f); +} diff --git a/src/qt/overviewpage.h b/src/qt/overviewpage.h index 578ef601fb..5158c81678 100644 --- a/src/qt/overviewpage.h +++ b/src/qt/overviewpage.h @@ -61,6 +61,7 @@ private Q_SLOTS: void updateAlerts(const 
QString &warnings); void updateWatchOnlyLabels(bool showWatchOnly); void handleOutOfSyncWarningClicks(); + void setMonospacedFont(bool use_embedded_font); }; #endif // BITCOIN_QT_OVERVIEWPAGE_H diff --git a/src/qt/peertablemodel.cpp b/src/qt/peertablemodel.cpp index bad81d894c..5f518a67cd 100644 --- a/src/qt/peertablemodel.cpp +++ b/src/qt/peertablemodel.cpp @@ -29,6 +29,8 @@ bool NodeLessThan::operator()(const CNodeCombinedStats &left, const CNodeCombine return pLeft->nodeid < pRight->nodeid; case PeerTableModel::Address: return pLeft->addrName.compare(pRight->addrName) < 0; + case PeerTableModel::ConnectionType: + return pLeft->m_conn_type < pRight->m_conn_type; case PeerTableModel::Network: return pLeft->m_network < pRight->m_network; case PeerTableModel::Ping: @@ -163,6 +165,8 @@ QVariant PeerTableModel::data(const QModelIndex &index, int role) const case Address: // prepend to peer address down-arrow symbol for inbound connection and up-arrow for outbound connection return QString(rec->nodeStats.fInbound ? "↓ " : "↑ ") + QString::fromStdString(rec->nodeStats.addrName); + case ConnectionType: + return GUIUtil::ConnectionTypeToQString(rec->nodeStats.m_conn_type, /* prepend_direction */ false); case Network: return GUIUtil::NetworkToQString(rec->nodeStats.m_network); case Ping: @@ -176,6 +180,7 @@ QVariant PeerTableModel::data(const QModelIndex &index, int role) const } } else if (role == Qt::TextAlignmentRole) { switch (index.column()) { + case ConnectionType: case Network: return QVariant(Qt::AlignCenter); case Ping: diff --git a/src/qt/peertablemodel.h b/src/qt/peertablemodel.h index 7bff239507..0823235ec0 100644 --- a/src/qt/peertablemodel.h +++ b/src/qt/peertablemodel.h @@ -59,12 +59,13 @@ public: enum ColumnIndex { NetNodeId = 0, - Address = 1, - Network = 2, - Ping = 3, - Sent = 4, - Received = 5, - Subversion = 6 + Address, + ConnectionType, + Network, + Ping, + Sent, + Received, + Subversion }; enum { @@ -87,7 +88,7 @@ public Q_SLOTS: private: interfaces::Node& m_node; - const QStringList columns{tr("Peer Id"), tr("Address"), tr("Network"), tr("Ping"), tr("Sent"), tr("Received"), tr("User Agent")}; + const QStringList columns{tr("Peer Id"), tr("Address"), tr("Type"), tr("Network"), tr("Ping"), tr("Sent"), tr("Received"), tr("User Agent")}; std::unique_ptr<PeerTablePriv> priv; QTimer *timer; }; diff --git a/src/qt/platformstyle.cpp b/src/qt/platformstyle.cpp index c6b80fd340..aab8d8e4af 100644 --- a/src/qt/platformstyle.cpp +++ b/src/qt/platformstyle.cpp @@ -23,7 +23,6 @@ static const struct { /* Other: linux, unix, ... 
*/ {"other", true, true, false} }; -static const unsigned platform_styles_count = sizeof(platform_styles)/sizeof(*platform_styles); namespace { /* Local functions for colorizing single-color images */ @@ -121,15 +120,13 @@ QIcon PlatformStyle::TextColorIcon(const QIcon& icon) const const PlatformStyle *PlatformStyle::instantiate(const QString &platformId) { - for (unsigned x=0; x<platform_styles_count; ++x) - { - if (platformId == platform_styles[x].platformId) - { + for (const auto& platform_style : platform_styles) { + if (platformId == platform_style.platformId) { return new PlatformStyle( - platform_styles[x].platformId, - platform_styles[x].imagesOnButtons, - platform_styles[x].colorizeIcons, - platform_styles[x].useExtraSpacing); + platform_style.platformId, + platform_style.imagesOnButtons, + platform_style.colorizeIcons, + platform_style.useExtraSpacing); } } return nullptr; diff --git a/src/qt/receivecoinsdialog.cpp b/src/qt/receivecoinsdialog.cpp index 0aea920c86..62adaa4e9f 100644 --- a/src/qt/receivecoinsdialog.cpp +++ b/src/qt/receivecoinsdialog.cpp @@ -19,12 +19,12 @@ #include <QCursor> #include <QMessageBox> #include <QScrollBar> +#include <QSettings> #include <QTextDocument> ReceiveCoinsDialog::ReceiveCoinsDialog(const PlatformStyle *_platformStyle, QWidget *parent) : QDialog(parent, GUIUtil::dialog_flags), ui(new Ui::ReceiveCoinsDialog), - columnResizingFixer(nullptr), model(nullptr), platformStyle(_platformStyle) { @@ -44,6 +44,7 @@ ReceiveCoinsDialog::ReceiveCoinsDialog(const PlatformStyle *_platformStyle, QWid // context menu actions QAction *copyURIAction = new QAction(tr("Copy URI"), this); + QAction* copyAddressAction = new QAction(tr("Copy address"), this); QAction *copyLabelAction = new QAction(tr("Copy label"), this); QAction *copyMessageAction = new QAction(tr("Copy message"), this); QAction *copyAmountAction = new QAction(tr("Copy amount"), this); @@ -51,6 +52,7 @@ ReceiveCoinsDialog::ReceiveCoinsDialog(const PlatformStyle *_platformStyle, QWid // context menu contextMenu = new QMenu(this); contextMenu->addAction(copyURIAction); + contextMenu->addAction(copyAddressAction); contextMenu->addAction(copyLabelAction); contextMenu->addAction(copyMessageAction); contextMenu->addAction(copyAmountAction); @@ -58,11 +60,28 @@ ReceiveCoinsDialog::ReceiveCoinsDialog(const PlatformStyle *_platformStyle, QWid // context menu signals connect(ui->recentRequestsView, &QWidget::customContextMenuRequested, this, &ReceiveCoinsDialog::showMenu); connect(copyURIAction, &QAction::triggered, this, &ReceiveCoinsDialog::copyURI); + connect(copyAddressAction, &QAction::triggered, this, &ReceiveCoinsDialog::copyAddress); connect(copyLabelAction, &QAction::triggered, this, &ReceiveCoinsDialog::copyLabel); connect(copyMessageAction, &QAction::triggered, this, &ReceiveCoinsDialog::copyMessage); connect(copyAmountAction, &QAction::triggered, this, &ReceiveCoinsDialog::copyAmount); connect(ui->clearButton, &QPushButton::clicked, this, &ReceiveCoinsDialog::clear); + + QTableView* tableView = ui->recentRequestsView; + tableView->verticalHeader()->hide(); + tableView->setAlternatingRowColors(true); + tableView->setSelectionBehavior(QAbstractItemView::SelectRows); + tableView->setSelectionMode(QAbstractItemView::ContiguousSelection); + + QSettings settings; + if (!tableView->horizontalHeader()->restoreState(settings.value("RecentRequestsViewHeaderState").toByteArray())) { + tableView->setColumnWidth(RecentRequestsTableModel::Date, DATE_COLUMN_WIDTH); + 
tableView->setColumnWidth(RecentRequestsTableModel::Label, LABEL_COLUMN_WIDTH); + tableView->setColumnWidth(RecentRequestsTableModel::Amount, AMOUNT_MINIMUM_COLUMN_WIDTH); + tableView->horizontalHeader()->setMinimumSectionSize(MINIMUM_COLUMN_WIDTH); + tableView->horizontalHeader()->setStretchLastSection(true); + } + tableView->horizontalHeader()->setSortIndicator(RecentRequestsTableModel::Date, Qt::DescendingOrder); } void ReceiveCoinsDialog::setModel(WalletModel *_model) @@ -76,22 +95,10 @@ void ReceiveCoinsDialog::setModel(WalletModel *_model) updateDisplayUnit(); QTableView* tableView = ui->recentRequestsView; - - tableView->verticalHeader()->hide(); - tableView->setHorizontalScrollBarPolicy(Qt::ScrollBarAlwaysOff); tableView->setModel(_model->getRecentRequestsTableModel()); - tableView->setAlternatingRowColors(true); - tableView->setSelectionBehavior(QAbstractItemView::SelectRows); - tableView->setSelectionMode(QAbstractItemView::ContiguousSelection); - tableView->setColumnWidth(RecentRequestsTableModel::Date, DATE_COLUMN_WIDTH); - tableView->setColumnWidth(RecentRequestsTableModel::Label, LABEL_COLUMN_WIDTH); - tableView->setColumnWidth(RecentRequestsTableModel::Amount, AMOUNT_MINIMUM_COLUMN_WIDTH); - connect(tableView->selectionModel(), &QItemSelectionModel::selectionChanged, this, &ReceiveCoinsDialog::recentRequestsView_selectionChanged); - // Last 2 columns are set by the columnResizingFixer, when the table geometry is ready. - columnResizingFixer = new GUIUtil::TableViewLastColumnResizingFixer(tableView, AMOUNT_MINIMUM_COLUMN_WIDTH, DATE_COLUMN_WIDTH, this); if (model->wallet().getDefaultAddressType() == OutputType::BECH32) { ui->useBech32->setCheckState(Qt::Checked); @@ -111,6 +118,8 @@ void ReceiveCoinsDialog::setModel(WalletModel *_model) ReceiveCoinsDialog::~ReceiveCoinsDialog() { + QSettings settings; + settings.setValue("RecentRequestsViewHeaderState", ui->recentRequestsView->horizontalHeader()->saveState()); delete ui; } @@ -235,14 +244,6 @@ void ReceiveCoinsDialog::on_removeRequestButton_clicked() model->getRecentRequestsTableModel()->removeRows(firstIndex.row(), selection.length(), firstIndex.parent()); } -// We override the virtual resizeEvent of the QWidget to adjust tables column -// sizes as the tables width is proportional to the dialogs width. 
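The removed TableViewLastColumnResizingFixer is replaced above by letting Qt persist the header layout itself: restore it when the view is created, fall back to explicit defaults on first run, and save it when the dialog is destroyed. A hedged sketch of that save/restore pattern with hypothetical helper names and settings key:

#include <QHeaderView>
#include <QSettings>
#include <QTableView>

void RestoreHeaderState(QTableView* view, const QString& key, int first_column_width)
{
    QSettings settings;
    if (!view->horizontalHeader()->restoreState(settings.value(key).toByteArray())) {
        // No saved state yet (or it is stale): apply fixed defaults once.
        view->setColumnWidth(0, first_column_width);
        view->horizontalHeader()->setStretchLastSection(true);
    }
}

void SaveHeaderState(QTableView* view, const QString& key)
{
    QSettings settings;
    settings.setValue(key, view->horizontalHeader()->saveState());
}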
-void ReceiveCoinsDialog::resizeEvent(QResizeEvent *event) -{ - QWidget::resizeEvent(event); - columnResizingFixer->stretchColumnWidth(RecentRequestsTableModel::Message); -} - QModelIndex ReceiveCoinsDialog::selectedRow() { if(!model || !model->getRecentRequestsTableModel() || !ui->recentRequestsView->selectionModel()) @@ -287,6 +288,19 @@ void ReceiveCoinsDialog::copyURI() GUIUtil::setClipboard(uri); } +// context menu action: copy address +void ReceiveCoinsDialog::copyAddress() +{ + const QModelIndex sel = selectedRow(); + if (!sel.isValid()) { + return; + } + + const RecentRequestsTableModel* const submodel = model->getRecentRequestsTableModel(); + const QString address = submodel->entry(sel.row()).recipient.address; + GUIUtil::setClipboard(address); +} + // context menu action: copy label void ReceiveCoinsDialog::copyLabel() { diff --git a/src/qt/receivecoinsdialog.h b/src/qt/receivecoinsdialog.h index 9b89bd6a8b..f12cd8ce0c 100644 --- a/src/qt/receivecoinsdialog.h +++ b/src/qt/receivecoinsdialog.h @@ -51,14 +51,12 @@ public Q_SLOTS: private: Ui::ReceiveCoinsDialog *ui; - GUIUtil::TableViewLastColumnResizingFixer *columnResizingFixer; WalletModel *model; QMenu *contextMenu; const PlatformStyle *platformStyle; QModelIndex selectedRow(); void copyColumnToClipboard(int column); - virtual void resizeEvent(QResizeEvent *event) override; private Q_SLOTS: void on_receiveButton_clicked(); @@ -69,6 +67,7 @@ private Q_SLOTS: void updateDisplayUnit(); void showMenu(const QPoint &point); void copyURI(); + void copyAddress(); void copyLabel(); void copyMessage(); void copyAmount(); diff --git a/src/qt/res/fonts/RobotoMono-Bold.ttf b/src/qt/res/fonts/RobotoMono-Bold.ttf Binary files differnew file mode 100644 index 0000000000..900fce6848 --- /dev/null +++ b/src/qt/res/fonts/RobotoMono-Bold.ttf diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index a252685d2f..4a4b557acc 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -462,7 +462,7 @@ RPCConsole::RPCConsole(interfaces::Node& node, const PlatformStyle *_platformSty constexpr QChar nonbreaking_hyphen(8209); const std::vector<QString> CONNECTION_TYPE_DOC{ - tr("Inbound Full/Block Relay: initiated by peer"), + tr("Inbound: initiated by peer"), tr("Outbound Full Relay: default"), tr("Outbound Block Relay: does not relay transactions or addresses"), tr("Outbound Manual: added using RPC %1 or %2/%3 configuration options") @@ -473,6 +473,11 @@ RPCConsole::RPCConsole(interfaces::Node& node, const PlatformStyle *_platformSty tr("Outbound Address Fetch: short-lived, for soliciting addresses")}; const QString list{"<ul><li>" + Join(CONNECTION_TYPE_DOC, QString("</li><li>")) + "</li></ul>"}; ui->peerConnectionTypeLabel->setToolTip(ui->peerConnectionTypeLabel->toolTip().arg(list)); + const QString hb_list{"<ul><li>\"" + + tr("To") + "\" – " + tr("we selected the peer for high bandwidth relay") + "</li><li>\"" + + tr("From") + "\" – " + tr("the peer selected us for high bandwidth relay") + "</li><li>\"" + + tr("No") + "\" – " + tr("no high bandwidth relay selected") + "</li></ul>"}; + ui->peerHighBandwidthLabel->setToolTip(ui->peerHighBandwidthLabel->toolTip().arg(hb_list)); ui->dataDir->setToolTip(ui->dataDir->toolTip().arg(QString(nonbreaking_hyphen) + "datadir")); ui->blocksDir->setToolTip(ui->blocksDir->toolTip().arg(QString(nonbreaking_hyphen) + "blocksdir")); ui->openDebugLogfileButton->setToolTip(ui->openDebugLogfileButton->toolTip().arg(PACKAGE_NAME)); @@ -1097,7 +1102,7 @@ void RPCConsole::updateDetailWidget() { const 
QList<QModelIndex> selected_peers = GUIUtil::getEntryData(ui->peerWidget, PeerTableModel::NetNodeId); if (!clientModel || !clientModel->getPeerTableModel() || selected_peers.size() != 1) { - ui->detailWidget->hide(); + ui->peersTabRightPanel->hide(); ui->peerHeading->setText(tr("Select a peer to view detailed information.")); return; } @@ -1109,18 +1114,23 @@ void RPCConsole::updateDetailWidget() peerAddrDetails += "<br />" + tr("via %1").arg(QString::fromStdString(stats->nodeStats.addrLocal)); ui->peerHeading->setText(peerAddrDetails); ui->peerServices->setText(GUIUtil::formatServicesStr(stats->nodeStats.nServices)); + ui->peerRelayTxes->setText(stats->nodeStats.fRelayTxes ? "Yes" : "No"); + QString bip152_hb_settings; + if (stats->nodeStats.m_bip152_highbandwidth_to) bip152_hb_settings += "To"; + if (stats->nodeStats.m_bip152_highbandwidth_from) bip152_hb_settings += (bip152_hb_settings == "" ? "From" : "/From"); + if (bip152_hb_settings == "") bip152_hb_settings = "No"; + ui->peerHighBandwidth->setText(bip152_hb_settings); ui->peerLastSend->setText(stats->nodeStats.nLastSend ? GUIUtil::formatDurationStr(GetSystemTimeInSeconds() - stats->nodeStats.nLastSend) : tr("never")); ui->peerLastRecv->setText(stats->nodeStats.nLastRecv ? GUIUtil::formatDurationStr(GetSystemTimeInSeconds() - stats->nodeStats.nLastRecv) : tr("never")); ui->peerBytesSent->setText(GUIUtil::formatBytes(stats->nodeStats.nSendBytes)); ui->peerBytesRecv->setText(GUIUtil::formatBytes(stats->nodeStats.nRecvBytes)); ui->peerConnTime->setText(GUIUtil::formatDurationStr(GetSystemTimeInSeconds() - stats->nodeStats.nTimeConnected)); ui->peerPingTime->setText(GUIUtil::formatPingTime(stats->nodeStats.m_ping_usec)); - ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStats.m_ping_wait_usec)); ui->peerMinPing->setText(GUIUtil::formatPingTime(stats->nodeStats.m_min_ping_usec)); ui->timeoffset->setText(GUIUtil::formatTimeOffset(stats->nodeStats.nTimeOffset)); ui->peerVersion->setText(QString::number(stats->nodeStats.nVersion)); ui->peerSubversion->setText(QString::fromStdString(stats->nodeStats.cleanSubVer)); - ui->peerConnectionType->setText(GUIUtil::ConnectionTypeToQString(stats->nodeStats.m_conn_type, stats->nodeStats.fRelayTxes)); + ui->peerConnectionType->setText(GUIUtil::ConnectionTypeToQString(stats->nodeStats.m_conn_type, /* prepend_direction */ true)); ui->peerNetwork->setText(GUIUtil::NetworkToQString(stats->nodeStats.m_network)); if (stats->nodeStats.m_permissionFlags == PF_NONE) { ui->peerPermissions->setText(tr("N/A")); @@ -1149,9 +1159,10 @@ void RPCConsole::updateDetailWidget() ui->peerCommonHeight->setText(tr("Unknown")); ui->peerHeight->setText(QString::number(stats->nodeStateStats.m_starting_height)); + ui->peerPingWait->setText(GUIUtil::formatPingTime(stats->nodeStateStats.m_ping_wait_usec)); } - ui->detailWidget->show(); + ui->peersTabRightPanel->show(); } void RPCConsole::resizeEvent(QResizeEvent *event) diff --git a/src/qt/transactionview.cpp b/src/qt/transactionview.cpp index 0cf6480b82..b568f41158 100644 --- a/src/qt/transactionview.cpp +++ b/src/qt/transactionview.cpp @@ -31,6 +31,7 @@ #include <QMenu> #include <QPoint> #include <QScrollBar> +#include <QSettings> #include <QTableView> #include <QTimer> #include <QUrl> @@ -126,27 +127,40 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa vlayout->setContentsMargins(0,0,0,0); vlayout->setSpacing(0); - QTableView *view = new QTableView(this); + transactionView = new QTableView(this); + 
transactionView->setObjectName("transactionView"); vlayout->addLayout(hlayout); vlayout->addWidget(createDateRangeWidget()); - vlayout->addWidget(view); + vlayout->addWidget(transactionView); vlayout->setSpacing(0); - int width = view->verticalScrollBar()->sizeHint().width(); + int width = transactionView->verticalScrollBar()->sizeHint().width(); // Cover scroll bar width with spacing if (platformStyle->getUseExtraSpacing()) { hlayout->addSpacing(width+2); } else { hlayout->addSpacing(width); } - // Always show scroll bar - view->setVerticalScrollBarPolicy(Qt::ScrollBarAlwaysOn); - view->setTabKeyNavigation(false); - view->setContextMenuPolicy(Qt::CustomContextMenu); - - view->installEventFilter(this); - - transactionView = view; - transactionView->setObjectName("transactionView"); + transactionView->setVerticalScrollBarPolicy(Qt::ScrollBarAlwaysOn); + transactionView->setTabKeyNavigation(false); + transactionView->setContextMenuPolicy(Qt::CustomContextMenu); + transactionView->installEventFilter(this); + transactionView->setAlternatingRowColors(true); + transactionView->setSelectionBehavior(QAbstractItemView::SelectRows); + transactionView->setSelectionMode(QAbstractItemView::ExtendedSelection); + transactionView->setSortingEnabled(true); + transactionView->verticalHeader()->hide(); + + QSettings settings; + if (!transactionView->horizontalHeader()->restoreState(settings.value("TransactionViewHeaderState").toByteArray())) { + transactionView->setColumnWidth(TransactionTableModel::Status, STATUS_COLUMN_WIDTH); + transactionView->setColumnWidth(TransactionTableModel::Watchonly, WATCHONLY_COLUMN_WIDTH); + transactionView->setColumnWidth(TransactionTableModel::Date, DATE_COLUMN_WIDTH); + transactionView->setColumnWidth(TransactionTableModel::Type, TYPE_COLUMN_WIDTH); + transactionView->setColumnWidth(TransactionTableModel::Amount, AMOUNT_MINIMUM_COLUMN_WIDTH); + transactionView->horizontalHeader()->setMinimumSectionSize(MINIMUM_COLUMN_WIDTH); + transactionView->horizontalHeader()->setStretchLastSection(true); + } + transactionView->horizontalHeader()->setSortIndicator(TransactionTableModel::Date, Qt::DescendingOrder); // Actions abandonAction = new QAction(tr("Abandon transaction"), this); @@ -158,7 +172,6 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa QAction *copyTxIDAction = new QAction(tr("Copy transaction ID"), this); QAction *copyTxHexAction = new QAction(tr("Copy raw transaction"), this); QAction *copyTxPlainText = new QAction(tr("Copy full transaction details"), this); - QAction *editLabelAction = new QAction(tr("Edit label"), this); QAction *showDetailsAction = new QAction(tr("Show transaction details"), this); contextMenu = new QMenu(this); @@ -173,7 +186,6 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa contextMenu->addSeparator(); contextMenu->addAction(bumpFeeAction); contextMenu->addAction(abandonAction); - contextMenu->addAction(editLabelAction); connect(dateWidget, static_cast<void (QComboBox::*)(int)>(&QComboBox::activated), this, &TransactionView::chooseDate); connect(typeWidget, static_cast<void (QComboBox::*)(int)>(&QComboBox::activated), this, &TransactionView::chooseType); @@ -183,8 +195,8 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa connect(search_widget, &QLineEdit::textChanged, prefix_typing_delay, static_cast<void (QTimer::*)()>(&QTimer::start)); connect(prefix_typing_delay, &QTimer::timeout, this, &TransactionView::changedSearch); - connect(view, 
&QTableView::doubleClicked, this, &TransactionView::doubleClicked); - connect(view, &QTableView::customContextMenuRequested, this, &TransactionView::contextualMenu); + connect(transactionView, &QTableView::doubleClicked, this, &TransactionView::doubleClicked); + connect(transactionView, &QTableView::customContextMenuRequested, this, &TransactionView::contextualMenu); connect(bumpFeeAction, &QAction::triggered, this, &TransactionView::bumpFee); connect(abandonAction, &QAction::triggered, this, &TransactionView::abandonTx); @@ -194,7 +206,6 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa connect(copyTxIDAction, &QAction::triggered, this, &TransactionView::copyTxID); connect(copyTxHexAction, &QAction::triggered, this, &TransactionView::copyTxHex); connect(copyTxPlainText, &QAction::triggered, this, &TransactionView::copyTxPlainText); - connect(editLabelAction, &QAction::triggered, this, &TransactionView::editLabel); connect(showDetailsAction, &QAction::triggered, this, &TransactionView::showDetails); // Double-clicking on a transaction on the transaction history page shows details connect(this, &TransactionView::doubleClicked, this, &TransactionView::showDetails); @@ -204,6 +215,12 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa }); } +TransactionView::~TransactionView() +{ + QSettings settings; + settings.setValue("TransactionViewHeaderState", transactionView->horizontalHeader()->saveState()); +} + void TransactionView::setModel(WalletModel *_model) { this->model = _model; @@ -214,25 +231,8 @@ void TransactionView::setModel(WalletModel *_model) transactionProxyModel->setDynamicSortFilter(true); transactionProxyModel->setSortCaseSensitivity(Qt::CaseInsensitive); transactionProxyModel->setFilterCaseSensitivity(Qt::CaseInsensitive); - transactionProxyModel->setSortRole(Qt::EditRole); - - transactionView->setHorizontalScrollBarPolicy(Qt::ScrollBarAlwaysOff); transactionView->setModel(transactionProxyModel); - transactionView->setAlternatingRowColors(true); - transactionView->setSelectionBehavior(QAbstractItemView::SelectRows); - transactionView->setSelectionMode(QAbstractItemView::ExtendedSelection); - transactionView->horizontalHeader()->setSortIndicator(TransactionTableModel::Date, Qt::DescendingOrder); - transactionView->setSortingEnabled(true); - transactionView->verticalHeader()->hide(); - - transactionView->setColumnWidth(TransactionTableModel::Status, STATUS_COLUMN_WIDTH); - transactionView->setColumnWidth(TransactionTableModel::Watchonly, WATCHONLY_COLUMN_WIDTH); - transactionView->setColumnWidth(TransactionTableModel::Date, DATE_COLUMN_WIDTH); - transactionView->setColumnWidth(TransactionTableModel::Type, TYPE_COLUMN_WIDTH); - transactionView->setColumnWidth(TransactionTableModel::Amount, AMOUNT_MINIMUM_COLUMN_WIDTH); - - columnResizingFixer = new GUIUtil::TableViewLastColumnResizingFixer(transactionView, AMOUNT_MINIMUM_COLUMN_WIDTH, MINIMUM_COLUMN_WIDTH, this); if (_model->getOptionsModel()) { @@ -474,52 +474,6 @@ void TransactionView::copyTxPlainText() GUIUtil::copyEntryData(transactionView, 0, TransactionTableModel::TxPlainTextRole); } -void TransactionView::editLabel() -{ - if(!transactionView->selectionModel() ||!model) - return; - QModelIndexList selection = transactionView->selectionModel()->selectedRows(); - if(!selection.isEmpty()) - { - AddressTableModel *addressBook = model->getAddressTableModel(); - if(!addressBook) - return; - QString address = 
selection.at(0).data(TransactionTableModel::AddressRole).toString(); - if(address.isEmpty()) - { - // If this transaction has no associated address, exit - return; - } - // Is address in address book? Address book can miss address when a transaction is - // sent from outside the UI. - int idx = addressBook->lookupAddress(address); - if(idx != -1) - { - // Edit sending / receiving address - QModelIndex modelIdx = addressBook->index(idx, 0, QModelIndex()); - // Determine type of address, launch appropriate editor dialog type - QString type = modelIdx.data(AddressTableModel::TypeRole).toString(); - - EditAddressDialog dlg( - type == AddressTableModel::Receive - ? EditAddressDialog::EditReceivingAddress - : EditAddressDialog::EditSendingAddress, this); - dlg.setModel(addressBook); - dlg.loadRow(idx); - dlg.exec(); - } - else - { - // Add sending address - EditAddressDialog dlg(EditAddressDialog::NewSendingAddress, - this); - dlg.setModel(addressBook); - dlg.setAddress(address); - dlg.exec(); - } - } -} - void TransactionView::showDetails() { if(!transactionView->selectionModel()) @@ -623,14 +577,6 @@ void TransactionView::focusTransaction(const uint256& txid) } } -// We override the virtual resizeEvent of the QWidget to adjust tables column -// sizes as the tables width is proportional to the dialogs width. -void TransactionView::resizeEvent(QResizeEvent* event) -{ - QWidget::resizeEvent(event); - columnResizingFixer->stretchColumnWidth(TransactionTableModel::ToAddress); -} - // Need to override default Ctrl+C action for amount as default behaviour is just to copy DisplayRole text bool TransactionView::eventFilter(QObject *obj, QEvent *event) { diff --git a/src/qt/transactionview.h b/src/qt/transactionview.h index b268823066..cd40813461 100644 --- a/src/qt/transactionview.h +++ b/src/qt/transactionview.h @@ -35,6 +35,7 @@ class TransactionView : public QWidget public: explicit TransactionView(const PlatformStyle *platformStyle, QWidget *parent = nullptr); + ~TransactionView(); void setModel(WalletModel *model); @@ -82,10 +83,6 @@ private: QWidget *createDateRangeWidget(); - GUIUtil::TableViewLastColumnResizingFixer *columnResizingFixer{nullptr}; - - virtual void resizeEvent(QResizeEvent* event) override; - bool eventFilter(QObject *obj, QEvent *event) override; private Q_SLOTS: @@ -93,7 +90,6 @@ private Q_SLOTS: void dateRangeChanged(); void showDetails(); void copyAddress(); - void editLabel(); void copyLabel(); void copyAmount(); void copyTxID(); diff --git a/src/random.cpp b/src/random.cpp index af9504e0ce..9900825abb 100644 --- a/src/random.cpp +++ b/src/random.cpp @@ -38,7 +38,6 @@ #include <sys/random.h> #endif #ifdef HAVE_SYSCTL_ARND -#include <util/strencodings.h> // for ARRAYLEN #include <sys/sysctl.h> #endif @@ -333,7 +332,7 @@ void GetOSRand(unsigned char *ent32) int have = 0; do { size_t len = NUM_OS_RANDOM_BYTES - have; - if (sysctl(name, ARRAYLEN(name), ent32 + have, &len, nullptr, 0) != 0) { + if (sysctl(name, std::size(name), ent32 + have, &len, nullptr, 0) != 0) { RandFailure(); } have += len; diff --git a/src/rest.cpp b/src/rest.cpp index f872f6e59d..71426a4dc4 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -19,7 +19,6 @@ #include <txmempool.h> #include <util/check.h> #include <util/ref.h> -#include <util/strencodings.h> #include <validation.h> #include <version.h> @@ -117,9 +116,10 @@ static RetFormat ParseDataFormat(std::string& param, const std::string& strReq) param = strReq.substr(0, pos); const std::string suff(strReq, pos + 1); - for (unsigned int i = 0; i < 
ARRAYLEN(rf_names); i++) - if (suff == rf_names[i].name) - return rf_names[i].rf; + for (const auto& rf_name : rf_names) { + if (suff == rf_name.name) + return rf_name.rf; + } /* If no suffix is found, return original string. */ param = strReq; @@ -129,12 +129,13 @@ static RetFormat ParseDataFormat(std::string& param, const std::string& strReq) static std::string AvailableDataFormatsString() { std::string formats; - for (unsigned int i = 0; i < ARRAYLEN(rf_names); i++) - if (strlen(rf_names[i].name) > 0) { + for (const auto& rf_name : rf_names) { + if (strlen(rf_name.name) > 0) { formats.append("."); - formats.append(rf_names[i].name); + formats.append(rf_name.name); formats.append(", "); } + } if (formats.length() > 0) return formats.substr(0, formats.length() - 2); @@ -695,6 +696,7 @@ void InterruptREST() void StopREST() { - for (unsigned int i = 0; i < ARRAYLEN(uri_prefixes); i++) - UnregisterHTTPHandler(uri_prefixes[i].prefix, false); + for (const auto& up : uri_prefixes) { + UnregisterHTTPHandler(up.prefix, false); + } } diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index a83b45ae1d..5dc33d7a98 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -17,8 +17,8 @@ #include <node/coinstats.h> #include <node/context.h> #include <node/utxo_snapshot.h> -#include <policy/fees.h> #include <policy/feerate.h> +#include <policy/fees.h> #include <policy/policy.h> #include <policy/rbf.h> #include <primitives/transaction.h> @@ -156,21 +156,11 @@ UniValue blockheaderToJSON(const CBlockIndex* tip, const CBlockIndex* blockindex UniValue blockToJSON(const CBlock& block, const CBlockIndex* tip, const CBlockIndex* blockindex, bool txDetails) { - // Serialize passed information without accessing chain state of the active chain! - AssertLockNotHeld(cs_main); // For performance reasons + UniValue result = blockheaderToJSON(tip, blockindex); - UniValue result(UniValue::VOBJ); - result.pushKV("hash", blockindex->GetBlockHash().GetHex()); - const CBlockIndex* pnext; - int confirmations = ComputeNextBlockAndDepth(tip, blockindex, pnext); - result.pushKV("confirmations", confirmations); result.pushKV("strippedsize", (int)::GetSerializeSize(block, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS)); result.pushKV("size", (int)::GetSerializeSize(block, PROTOCOL_VERSION)); result.pushKV("weight", (int)::GetBlockWeight(block)); - result.pushKV("height", blockindex->nHeight); - result.pushKV("version", block.nVersion); - result.pushKV("versionHex", strprintf("%08x", block.nVersion)); - result.pushKV("merkleroot", block.hashMerkleRoot.GetHex()); UniValue txs(UniValue::VARR); if (txDetails) { CBlockUndo blockUndo; @@ -189,18 +179,7 @@ UniValue blockToJSON(const CBlock& block, const CBlockIndex* tip, const CBlockIn } } result.pushKV("tx", txs); - result.pushKV("time", block.GetBlockTime()); - result.pushKV("mediantime", (int64_t)blockindex->GetMedianTimePast()); - result.pushKV("nonce", (uint64_t)block.nNonce); - result.pushKV("bits", strprintf("%08x", block.nBits)); - result.pushKV("difficulty", GetDifficulty(blockindex)); - result.pushKV("chainwork", blockindex->nChainWork.GetHex()); - result.pushKV("nTx", (uint64_t)blockindex->nTx); - if (blockindex->pprev) - result.pushKV("previousblockhash", blockindex->pprev->GetBlockHash().GetHex()); - if (pnext) - result.pushKV("nextblockhash", pnext->GetBlockHash().GetHex()); return result; } @@ -1047,13 +1026,26 @@ static RPCHelpMan pruneblockchain() }; } +CoinStatsHashType ParseHashType(const std::string& hash_type_input) +{ + if 
(hash_type_input == "hash_serialized_2") { + return CoinStatsHashType::HASH_SERIALIZED; + } else if (hash_type_input == "muhash") { + return CoinStatsHashType::MUHASH; + } else if (hash_type_input == "none") { + return CoinStatsHashType::NONE; + } else { + throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("%s is not a valid hash_type", hash_type_input)); + } +} + static RPCHelpMan gettxoutsetinfo() { return RPCHelpMan{"gettxoutsetinfo", "\nReturns statistics about the unspent transaction output set.\n" "Note this call may take some time.\n", { - {"hash_type", RPCArg::Type::STR, /* default */ "hash_serialized_2", "Which UTXO set hash should be calculated. Options: 'hash_serialized_2' (the legacy algorithm), 'none'."}, + {"hash_type", RPCArg::Type::STR, /* default */ "hash_serialized_2", "Which UTXO set hash should be calculated. Options: 'hash_serialized_2' (the legacy algorithm), 'muhash', 'none'."}, }, RPCResult{ RPCResult::Type::OBJ, "", "", @@ -1063,7 +1055,8 @@ static RPCHelpMan gettxoutsetinfo() {RPCResult::Type::NUM, "transactions", "The number of transactions with unspent outputs"}, {RPCResult::Type::NUM, "txouts", "The number of unspent transaction outputs"}, {RPCResult::Type::NUM, "bogosize", "A meaningless metric for UTXO set size"}, - {RPCResult::Type::STR_HEX, "hash_serialized_2", "The serialized hash (only present if 'hash_serialized_2' hash_type is chosen)"}, + {RPCResult::Type::STR_HEX, "hash_serialized_2", /* optional */ true, "The serialized hash (only present if 'hash_serialized_2' hash_type is chosen)"}, + {RPCResult::Type::STR_HEX, "muhash", /* optional */ true, "The serialized hash (only present if 'muhash' hash_type is chosen)"}, {RPCResult::Type::NUM, "disk_size", "The estimated size of the chainstate on disk"}, {RPCResult::Type::STR_AMOUNT, "total_amount", "The total amount"}, }}, @@ -1078,7 +1071,7 @@ static RPCHelpMan gettxoutsetinfo() CCoinsStats stats; ::ChainstateActive().ForceFlushStateToDisk(); - const CoinStatsHashType hash_type = ParseHashType(request.params[0], CoinStatsHashType::HASH_SERIALIZED); + const CoinStatsHashType hash_type{request.params[0].isNull() ? CoinStatsHashType::HASH_SERIALIZED : ParseHashType(request.params[0].get_str())}; CCoinsView* coins_view = WITH_LOCK(cs_main, return &ChainstateActive().CoinsDB()); NodeContext& node = EnsureNodeContext(request.context); @@ -1091,6 +1084,9 @@ static RPCHelpMan gettxoutsetinfo() if (hash_type == CoinStatsHashType::HASH_SERIALIZED) { ret.pushKV("hash_serialized_2", stats.hashSerialized.GetHex()); } + if (hash_type == CoinStatsHashType::MUHASH) { + ret.pushKV("muhash", stats.hashSerialized.GetHex()); + } ret.pushKV("disk_size", stats.nDiskSize); ret.pushKV("total_amount", ValueFromAmount(stats.nTotalAmount)); } else { @@ -1500,6 +1496,7 @@ UniValue MempoolInfoToJSON(const CTxMemPool& pool) ret.pushKV("size", (int64_t)pool.size()); ret.pushKV("bytes", (int64_t)pool.GetTotalTxSize()); ret.pushKV("usage", (int64_t)pool.DynamicMemoryUsage()); + ret.pushKV("total_fee", ValueFromAmount(pool.GetTotalFee())); size_t maxmempool = gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000; ret.pushKV("maxmempool", (int64_t) maxmempool); ret.pushKV("mempoolminfee", ValueFromAmount(std::max(pool.GetMinFee(maxmempool), ::minRelayTxFee).GetFeePerK())); @@ -1520,6 +1517,7 @@ static RPCHelpMan getmempoolinfo() {RPCResult::Type::NUM, "size", "Current tx count"}, {RPCResult::Type::NUM, "bytes", "Sum of all virtual transaction sizes as defined in BIP 141. 
Differs from actual serialized size because witness data is discounted"}, {RPCResult::Type::NUM, "usage", "Total memory usage for the mempool"}, + {RPCResult::Type::STR_AMOUNT, "total_fee", "Total fees for the mempool in " + CURRENCY_UNIT + ", ignoring modified fees through prioritizetransaction"}, {RPCResult::Type::NUM, "maxmempool", "Maximum memory usage for the mempool"}, {RPCResult::Type::STR_AMOUNT, "mempoolminfee", "Minimum fee rate in " + CURRENCY_UNIT + "/kB for tx to be accepted. Is the maximum of minrelaytxfee and minimum mempool fee"}, {RPCResult::Type::STR_AMOUNT, "minrelaytxfee", "Current minimum relay fee for transactions"}, @@ -2413,10 +2411,21 @@ static RPCHelpMan dumptxoutset() FILE* file{fsbridge::fopen(temppath, "wb")}; CAutoFile afile{file, SER_DISK, CLIENT_VERSION}; + NodeContext& node = EnsureNodeContext(request.context); + UniValue result = CreateUTXOSnapshot(node, node.chainman->ActiveChainstate(), afile); + fs::rename(temppath, path); + + result.pushKV("path", path.string()); + return result; +}, + }; +} + +UniValue CreateUTXOSnapshot(NodeContext& node, CChainState& chainstate, CAutoFile& afile) +{ std::unique_ptr<CCoinsViewCursor> pcursor; CCoinsStats stats; CBlockIndex* tip; - NodeContext& node = EnsureNodeContext(request.context); { // We need to lock cs_main to ensure that the coinsdb isn't written to @@ -2433,13 +2442,13 @@ static RPCHelpMan dumptxoutset() // LOCK(::cs_main); - ::ChainstateActive().ForceFlushStateToDisk(); + chainstate.ForceFlushStateToDisk(); - if (!GetUTXOStats(&::ChainstateActive().CoinsDB(), stats, CoinStatsHashType::NONE, node.rpc_interruption_point)) { + if (!GetUTXOStats(&chainstate.CoinsDB(), stats, CoinStatsHashType::NONE, node.rpc_interruption_point)) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to read UTXO set"); } - pcursor = std::unique_ptr<CCoinsViewCursor>(::ChainstateActive().CoinsDB().Cursor()); + pcursor = std::unique_ptr<CCoinsViewCursor>(chainstate.CoinsDB().Cursor()); tip = g_chainman.m_blockman.LookupBlockIndex(stats.hashBlock); CHECK_NONFATAL(tip); } @@ -2464,16 +2473,13 @@ static RPCHelpMan dumptxoutset() } afile.fclose(); - fs::rename(temppath, path); UniValue result(UniValue::VOBJ); result.pushKV("coins_written", stats.coins_count); result.pushKV("base_hash", tip->GetBlockHash().ToString()); result.pushKV("base_height", tip->nHeight); - result.pushKV("path", path.string()); + return result; -}, - }; } void RegisterBlockchainRPCCommands(CRPCTable &t) diff --git a/src/rpc/blockchain.h b/src/rpc/blockchain.h index e4ce80400e..d8cae4dd24 100644 --- a/src/rpc/blockchain.h +++ b/src/rpc/blockchain.h @@ -6,6 +6,7 @@ #define BITCOIN_RPC_BLOCKCHAIN_H #include <amount.h> +#include <streams.h> #include <sync.h> #include <stdint.h> @@ -16,6 +17,7 @@ extern RecursiveMutex cs_main; class CBlock; class CBlockIndex; class CBlockPolicyEstimator; +class CChainState; class CTxMemPool; class ChainstateManager; class UniValue; @@ -57,4 +59,10 @@ CTxMemPool& EnsureMemPool(const util::Ref& context); ChainstateManager& EnsureChainman(const util::Ref& context); CBlockPolicyEstimator& EnsureFeeEstimator(const util::Ref& context); +/** + * Helper to create UTXO snapshots given a chainstate and a file handle. + * @return a UniValue map containing metadata about the snapshot. 
+ */ +UniValue CreateUTXOSnapshot(NodeContext& node, CChainState& chainstate, CAutoFile& afile); + #endif diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp index b75a7b8d26..38a0bddddb 100644 --- a/src/rpc/misc.cpp +++ b/src/rpc/misc.cpp @@ -365,13 +365,13 @@ static RPCHelpMan signmessagewithprivkey() static RPCHelpMan setmocktime() { return RPCHelpMan{"setmocktime", - "\nSet the local time to given timestamp (-regtest only)\n", - { - {"timestamp", RPCArg::Type::NUM, RPCArg::Optional::NO, UNIX_EPOCH_TIME + "\n" - " Pass 0 to go back to using the system time."}, - }, - RPCResult{RPCResult::Type::NONE, "", ""}, - RPCExamples{""}, + "\nSet the local time to given timestamp (-regtest only)\n", + { + {"timestamp", RPCArg::Type::NUM, RPCArg::Optional::NO, UNIX_EPOCH_TIME + "\n" + "Pass 0 to go back to using the system time."}, + }, + RPCResult{RPCResult::Type::NONE, "", ""}, + RPCExamples{""}, [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { if (!Params().IsMockableChain()) { @@ -386,7 +386,10 @@ static RPCHelpMan setmocktime() LOCK(cs_main); RPCTypeCheck(request.params, {UniValue::VNUM}); - int64_t time = request.params[0].get_int64(); + const int64_t time{request.params[0].get_int64()}; + if (time < 0) { + throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("Mocktime can not be negative: %s.", time)); + } SetMockTime(time); if (request.context.Has<NodeContext>()) { for (const auto& chain_client : request.context.Get<NodeContext>().chain_clients) { diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 47d77b341a..0224ee697a 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -77,13 +77,12 @@ static RPCHelpMan ping() [&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue { NodeContext& node = EnsureNodeContext(request.context); - if(!node.connman) + if (!node.peerman) { throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled"); + } // Request that each node send a ping during next message processing pass - node.connman->ForEachNode([](CNode* pnode) { - pnode->fPingQueued = true; - }); + node.peerman->SendPings(); return NullUniValue; }, }; @@ -104,7 +103,7 @@ static RPCHelpMan getpeerinfo() {RPCResult::Type::STR, "addr", "(host:port) The IP address and port of the peer"}, {RPCResult::Type::STR, "addrbind", "(ip:port) Bind address of the connection to the peer"}, {RPCResult::Type::STR, "addrlocal", "(ip:port) Local address as reported by the peer"}, - {RPCResult::Type::STR, "network", "Network (ipv4, ipv6, or onion) the peer connected through"}, + {RPCResult::Type::STR, "network", "Network (" + Join(GetNetworkNames(/* append_unroutable */ true), ", ") + ")"}, {RPCResult::Type::NUM, "mapped_as", "The AS in the BGP route to the peer used for diversifying\n" "peer selection (only available if the asmap config flag is set)"}, {RPCResult::Type::STR_HEX, "services", "The services offered"}, @@ -209,8 +208,8 @@ static RPCHelpMan getpeerinfo() if (stats.m_min_ping_usec < std::numeric_limits<int64_t>::max()) { obj.pushKV("minping", ((double)stats.m_min_ping_usec) / 1e6); } - if (stats.m_ping_wait_usec > 0) { - obj.pushKV("pingwait", ((double)stats.m_ping_wait_usec) / 1e6); + if (fStateStats && statestats.m_ping_wait_usec > 0) { + obj.pushKV("pingwait", ((double)statestats.m_ping_wait_usec) / 1e6); } obj.pushKV("version", stats.nVersion); // Use the sanitized form of subver here, to avoid tricksy remote peers from @@ -587,7 +586,7 @@ static RPCHelpMan getnetworkinfo() { {RPCResult::Type::OBJ, "", "", { - 
{RPCResult::Type::STR, "name", "network (ipv4, ipv6 or onion)"}, + {RPCResult::Type::STR, "name", "network (" + Join(GetNetworkNames(), ", ") + ")"}, {RPCResult::Type::BOOL, "limited", "is the network limited using -onlynet?"}, {RPCResult::Type::BOOL, "reachable", "is the network reachable?"}, {RPCResult::Type::STR, "proxy", "(\"host:port\") the proxy that is used for this network, or empty if none"}, diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index ac42404470..47c776bbd1 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -946,44 +946,35 @@ static RPCHelpMan testmempoolaccept() result_0.pushKV("txid", tx->GetHash().GetHex()); result_0.pushKV("wtxid", tx->GetWitnessHash().GetHex()); - TxValidationState state; - bool test_accept_res; - CAmount fee{0}; - { - LOCK(cs_main); - test_accept_res = AcceptToMemoryPool(mempool, state, std::move(tx), - nullptr /* plTxnReplaced */, false /* bypass_limits */, /* test_accept */ true, &fee); - } - - // Check that fee does not exceed maximum fee - if (test_accept_res && max_raw_tx_fee && fee > max_raw_tx_fee) { - result_0.pushKV("allowed", false); - result_0.pushKV("reject-reason", "max-fee-exceeded"); - result.push_back(std::move(result_0)); - return result; - } - result_0.pushKV("allowed", test_accept_res); + const MempoolAcceptResult accept_result = WITH_LOCK(cs_main, return AcceptToMemoryPool(::ChainstateActive(), mempool, std::move(tx), + false /* bypass_limits */, /* test_accept */ true)); // Only return the fee and vsize if the transaction would pass ATMP. // These can be used to calculate the feerate. - if (test_accept_res) { - result_0.pushKV("vsize", virtual_size); - UniValue fees(UniValue::VOBJ); - fees.pushKV("base", ValueFromAmount(fee)); - result_0.pushKV("fees", fees); + if (accept_result.m_result_type == MempoolAcceptResult::ResultType::VALID) { + const CAmount fee = accept_result.m_base_fees.value(); + // Check that fee does not exceed maximum fee + if (max_raw_tx_fee && fee > max_raw_tx_fee) { + result_0.pushKV("allowed", false); + result_0.pushKV("reject-reason", "max-fee-exceeded"); + } else { + result_0.pushKV("allowed", true); + result_0.pushKV("vsize", virtual_size); + UniValue fees(UniValue::VOBJ); + fees.pushKV("base", ValueFromAmount(fee)); + result_0.pushKV("fees", fees); + } + result.push_back(std::move(result_0)); } else { - if (state.IsInvalid()) { - if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { - result_0.pushKV("reject-reason", "missing-inputs"); - } else { - result_0.pushKV("reject-reason", strprintf("%s", state.GetRejectReason())); - } + result_0.pushKV("allowed", false); + const TxValidationState state = accept_result.m_state; + if (state.GetResult() == TxValidationResult::TX_MISSING_INPUTS) { + result_0.pushKV("reject-reason", "missing-inputs"); } else { result_0.pushKV("reject-reason", state.GetRejectReason()); } + result.push_back(std::move(result_0)); } - - result.push_back(std::move(result_0)); return result; }, }; diff --git a/src/rpc/util.cpp b/src/rpc/util.cpp index bfdba5253c..e890c0108a 100644 --- a/src/rpc/util.cpp +++ b/src/rpc/util.cpp @@ -113,23 +113,6 @@ std::vector<unsigned char> ParseHexO(const UniValue& o, std::string strKey) return ParseHexV(find_value(o, strKey), strKey); } -CoinStatsHashType ParseHashType(const UniValue& param, const CoinStatsHashType default_type) -{ - if (param.isNull()) { - return default_type; - } else { - std::string hash_type_input = param.get_str(); - - if (hash_type_input == "hash_serialized_2") { - return 
CoinStatsHashType::HASH_SERIALIZED; - } else if (hash_type_input == "none") { - return CoinStatsHashType::NONE; - } else { - throw JSONRPCError(RPC_INVALID_PARAMETER, strprintf("%d is not a valid hash_type", hash_type_input)); - } - } -} - std::string HelpExampleCli(const std::string& methodname, const std::string& args) { return "> bitcoin-cli " + methodname + " " + args + "\n"; diff --git a/src/rpc/util.h b/src/rpc/util.h index 444a013ca1..c54ce85f60 100644 --- a/src/rpc/util.h +++ b/src/rpc/util.h @@ -77,8 +77,6 @@ extern uint256 ParseHashO(const UniValue& o, std::string strKey); extern std::vector<unsigned char> ParseHexV(const UniValue& v, std::string strName); extern std::vector<unsigned char> ParseHexO(const UniValue& o, std::string strKey); -CoinStatsHashType ParseHashType(const UniValue& param, const CoinStatsHashType default_type); - extern CAmount AmountFromValue(const UniValue& value); extern std::string HelpExampleCli(const std::string& methodname, const std::string& args); extern std::string HelpExampleRpc(const std::string& methodname, const std::string& args); diff --git a/src/script/bitcoinconsensus.h b/src/script/bitcoinconsensus.h index c5dceac848..b6939127e1 100644 --- a/src/script/bitcoinconsensus.h +++ b/src/script/bitcoinconsensus.h @@ -11,14 +11,12 @@ #if defined(BUILD_BITCOIN_INTERNAL) && defined(HAVE_CONFIG_H) #include <config/bitcoin-config.h> #if defined(_WIN32) - #if defined(DLL_EXPORT) - #if defined(HAVE_FUNC_ATTRIBUTE_DLLEXPORT) - #define EXPORT_SYMBOL __declspec(dllexport) - #else - #define EXPORT_SYMBOL - #endif + #if defined(HAVE_DLLEXPORT_ATTRIBUTE) + #define EXPORT_SYMBOL __declspec(dllexport) + #else + #define EXPORT_SYMBOL #endif - #elif defined(HAVE_FUNC_ATTRIBUTE_VISIBILITY) + #elif defined(HAVE_DEFAULT_VISIBILITY_ATTRIBUTE) #define EXPORT_SYMBOL __attribute__ ((visibility ("default"))) #endif #elif defined(MSC_VER) && !defined(STATIC_LIBBITCOINCONSENSUS) diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 9e4b8a9dd6..6ab01882ac 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -179,6 +179,9 @@ public: /** Get the descriptor string form including private data (if available in arg). */ virtual bool ToPrivateString(const SigningProvider& arg, std::string& out) const = 0; + /** Get the descriptor string form with the xpub at the last hardened derivation */ + virtual bool ToNormalizedString(const SigningProvider& arg, std::string& out, bool priv) const = 0; + /** Derive a private key, if private data is available in arg. */ virtual bool GetPrivKey(int pos, const SigningProvider& arg, CKey& key) const = 0; }; @@ -212,6 +215,21 @@ public: ret = "[" + OriginString() + "]" + std::move(sub); return true; } + bool ToNormalizedString(const SigningProvider& arg, std::string& ret, bool priv) const override + { + std::string sub; + if (!m_provider->ToNormalizedString(arg, sub, priv)) return false; + // If m_provider is a BIP32PubkeyProvider, we may get a string formatted like a OriginPubkeyProvider + // In that case, we need to strip out the leading square bracket and fingerprint from the substring, + // and append that to our own origin string. 
+ if (sub[0] == '[') { + sub = sub.substr(9); + ret = "[" + OriginString() + std::move(sub); + } else { + ret = "[" + OriginString() + "]" + std::move(sub); + } + return true; + } bool GetPrivKey(int pos, const SigningProvider& arg, CKey& key) const override { return m_provider->GetPrivKey(pos, arg, key); @@ -243,6 +261,12 @@ public: ret = EncodeSecret(key); return true; } + bool ToNormalizedString(const SigningProvider& arg, std::string& ret, bool priv) const override + { + if (priv) return ToPrivateString(arg, ret); + ret = ToString(); + return true; + } bool GetPrivKey(int pos, const SigningProvider& arg, CKey& key) const override { return arg.GetKey(m_pubkey.GetID(), key); @@ -386,6 +410,56 @@ public: } return true; } + bool ToNormalizedString(const SigningProvider& arg, std::string& out, bool priv) const override + { + // For hardened derivation type, just return the typical string, nothing to normalize + if (m_derive == DeriveType::HARDENED) { + if (priv) return ToPrivateString(arg, out); + out = ToString(); + return true; + } + // Step backwards to find the last hardened step in the path + int i = (int)m_path.size() - 1; + for (; i >= 0; --i) { + if (m_path.at(i) >> 31) { + break; + } + } + // Either no derivation or all unhardened derivation + if (i == -1) { + if (priv) return ToPrivateString(arg, out); + out = ToString(); + return true; + } + // Derive the xpub at the last hardened step + CExtKey xprv; + if (!GetExtKey(arg, xprv)) return false; + KeyOriginInfo origin; + int k = 0; + for (; k <= i; ++k) { + // Derive + xprv.Derive(xprv, m_path.at(k)); + // Add to the path + origin.path.push_back(m_path.at(k)); + // First derivation element, get the fingerprint for origin + if (k == 0) { + std::copy(xprv.vchFingerprint, xprv.vchFingerprint + 4, origin.fingerprint); + } + } + // Build the remaining path + KeyPath end_path; + for (; k < (int)m_path.size(); ++k) { + end_path.push_back(m_path.at(k)); + } + // Build the string + std::string origin_str = HexStr(origin.fingerprint) + FormatHDKeypath(origin.path); + out = "[" + origin_str + "]" + (priv ? EncodeExtKey(xprv) : EncodeExtPubKey(xprv.Neuter())) + FormatHDKeypath(end_path); + if (IsRange()) { + out += "/*"; + assert(m_derive == DeriveType::UNHARDENED); + } + return true; + } bool GetPrivKey(int pos, const SigningProvider& arg, CKey& key) const override { CExtKey extkey; @@ -449,7 +523,7 @@ public: return false; } - bool ToStringHelper(const SigningProvider* arg, std::string& out, bool priv) const + bool ToStringHelper(const SigningProvider* arg, std::string& out, bool priv, bool normalized) const { std::string extra = ToStringExtra(); size_t pos = extra.size() > 0 ? 
1 : 0; @@ -457,7 +531,9 @@ public: for (const auto& pubkey : m_pubkey_args) { if (pos++) ret += ","; std::string tmp; - if (priv) { + if (normalized) { + if (!pubkey->ToNormalizedString(*arg, tmp, priv)) return false; + } else if (priv) { if (!pubkey->ToPrivateString(*arg, tmp)) return false; } else { tmp = pubkey->ToString(); @@ -467,7 +543,7 @@ public: if (m_subdescriptor_arg) { if (pos++) ret += ","; std::string tmp; - if (!m_subdescriptor_arg->ToStringHelper(arg, tmp, priv)) return false; + if (!m_subdescriptor_arg->ToStringHelper(arg, tmp, priv, normalized)) return false; ret += std::move(tmp); } out = std::move(ret) + ")"; @@ -477,13 +553,20 @@ public: std::string ToString() const final { std::string ret; - ToStringHelper(nullptr, ret, false); + ToStringHelper(nullptr, ret, false, false); return AddChecksum(ret); } bool ToPrivateString(const SigningProvider& arg, std::string& out) const final { - bool ret = ToStringHelper(&arg, out, true); + bool ret = ToStringHelper(&arg, out, true, false); + out = AddChecksum(out); + return ret; + } + + bool ToNormalizedString(const SigningProvider& arg, std::string& out, bool priv) const override final + { + bool ret = ToStringHelper(&arg, out, priv, true); out = AddChecksum(out); return ret; } diff --git a/src/script/descriptor.h b/src/script/descriptor.h index 17b43e7c81..46d51fa587 100644 --- a/src/script/descriptor.h +++ b/src/script/descriptor.h @@ -93,6 +93,9 @@ struct Descriptor { /** Convert the descriptor to a private string. This fails if the provided provider does not have the relevant private keys. */ virtual bool ToPrivateString(const SigningProvider& provider, std::string& out) const = 0; + /** Convert the descriptor to a normalized string. Normalized descriptors have the xpub at the last hardened step. This fails if the provided provider does not have the private keys to derive that xpub. */ + virtual bool ToNormalizedString(const SigningProvider& provider, std::string& out, bool priv) const = 0; + /** Expand a descriptor at a specified position. * * @param[in] pos The position at which to expand the descriptor. If IsRange() is false, this is ignored. 
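
The descriptor.cpp/descriptor.h hunks above add ToNormalizedString(), which rewrites a descriptor so the xpub sits at the last hardened derivation step: the code scans the key path backwards for the last hardened element, derives up to and including it (that part becomes the key origin), and keeps the remaining unhardened steps on the derived xpub. The following is a minimal, standalone C++ sketch of just that path-splitting step, for illustration only; it does not use Bitcoin Core's descriptor or CExtKey classes, and the names SplitAtLastHardened and FormatElement are invented for this sketch, not project APIs.

// Sketch: split a BIP32 derivation path after its last hardened element,
// mirroring the backwards scan in the ToNormalizedString() hunk above.
#include <cstdint>
#include <cstdio>
#include <string>
#include <vector>

using KeyPath = std::vector<uint32_t>;

// Format one path element; hardened steps have the high bit set and are
// rendered with a trailing apostrophe.
static std::string FormatElement(uint32_t e)
{
    const bool hardened = e >> 31;
    return std::to_string(e & ~(1U << 31)) + (hardened ? "'" : "");
}

// Everything up to and including the last hardened element would become the
// key origin; the remainder stays as unhardened derivation on the xpub.
static void SplitAtLastHardened(const KeyPath& path, KeyPath& origin, KeyPath& remainder)
{
    int last_hardened = -1;
    for (int i = (int)path.size() - 1; i >= 0; --i) {
        if (path[i] >> 31) { last_hardened = i; break; }
    }
    origin.assign(path.begin(), path.begin() + last_hardened + 1);
    remainder.assign(path.begin() + last_hardened + 1, path.end());
}

int main()
{
    const uint32_t H = 0x80000000U;          // hardened marker
    const KeyPath path{84 | H, 0 | H, 0 | H, 0}; // e.g. .../84'/0'/0'/0
    KeyPath origin, remainder;
    SplitAtLastHardened(path, origin, remainder);

    std::string origin_str, rest_str;
    for (uint32_t e : origin) origin_str += "/" + FormatElement(e);
    for (uint32_t e : remainder) rest_str += "/" + FormatElement(e);
    std::printf("origin: m%s\nkept on xpub: %s\n",
                origin_str.c_str(), rest_str.empty() ? "(none)" : rest_str.c_str());
}

For the example path above this prints an origin of m/84'/0'/0' and /0 left on the xpub, which is the shape ToNormalizedString() encodes as "[fingerprint/84'/0'/0']xpub.../0" (plus "/*" when the descriptor is ranged).
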
diff --git a/src/script/sigcache.cpp b/src/script/sigcache.cpp index cf47d37e70..c6d898a25a 100644 --- a/src/script/sigcache.cpp +++ b/src/script/sigcache.cpp @@ -12,8 +12,10 @@ #include <cuckoocache.h> -#include <boost/thread/lock_types.hpp> -#include <boost/thread/shared_mutex.hpp> +#include <algorithm> +#include <mutex> +#include <shared_mutex> +#include <vector> namespace { /** @@ -29,7 +31,7 @@ private: CSHA256 m_salted_hasher_schnorr; typedef CuckooCache::cache<uint256, SignatureCacheHasher> map_type; map_type setValid; - boost::shared_mutex cs_sigcache; + std::shared_mutex cs_sigcache; public: CSignatureCache() @@ -64,13 +66,13 @@ public: bool Get(const uint256& entry, const bool erase) { - boost::shared_lock<boost::shared_mutex> lock(cs_sigcache); + std::shared_lock<std::shared_mutex> lock(cs_sigcache); return setValid.contains(entry, erase); } void Set(const uint256& entry) { - boost::unique_lock<boost::shared_mutex> lock(cs_sigcache); + std::unique_lock<std::shared_mutex> lock(cs_sigcache); setValid.insert(entry); } uint32_t setup_bytes(size_t n) diff --git a/src/script/sign.cpp b/src/script/sign.cpp index 8afbe9ebed..dba5ce621a 100644 --- a/src/script/sign.cpp +++ b/src/script/sign.cpp @@ -106,8 +106,7 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator std::vector<valtype> vSolutions; whichTypeRet = Solver(scriptPubKey, vSolutions); - switch (whichTypeRet) - { + switch (whichTypeRet) { case TxoutType::NONSTANDARD: case TxoutType::NULL_DATA: case TxoutType::WITNESS_UNKNOWN: @@ -173,10 +172,8 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator // Could not find witnessScript, add to missing sigdata.missing_witness_script = uint256(vSolutions[0]); return false; - - default: - return false; - } + } // no default case, so the compiler can warn about missing cases + assert(false); } static CScript PushAll(const std::vector<valtype>& values) diff --git a/src/script/standard.cpp b/src/script/standard.cpp index 7967c01858..4d882cd1f1 100644 --- a/src/script/standard.cpp +++ b/src/script/standard.cpp @@ -45,8 +45,7 @@ WitnessV0ScriptHash::WitnessV0ScriptHash(const CScript& in) std::string GetTxnOutputType(TxoutType t) { - switch (t) - { + switch (t) { case TxoutType::NONSTANDARD: return "nonstandard"; case TxoutType::PUBKEY: return "pubkey"; case TxoutType::PUBKEYHASH: return "pubkeyhash"; @@ -182,7 +181,8 @@ bool ExtractDestination(const CScript& scriptPubKey, CTxDestination& addressRet) std::vector<valtype> vSolutions; TxoutType whichType = Solver(scriptPubKey, vSolutions); - if (whichType == TxoutType::PUBKEY) { + switch (whichType) { + case TxoutType::PUBKEY: { CPubKey pubKey(vSolutions[0]); if (!pubKey.IsValid()) return false; @@ -190,26 +190,28 @@ bool ExtractDestination(const CScript& scriptPubKey, CTxDestination& addressRet) addressRet = PKHash(pubKey); return true; } - else if (whichType == TxoutType::PUBKEYHASH) - { + case TxoutType::PUBKEYHASH: { addressRet = PKHash(uint160(vSolutions[0])); return true; } - else if (whichType == TxoutType::SCRIPTHASH) - { + case TxoutType::SCRIPTHASH: { addressRet = ScriptHash(uint160(vSolutions[0])); return true; - } else if (whichType == TxoutType::WITNESS_V0_KEYHASH) { + } + case TxoutType::WITNESS_V0_KEYHASH: { WitnessV0KeyHash hash; std::copy(vSolutions[0].begin(), vSolutions[0].end(), hash.begin()); addressRet = hash; return true; - } else if (whichType == TxoutType::WITNESS_V0_SCRIPTHASH) { + } + case TxoutType::WITNESS_V0_SCRIPTHASH: { WitnessV0ScriptHash hash; 
std::copy(vSolutions[0].begin(), vSolutions[0].end(), hash.begin()); addressRet = hash; return true; - } else if (whichType == TxoutType::WITNESS_UNKNOWN || whichType == TxoutType::WITNESS_V1_TAPROOT) { + } + case TxoutType::WITNESS_UNKNOWN: + case TxoutType::WITNESS_V1_TAPROOT: { WitnessUnknown unk; unk.version = vSolutions[0][0]; std::copy(vSolutions[1].begin(), vSolutions[1].end(), unk.program); @@ -217,8 +219,13 @@ bool ExtractDestination(const CScript& scriptPubKey, CTxDestination& addressRet) addressRet = unk; return true; } - // Multisig txns have more than one address... - return false; + case TxoutType::MULTISIG: + // Multisig txns have more than one address... + case TxoutType::NULL_DATA: + case TxoutType::NONSTANDARD: + return false; + } // no default case, so the compiler can warn about missing cases + assert(false); } bool ExtractDestinations(const CScript& scriptPubKey, TxoutType& typeRet, std::vector<CTxDestination>& addressRet, int& nRequiredRet) diff --git a/src/test/base32_tests.cpp b/src/test/base32_tests.cpp index 3f7c5e99ee..3b44564ddb 100644 --- a/src/test/base32_tests.cpp +++ b/src/test/base32_tests.cpp @@ -17,7 +17,7 @@ BOOST_AUTO_TEST_CASE(base32_testvectors) static const std::string vstrIn[] = {"","f","fo","foo","foob","fooba","foobar"}; static const std::string vstrOut[] = {"","my======","mzxq====","mzxw6===","mzxw6yq=","mzxw6ytb","mzxw6ytboi======"}; static const std::string vstrOutNoPadding[] = {"","my","mzxq","mzxw6","mzxw6yq","mzxw6ytb","mzxw6ytboi"}; - for (unsigned int i=0; i<sizeof(vstrIn)/sizeof(vstrIn[0]); i++) + for (unsigned int i=0; i<std::size(vstrIn); i++) { std::string strEnc = EncodeBase32(vstrIn[i]); BOOST_CHECK_EQUAL(strEnc, vstrOut[i]); diff --git a/src/test/base64_tests.cpp b/src/test/base64_tests.cpp index bb8d102bd0..714fccffaa 100644 --- a/src/test/base64_tests.cpp +++ b/src/test/base64_tests.cpp @@ -16,7 +16,7 @@ BOOST_AUTO_TEST_CASE(base64_testvectors) { static const std::string vstrIn[] = {"","f","fo","foo","foob","fooba","foobar"}; static const std::string vstrOut[] = {"","Zg==","Zm8=","Zm9v","Zm9vYg==","Zm9vYmE=","Zm9vYmFy"}; - for (unsigned int i=0; i<sizeof(vstrIn)/sizeof(vstrIn[0]); i++) + for (unsigned int i=0; i<std::size(vstrIn); i++) { std::string strEnc = EncodeBase64(vstrIn[i]); BOOST_CHECK_EQUAL(strEnc, vstrOut[i]); diff --git a/src/test/cuckoocache_tests.cpp b/src/test/cuckoocache_tests.cpp index 75c7e47e64..35b66cfc53 100644 --- a/src/test/cuckoocache_tests.cpp +++ b/src/test/cuckoocache_tests.cpp @@ -1,15 +1,18 @@ // Copyright (c) 2012-2020 The Bitcoin Core developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#include <boost/test/unit_test.hpp> -#include <boost/thread/lock_types.hpp> -#include <boost/thread/shared_mutex.hpp> #include <cuckoocache.h> -#include <deque> #include <random.h> #include <script/sigcache.h> #include <test/util/setup_common.h> + +#include <boost/test/unit_test.hpp> + +#include <deque> +#include <mutex> +#include <shared_mutex> #include <thread> +#include <vector> /** Test Suite for CuckooCache * @@ -201,11 +204,11 @@ static void test_cache_erase_parallel(size_t megabytes) * "future proofed". 
*/ std::vector<uint256> hashes_insert_copy = hashes; - boost::shared_mutex mtx; + std::shared_mutex mtx; { /** Grab lock to make sure we release inserts */ - boost::unique_lock<boost::shared_mutex> l(mtx); + std::unique_lock<std::shared_mutex> l(mtx); /** Insert the first half */ for (uint32_t i = 0; i < (n_insert / 2); ++i) set.insert(hashes_insert_copy[i]); @@ -219,7 +222,7 @@ static void test_cache_erase_parallel(size_t megabytes) /** Each thread is emplaced with x copy-by-value */ threads.emplace_back([&, x] { - boost::shared_lock<boost::shared_mutex> l(mtx); + std::shared_lock<std::shared_mutex> l(mtx); size_t ntodo = (n_insert/4)/3; size_t start = ntodo*x; size_t end = ntodo*(x+1); @@ -234,7 +237,7 @@ static void test_cache_erase_parallel(size_t megabytes) for (std::thread& t : threads) t.join(); /** Grab lock to make sure we observe erases */ - boost::unique_lock<boost::shared_mutex> l(mtx); + std::unique_lock<std::shared_mutex> l(mtx); /** Insert the second half */ for (uint32_t i = (n_insert / 2); i < n_insert; ++i) set.insert(hashes_insert_copy[i]); diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp index cf6009d591..0d480e35ea 100644 --- a/src/test/denialofservice_tests.cpp +++ b/src/test/denialofservice_tests.cpp @@ -85,7 +85,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) // Mock an outbound peer CAddress addr1(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK | NODE_WITNESS), INVALID_SOCKET, addr1, 0, 0, CAddress(), "", ConnectionType::OUTBOUND_FULL_RELAY); + CNode dummyNode1(id++, ServiceFlags(NODE_NETWORK | NODE_WITNESS), INVALID_SOCKET, addr1, /* nKeyedNetGroupIn */ 0, /* nLocalHostNonceIn */ 0, CAddress(), /* pszDest */ "", ConnectionType::OUTBOUND_FULL_RELAY, /* inbound_onion */ false); dummyNode1.SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode1); @@ -136,7 +136,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) static void AddRandomOutboundPeer(std::vector<CNode *> &vNodes, PeerManager &peerLogic, CConnmanTest* connman) { CAddress addr(ip(g_insecure_rand_ctx.randbits(32)), NODE_NONE); - vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK | NODE_WITNESS), INVALID_SOCKET, addr, 0, 0, CAddress(), "", ConnectionType::OUTBOUND_FULL_RELAY)); + vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK | NODE_WITNESS), INVALID_SOCKET, addr, /* nKeyedNetGroupIn */ 0, /* nLocalHostNonceIn */ 0, CAddress(), /* pszDest */ "", ConnectionType::OUTBOUND_FULL_RELAY, /* inbound_onion */ false)); CNode &node = *vNodes.back(); node.SetCommonVersion(PROTOCOL_VERSION); @@ -229,7 +229,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) banman->ClearBanned(); CAddress addr1(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode1(id++, NODE_NETWORK, INVALID_SOCKET, addr1, 0, 0, CAddress(), "", ConnectionType::INBOUND); + CNode dummyNode1(id++, NODE_NETWORK, INVALID_SOCKET, addr1, /* nKeyedNetGroupIn */ 0, /* nLocalHostNonceIn */ 0, CAddress(), /* pszDest */ "", ConnectionType::INBOUND, /* inbound_onion */ false); dummyNode1.SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode1); dummyNode1.fSuccessfullyConnected = true; @@ -242,7 +242,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) BOOST_CHECK(!banman->IsDiscouraged(ip(0xa0b0c001|0x0000ff00))); // Different IP, not discouraged CAddress addr2(ip(0xa0b0c002), NODE_NONE); - CNode dummyNode2(id++, NODE_NETWORK, INVALID_SOCKET, addr2, 1, 1, CAddress(), "", ConnectionType::INBOUND); + CNode dummyNode2(id++, NODE_NETWORK, 
INVALID_SOCKET, addr2, /* nKeyedNetGroupIn */ 1, /* nLocalHostNonceIn */ 1, CAddress(), /* pszDest */ "", ConnectionType::INBOUND, /* inbound_onion */ false); dummyNode2.SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode2); dummyNode2.fSuccessfullyConnected = true; @@ -279,7 +279,7 @@ BOOST_AUTO_TEST_CASE(DoS_bantime) SetMockTime(nStartTime); // Overrides future calls to GetTime() CAddress addr(ip(0xa0b0c001), NODE_NONE); - CNode dummyNode(id++, NODE_NETWORK, INVALID_SOCKET, addr, 4, 4, CAddress(), "", ConnectionType::INBOUND); + CNode dummyNode(id++, NODE_NETWORK, INVALID_SOCKET, addr, /* nKeyedNetGroupIn */ 4, /* nLocalHostNonceIn */ 4, CAddress(), /* pszDest */ "", ConnectionType::INBOUND, /* inbound_onion */ false); dummyNode.SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(&dummyNode); dummyNode.fSuccessfullyConnected = true; diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp index 20132d5782..acbd6a01ee 100644 --- a/src/test/descriptor_tests.cpp +++ b/src/test/descriptor_tests.cpp @@ -65,7 +65,7 @@ std::string UseHInsteadOfApostrophe(const std::string& desc) const std::set<std::vector<uint32_t>> ONLY_EMPTY{{}}; -void DoCheck(const std::string& prv, const std::string& pub, int flags, const std::vector<std::vector<std::string>>& scripts, const Optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY, +void DoCheck(const std::string& prv, const std::string& pub, const std::string& norm_prv, const std::string& norm_pub, int flags, const std::vector<std::vector<std::string>>& scripts, const Optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY, bool replace_apostrophe_with_h_in_prv=false, bool replace_apostrophe_with_h_in_pub=false) { FlatSigningProvider keys_priv, keys_pub; @@ -112,6 +112,17 @@ void DoCheck(const std::string& prv, const std::string& pub, int flags, const st BOOST_CHECK(EqualDescriptor(prv, prv1)); BOOST_CHECK(!parse_pub->ToPrivateString(keys_pub, prv1)); + // Check that private can produce the normalized descriptors + std::string norm1; + BOOST_CHECK(parse_priv->ToNormalizedString(keys_priv, norm1, false)); + BOOST_CHECK(EqualDescriptor(norm1, norm_pub)); + BOOST_CHECK(parse_pub->ToNormalizedString(keys_priv, norm1, false)); + BOOST_CHECK(EqualDescriptor(norm1, norm_pub)); + BOOST_CHECK(parse_priv->ToNormalizedString(keys_priv, norm1, true)); + BOOST_CHECK(EqualDescriptor(norm1, norm_prv)); + BOOST_CHECK(parse_pub->ToNormalizedString(keys_priv, norm1, true)); + BOOST_CHECK(EqualDescriptor(norm1, norm_prv)); + // Check whether IsRange on both returns the expected result BOOST_CHECK_EQUAL(parse_pub->IsRange(), (flags & RANGE) != 0); BOOST_CHECK_EQUAL(parse_priv->IsRange(), (flags & RANGE) != 0); @@ -251,29 +262,29 @@ void DoCheck(const std::string& prv, const std::string& pub, int flags, const st BOOST_CHECK_MESSAGE(left_paths.empty(), "Not all expected key paths found: " + prv); } -void Check(const std::string& prv, const std::string& pub, int flags, const std::vector<std::vector<std::string>>& scripts, const Optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY) +void Check(const std::string& prv, const std::string& pub, const std::string& norm_prv, const std::string& norm_pub, int flags, const std::vector<std::vector<std::string>>& scripts, const Optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY) { bool found_apostrophes_in_prv = false; bool found_apostrophes_in_pub = false; // Do not 
replace apostrophes with 'h' in prv and pub - DoCheck(prv, pub, flags, scripts, type, paths); + DoCheck(prv, pub, norm_prv, norm_pub, flags, scripts, type, paths); // Replace apostrophes with 'h' in prv but not in pub, if apostrophes are found in prv if (prv.find('\'') != std::string::npos) { found_apostrophes_in_prv = true; - DoCheck(prv, pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */false); + DoCheck(prv, pub, norm_prv, norm_pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */false); } // Replace apostrophes with 'h' in pub but not in prv, if apostrophes are found in pub if (pub.find('\'') != std::string::npos) { found_apostrophes_in_pub = true; - DoCheck(prv, pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */false, /*replace_apostrophe_with_h_in_pub = */true); + DoCheck(prv, pub, norm_prv, norm_pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */false, /*replace_apostrophe_with_h_in_pub = */true); } // Replace apostrophes with 'h' both in prv and in pub, if apostrophes are found in both if (found_apostrophes_in_prv && found_apostrophes_in_pub) { - DoCheck(prv, pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */true); + DoCheck(prv, pub, norm_prv, norm_pub, flags, scripts, type, paths, /* replace_apostrophe_with_h_in_prv = */true, /*replace_apostrophe_with_h_in_pub = */true); } } @@ -284,50 +295,51 @@ BOOST_FIXTURE_TEST_SUITE(descriptor_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(descriptor_test) { // Basic single-key compressed - Check("combo(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "combo(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac","76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac","00149a1c78a507689f6f54b847ad1cef1e614ee23f1e","a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}}, nullopt); - Check("pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac"}}, nullopt); - Check("pkh([deadbeef/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh([deadbeef/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac"}}, OutputType::LEGACY, {{1,0x80000002UL,3,0x80000004UL}}); - Check("wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"00149a1c78a507689f6f54b847ad1cef1e614ee23f1e"}}, OutputType::BECH32); - Check("sh(wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}}, OutputType::P2SH_SEGWIT); + Check("combo(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "combo(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "combo(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "combo(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, 
{{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac","76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac","00149a1c78a507689f6f54b847ad1cef1e614ee23f1e","a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}}, nullopt); + Check("pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bdac"}}, nullopt); + Check("pkh([deadbeef/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh([deadbeef/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "pkh([deadbeef/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh([deadbeef/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"76a9149a1c78a507689f6f54b847ad1cef1e614ee23f1e88ac"}}, OutputType::LEGACY, {{1,0x80000002UL,3,0x80000004UL}}); + Check("wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"00149a1c78a507689f6f54b847ad1cef1e614ee23f1e"}}, OutputType::BECH32); + Check("sh(wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", "sh(wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a91484ab21b1b2fd065d4504ff693d832434b6108d7b87"}}, OutputType::P2SH_SEGWIT); CheckUnparsable("sh(wpkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY2))", "sh(wpkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5))", "Pubkey '03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5' is invalid"); // Invalid pubkey CheckUnparsable("pkh(deadbeef/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh(deadbeef/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "Key origin start '[ character expected but not found, got 'd' instead"); // Missing start bracket in key origin CheckUnparsable("pkh([deadbeef]/1/2'/3/4']L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "pkh([deadbeef]/1/2'/3/4']03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "Multiple ']' characters found for a single pubkey"); // Multiple end brackets in key origin // Basic single-key uncompressed - Check("combo(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "combo(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac","76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}}, nullopt); - Check("pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac"}}, nullopt); - 
Check("pkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}}, OutputType::LEGACY); + Check("combo(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "combo(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "combo(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "combo(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)",SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac","76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}}, nullopt); + Check("pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235ac"}}, nullopt); + Check("pkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "pkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "pkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"76a914b5bd079c4d57cc7fc28ecf8213a6b791625b818388ac"}}, OutputType::LEGACY); CheckUnparsable("wpkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "wpkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "Uncompressed keys are not allowed"); // No uncompressed keys in witness CheckUnparsable("wsh(pk(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss))", "wsh(pk(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235))", "Uncompressed keys are not allowed"); // No uncompressed keys in witness CheckUnparsable("sh(wpkh(5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss))", "sh(wpkh(04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235))", "Uncompressed keys are not allowed"); // No uncompressed keys in witness // Some unconventional single-key constructions - Check("sh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141857af51a5e516552b3086430fd8ce55f7c1a52487"}}, OutputType::LEGACY); - Check("sh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141a31ad23bf49c247dd531a623c2ef57da3c400c587"}}, OutputType::LEGACY); - Check("wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, 
{{"00202e271faa2325c199d25d22e1ead982e45b64eeb4f31e73dbdf41bd4b5fec23fa"}}, OutputType::BECH32); - Check("wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"0020338e023079b91c58571b20e602d7805fb808c22473cbc391a41b1bd3a192e75b"}}, OutputType::BECH32); - Check("sh(wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a91472d0c5a3bfad8c3e7bd5303a72b94240e80b6f1787"}}, OutputType::P2SH_SEGWIT); - Check("sh(wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a914b61b92e2ca21bac1e72a3ab859a742982bea960a87"}}, OutputType::P2SH_SEGWIT); + Check("sh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", "sh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141857af51a5e516552b3086430fd8ce55f7c1a52487"}}, OutputType::LEGACY); + Check("sh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", "sh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "sh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"a9141a31ad23bf49c247dd531a623c2ef57da3c400c587"}}, OutputType::LEGACY); + Check("wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", "wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"00202e271faa2325c199d25d22e1ead982e45b64eeb4f31e73dbdf41bd4b5fec23fa"}}, OutputType::BECH32); + Check("wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", "wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))", "wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", SIGNABLE, {{"0020338e023079b91c58571b20e602d7805fb808c22473cbc391a41b1bd3a192e75b"}}, OutputType::BECH32); + Check("sh(wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", "sh(wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a91472d0c5a3bfad8c3e7bd5303a72b94240e80b6f1787"}}, OutputType::P2SH_SEGWIT); + Check("sh(wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", "sh(wsh(pkh(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "sh(wsh(pkh(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", SIGNABLE, {{"a914b61b92e2ca21bac1e72a3ab859a742982bea960a87"}}, OutputType::P2SH_SEGWIT); // Versions with BIP32 derivations - Check("combo([01234567]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([01234567]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", SIGNABLE, 
{{"2102d2b36900396c9282fa14628566582f206a5dd0bcc8d5e892611806cafb0301f0ac","76a91431a507b815593dfc51ffc7245ae7e5aee304246e88ac","001431a507b815593dfc51ffc7245ae7e5aee304246e","a9142aafb926eb247cb18240a7f4c07983ad1f37922687"}}, nullopt); - Check("pk(xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)", "pk(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)", DEFAULT, {{"210379e45b3cf75f9c5f9befd8e9506fb962f6a9d185ac87001ec44a8d3df8d4a9e3ac"}}, nullopt, {{0}}); - Check("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0)", HARDENED, {{"76a914ebdc90806a9c4356c1c88e42216611e1cb4c1c1788ac"}}, OutputType::LEGACY, {{0xFFFFFFFFUL,0}}); - Check("wpkh([ffffffff/13']xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*)", "wpkh([ffffffff/13']xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)", RANGE, {{"0014326b2249e3a25d5dc60935f044ee835d090ba859"},{"0014af0bd98abc2f2cae66e36896a39ffe2d32984fb7"},{"00141fa798efd1cbf95cebf912c031b8a4a6e9fb9f27"}}, OutputType::BECH32, {{0x8000000DUL, 1, 2, 0}, {0x8000000DUL, 1, 2, 1}, {0x8000000DUL, 1, 2, 2}}); - Check("sh(wpkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "sh(wpkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", RANGE | HARDENED | DERIVE_HARDENED, {{"a9149a4d9901d6af519b2a23d4a2f51650fcba87ce7b87"},{"a914bed59fc0024fae941d6e20a3b44a109ae740129287"},{"a9148483aa1116eb9c05c482a72bada4b1db24af654387"}}, OutputType::P2SH_SEGWIT, {{10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}}); - Check("combo(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*)", "combo(xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*)", RANGE, {{"2102df12b7035bdac8e3bab862a3a83d06ea6b17b6753d52edecba9be46f5d09e076ac","76a914f90e3178ca25f2c808dc76624032d352fdbdfaf288ac","0014f90e3178ca25f2c808dc76624032d352fdbdfaf2","a91408f3ea8c68d4a7585bf9e8bda226723f70e445f087"},{"21032869a233c9adff9a994e4966e5b821fd5bac066da6c3112488dc52383b4a98ecac","76a914a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b788ac","0014a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b7","a91473e39884cb71ae4e5ac9739e9225026c99763e6687"}}, nullopt, {{0}, {1}}); + Check("combo([01234567]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([01234567]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", "combo([01234567]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([01234567]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", SIGNABLE, 
{{"2102d2b36900396c9282fa14628566582f206a5dd0bcc8d5e892611806cafb0301f0ac","76a91431a507b815593dfc51ffc7245ae7e5aee304246e88ac","001431a507b815593dfc51ffc7245ae7e5aee304246e","a9142aafb926eb247cb18240a7f4c07983ad1f37922687"}}, nullopt); + Check("pk(xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)", "pk(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)", "pk(xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0)", "pk(xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0)", DEFAULT, {{"210379e45b3cf75f9c5f9befd8e9506fb962f6a9d185ac87001ec44a8d3df8d4a9e3ac"}}, nullopt, {{0}}); + Check("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0)", "pkh([bd16bee5/2147483647']xprv9vHkqa6XAPwKqSKSEJMcAB3yoCZhaSVsGZbSkFY5L3Lfjjk8sjZucbsbvEw5o3QrSA69nPfZDCgFnNnLhQ2ohpZuwummndnPasDw2Qr6dC2/0)", "pkh([bd16bee5/2147483647']xpub69H7F5dQzmVd3vPuLKtcXJziMEQByuDidnX3YdwgtNsecY5HRGtAAQC5mXTt4dsv9RzyjgDjAQs9VGVV6ydYCHnprc9vvaA5YtqWyL6hyds/0)", HARDENED, {{"76a914ebdc90806a9c4356c1c88e42216611e1cb4c1c1788ac"}}, OutputType::LEGACY, {{0xFFFFFFFFUL,0}}); + Check("wpkh([ffffffff/13']xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*)", "wpkh([ffffffff/13']xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)", "wpkh([ffffffff/13']xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*)", "wpkh([ffffffff/13']xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)", RANGE, {{"0014326b2249e3a25d5dc60935f044ee835d090ba859"},{"0014af0bd98abc2f2cae66e36896a39ffe2d32984fb7"},{"00141fa798efd1cbf95cebf912c031b8a4a6e9fb9f27"}}, OutputType::BECH32, {{0x8000000DUL, 1, 2, 0}, {0x8000000DUL, 1, 2, 1}, {0x8000000DUL, 1, 2, 2}}); + Check("sh(wpkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "sh(wpkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", "sh(wpkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "sh(wpkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", RANGE | HARDENED | DERIVE_HARDENED, {{"a9149a4d9901d6af519b2a23d4a2f51650fcba87ce7b87"},{"a914bed59fc0024fae941d6e20a3b44a109ae740129287"},{"a9148483aa1116eb9c05c482a72bada4b1db24af654387"}}, OutputType::P2SH_SEGWIT, {{10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}}); + Check("combo(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*)", "combo(xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*)", 
"combo(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*)", "combo(xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*)", RANGE, {{"2102df12b7035bdac8e3bab862a3a83d06ea6b17b6753d52edecba9be46f5d09e076ac","76a914f90e3178ca25f2c808dc76624032d352fdbdfaf288ac","0014f90e3178ca25f2c808dc76624032d352fdbdfaf2","a91408f3ea8c68d4a7585bf9e8bda226723f70e445f087"},{"21032869a233c9adff9a994e4966e5b821fd5bac066da6c3112488dc52383b4a98ecac","76a914a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b788ac","0014a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b7","a91473e39884cb71ae4e5ac9739e9225026c99763e6687"}}, nullopt, {{0}, {1}}); CheckUnparsable("combo([012345678]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([012345678]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", "Fingerprint is not 4 bytes (9 characters instead of 8 characters)"); // Too long key fingerprint CheckUnparsable("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483648)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483648)", "Key path value 2147483648 is out of range"); // BIP 32 path element overflow CheckUnparsable("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/1aa)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/1aa)", "Key path value '1aa' is not a valid uint32"); // Path is not valid uint + Check("pkh([01234567/10/20]xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0)", "pkh([01234567/10/20]xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0)", "pkh([01234567/10/20/2147483647']xprv9vHkqa6XAPwKqSKSEJMcAB3yoCZhaSVsGZbSkFY5L3Lfjjk8sjZucbsbvEw5o3QrSA69nPfZDCgFnNnLhQ2ohpZuwummndnPasDw2Qr6dC2/0)", "pkh([01234567/10/20/2147483647']xpub69H7F5dQzmVd3vPuLKtcXJziMEQByuDidnX3YdwgtNsecY5HRGtAAQC5mXTt4dsv9RzyjgDjAQs9VGVV6ydYCHnprc9vvaA5YtqWyL6hyds/0)", HARDENED, {{"76a914ebdc90806a9c4356c1c88e42216611e1cb4c1c1788ac"}}, OutputType::LEGACY, {{10, 20, 0xFFFFFFFFUL, 0}}); // Multisig constructions - Check("multi(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "multi(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt); - Check("sortedmulti(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "sortedmulti(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, 
{{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt); - Check("sortedmulti(1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "sortedmulti(1,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt); - Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}}); - Check("sortedmulti(2,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/*,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0/0/*)", "sortedmulti(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0/0/*)", RANGE, {{"5221025d5fc65ebb8d44a5274b53bac21ff8307fec2334a32df05553459f8b1f7fe1b62102fbd47cc8034098f0e6a94c6aeee8528abf0a2153a5d8e46d325b7284c046784652ae"}, {"52210264fd4d1f5dea8ded94c61e9641309349b62f27fbffe807291f664e286bfbe6472103f4ece6dfccfa37b211eb3d0af4d0c61dba9ef698622dc17eecdf764beeb005a652ae"}, {"5221022ccabda84c30bad578b13c89eb3b9544ce149787e5b538175b1d1ba259cbb83321024d902e1a2fc7a8755ab5b694c575fce742c48d9ff192e63df5193e4c7afe1f9c52ae"}}, nullopt, {{0}, {1}, {2}, {0, 0, 0}, {0, 0, 1}, {0, 0, 2}}); - Check("wsh(multi(2,xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", HARDENED | RANGE | DERIVE_HARDENED, {{"0020b92623201f3bb7c3771d45b2ad1d0351ea8fbf8cfe0a0e570264e1075fa1948f"},{"002036a08bbe4923af41cf4316817c93b8d37e2f635dd25cfff06bd50df6ae7ea203"},{"0020a96e7ab4607ca6b261bfe3245ffda9c746b28d3f59e83d34820ec0e2b36c139c"}}, OutputType::BECH32, {{0xFFFFFFFFUL,0}, 
{1,2,0}, {1,2,1}, {1,2,2}, {10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}}); - Check("sh(wsh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9)))","sh(wsh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232)))", SIGNABLE, {{"a9147fc63e13dc25e8a95a3cee3d9a714ac3afd96f1e87"}}, OutputType::P2SH_SEGWIT); + Check("multi(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "multi(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "multi(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "multi(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt); + Check("sortedmulti(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", "sortedmulti(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", "sortedmulti(1,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss)", 
"sortedmulti(1,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt); + Check("sortedmulti(1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "sortedmulti(1,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", "sortedmulti(1,5KYZdUEo39z3FPrtuX2QbbwGnNP5zTd7yyr2SC1j299sBCnWjss,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)", "sortedmulti(1,04a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea235,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", SIGNABLE, {{"512103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd4104a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd5b8dec5235a0fa8722476c7709c02559e3aa73aa03918ba2d492eea75abea23552ae"}}, nullopt); + Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", "sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}}); + Check("sortedmulti(2,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/*,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0/0/*)", "sortedmulti(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0/0/*)", "sortedmulti(2,xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc/*,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0/0/*)", "sortedmulti(2,xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL/*,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0/0/*)", RANGE, 
{{"5221025d5fc65ebb8d44a5274b53bac21ff8307fec2334a32df05553459f8b1f7fe1b62102fbd47cc8034098f0e6a94c6aeee8528abf0a2153a5d8e46d325b7284c046784652ae"}, {"52210264fd4d1f5dea8ded94c61e9641309349b62f27fbffe807291f664e286bfbe6472103f4ece6dfccfa37b211eb3d0af4d0c61dba9ef698622dc17eecdf764beeb005a652ae"}, {"5221022ccabda84c30bad578b13c89eb3b9544ce149787e5b538175b1d1ba259cbb83321024d902e1a2fc7a8755ab5b694c575fce742c48d9ff192e63df5193e4c7afe1f9c52ae"}}, nullopt, {{0}, {1}, {2}, {0, 0, 0}, {0, 0, 1}, {0, 0, 2}}); + Check("wsh(multi(2,xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", "wsh(multi(2,[bd16bee5/2147483647']xprv9vHkqa6XAPwKqSKSEJMcAB3yoCZhaSVsGZbSkFY5L3Lfjjk8sjZucbsbvEw5o3QrSA69nPfZDCgFnNnLhQ2ohpZuwummndnPasDw2Qr6dC2/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,[bd16bee5/2147483647']xpub69H7F5dQzmVd3vPuLKtcXJziMEQByuDidnX3YdwgtNsecY5HRGtAAQC5mXTt4dsv9RzyjgDjAQs9VGVV6ydYCHnprc9vvaA5YtqWyL6hyds/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", HARDENED | RANGE | DERIVE_HARDENED, {{"0020b92623201f3bb7c3771d45b2ad1d0351ea8fbf8cfe0a0e570264e1075fa1948f"},{"002036a08bbe4923af41cf4316817c93b8d37e2f635dd25cfff06bd50df6ae7ea203"},{"0020a96e7ab4607ca6b261bfe3245ffda9c746b28d3f59e83d34820ec0e2b36c139c"}}, OutputType::BECH32, {{0xFFFFFFFFUL,0}, {1,2,0}, {1,2,1}, {1,2,2}, {10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}}); + 
Check("sh(wsh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9)))","sh(wsh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232)))", 
"sh(wsh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9)))","sh(wsh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232)))", SIGNABLE, {{"a9147fc63e13dc25e8a95a3cee3d9a714ac3afd96f1e87"}}, OutputType::P2SH_SEGWIT); 
CheckUnparsable("sh(multi(16,KzoAz5CanayRKex3fSLQ2BwJpN7U52gZvxMyk78nDMHuqrUxuSJy,KwGNz6YCCQtYvFzMtrC6D3tKTKdBBboMrLTsjr2NYVBwapCkn7Mr,KxogYhiNfwxuswvXV66eFyKcCpm7dZ7TqHVqujHAVUjJxyivxQ9X,L2BUNduTSyZwZjwNHynQTF14mv2uz2NRq5n5sYWTb4FkkmqgEE9f,L1okJGHGn1kFjdXHKxXjwVVtmCMR2JA5QsbKCSpSb7ReQjezKeoD,KxDCNSST75HFPaW5QKpzHtAyaCQC7p9Vo3FYfi2u4dXD1vgMiboK,L5edQjFtnkcf5UWURn6UuuoFrabgDQUHdheKCziwN42aLwS3KizU,KzF8UWFcEC7BYTq8Go1xVimMkDmyNYVmXV5PV7RuDicvAocoPB8i,L3nHUboKG2w4VSJ5jYZ5CBM97oeK6YuKvfZxrefdShECcjEYKMWZ,KyjHo36dWkYhimKmVVmQTq3gERv3pnqA4xFCpvUgbGDJad7eS8WE,KwsfyHKRUTZPQtysN7M3tZ4GXTnuov5XRgjdF2XCG8faAPmFruRF,KzCUbGhN9LJhdeFfL9zQgTJMjqxdBKEekRGZX24hXdgCNCijkkap,KzgpMBwwsDLwkaC5UrmBgCYaBD2WgZ7PBoGYXR8KT7gCA9UTN5a3,KyBXTPy4T7YG4q9tcAM3LkvfRpD1ybHMvcJ2ehaWXaSqeGUxEdkP,KzJDe9iwJRPtKP2F2AoN6zBgzS7uiuAwhWCfGdNeYJ3PC1HNJ8M8,L1xbHrxynrqLKkoYc4qtoQPx6uy5qYXR5ZDYVYBSRmCV5piU3JG9))","sh(multi(16,03669b8afcec803a0d323e9a17f3ea8e68e8abe5a278020a929adbec52421adbd0,0260b2003c386519fc9eadf2b5cf124dd8eea4c4e68d5e154050a9346ea98ce600,0362a74e399c39ed5593852a30147f2959b56bb827dfa3e60e464b02ccf87dc5e8,0261345b53de74a4d721ef877c255429961b7e43714171ac06168d7e08c542a8b8,02da72e8b46901a65d4374fe6315538d8f368557dda3a1dcf9ea903f3afe7314c8,0318c82dd0b53fd3a932d16e0ba9e278fcc937c582d5781be626ff16e201f72286,0297ccef1ef99f9d73dec9ad37476ddb232f1238aff877af19e72ba04493361009,02e502cfd5c3f972fe9a3e2a18827820638f96b6f347e54d63deb839011fd5765d,03e687710f0e3ebe81c1037074da939d409c0025f17eb86adb9427d28f0f7ae0e9,02c04d3a5274952acdbc76987f3184b346a483d43be40874624b29e3692c1df5af,02ed06e0f418b5b43a7ec01d1d7d27290fa15f75771cb69b642a51471c29c84acd,036d46073cbb9ffee90473f3da429abc8de7f8751199da44485682a989a4bebb24,02f5d1ff7c9029a80a4e36b9a5497027ef7f3e73384a4a94fbfe7c4e9164eec8bc,02e41deffd1b7cce11cde209a781adcffdabd1b91c0ba0375857a2bfd9302419f3,02d76625f7956a7fc505ab02556c23ee72d832f1bac391bcd2d3abce5710a13d06,0399eb0a5487515802dc14544cf10b3666623762fbed2ec38a3975716e2c29c232))", "P2SH script is too large, 547 bytes is larger than 520 bytes"); // P2SH does not fit 16 compressed pubkeys in a redeemscript CheckUnparsable("wsh(multi(2,[aaaaaaaa][aaaaaaaa]xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "wsh(multi(2,[aaaaaaaa][aaaaaaaa]xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", "Multiple ']' characters found for a single pubkey"); // Double key origin descriptor CheckUnparsable("wsh(multi(2,[aaaagaaa]xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483647'/0,xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*,xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", 
"wsh(multi(2,[aaagaaaa]xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483647'/0,xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*,xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", "Fingerprint 'aaagaaaa' is not hex"); // Non hex fingerprint @@ -350,8 +362,8 @@ BOOST_AUTO_TEST_CASE(descriptor_test) CheckUnparsable("wsh(wsh(pk(L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)))", "wsh(wsh(pk(03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)))", "Cannot have wsh within wsh"); // Cannot embed P2WSH inside P2WSH // Checksums - Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxfy", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5t", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}}); - Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}}); + Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxfy", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5t", "sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxfy", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5t", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}}); + Check("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", 
"sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", "sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))", DEFAULT, {{"a91445a9a622a8b0a1269944be477640eedc447bbd8487"}}, OutputType::LEGACY, {{0x8000006FUL,222},{0}}); CheckUnparsable("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#", "Expected 8 character checksum, not 0 characters"); // Empty checksum CheckUnparsable("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxfyq", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5tq", "Expected 8 character checksum, not 9 characters"); // Too long checksum CheckUnparsable("sh(multi(2,[00000000/111'/222]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc,xprv9uPDJpEQgRQfDcW7BkF7eTya6RPxXeJCqCJGHuCJ4GiRVLzkTXBAJMu2qaMWPrS7AANYqdq6vcBcBUdJCVVFceUvJFjaPdGZ2y9WACViL4L/0))#ggrsrxf", "sh(multi(2,[00000000/111'/222]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL,xpub68NZiKmJWnxxS6aaHmn81bvJeTESw724CRDs6HbuccFQN9Ku14VQrADWgqbhhTHBaohPX4CjNLf9fq9MYo6oDaPPLPxSb7gwQN3ih19Zm4Y/0))#tjg09x5", "Expected 8 character checksum, not 7 characters"); // Too short checksum diff --git a/src/test/fs_tests.cpp b/src/test/fs_tests.cpp index ec487aa3ff..e52cd5230c 100644 --- a/src/test/fs_tests.cpp +++ b/src/test/fs_tests.cpp @@ -5,6 +5,7 @@ #include <fs.h> #include <test/util/setup_common.h> #include <util/system.h> +#include <util/getuniquepath.h> #include <boost/test/unit_test.hpp> @@ -69,6 +70,21 @@ BOOST_AUTO_TEST_CASE(fsbridge_fstream) BOOST_CHECK_EQUAL(tmpfile1, fsbridge::AbsPathJoin(tmpfile1, "")); BOOST_CHECK_EQUAL(tmpfile1, fsbridge::AbsPathJoin(tmpfile1, {})); } + { + fs::path p1 = GetUniquePath(tmpfolder); + fs::path p2 = GetUniquePath(tmpfolder); + fs::path p3 = GetUniquePath(tmpfolder); + + // Ensure that the parent path is always the same. 
+ BOOST_CHECK_EQUAL(tmpfolder, p1.parent_path()); + BOOST_CHECK_EQUAL(tmpfolder, p2.parent_path()); + BOOST_CHECK_EQUAL(tmpfolder, p3.parent_path()); + + // Ensure that generated paths are actually different. + BOOST_CHECK(p1 != p2); + BOOST_CHECK(p2 != p3); + BOOST_CHECK(p1 != p3); + } } -BOOST_AUTO_TEST_SUITE_END() +BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file diff --git a/src/test/fuzz/crypto.cpp b/src/test/fuzz/crypto.cpp index c2bb3a1a4e..17ac48fca7 100644 --- a/src/test/fuzz/crypto.cpp +++ b/src/test/fuzz/crypto.cpp @@ -4,7 +4,6 @@ #include <crypto/hmac_sha256.h> #include <crypto/hmac_sha512.h> -#include <crypto/muhash.h> #include <crypto/ripemd160.h> #include <crypto/sha1.h> #include <crypto/sha256.h> @@ -36,7 +35,6 @@ FUZZ_TARGET(crypto) CSHA512 sha512; SHA3_256 sha3; CSipHasher sip_hasher{fuzzed_data_provider.ConsumeIntegral<uint64_t>(), fuzzed_data_provider.ConsumeIntegral<uint64_t>()}; - MuHash3072 muhash; while (fuzzed_data_provider.ConsumeBool()) { CallOneOf( @@ -63,12 +61,6 @@ FUZZ_TARGET(crypto) (void)Hash(data); (void)Hash160(data); (void)sha512.Size(); - - if (fuzzed_data_provider.ConsumeBool()) { - muhash *= MuHash3072(data); - } else { - muhash /= MuHash3072(data); - } }, [&] { (void)hash160.Reset(); @@ -78,7 +70,6 @@ FUZZ_TARGET(crypto) (void)sha256.Reset(); (void)sha3.Reset(); (void)sha512.Reset(); - muhash = MuHash3072(); }, [&] { CallOneOf( @@ -122,10 +113,6 @@ FUZZ_TARGET(crypto) [&] { data.resize(SHA3_256::OUTPUT_SIZE); sha3.Finalize(data); - }, - [&] { - uint256 out; - muhash.Finalize(out); }); }); } diff --git a/src/test/fuzz/deserialize.cpp b/src/test/fuzz/deserialize.cpp index 74dec6475e..ba5f0c1a75 100644 --- a/src/test/fuzz/deserialize.cpp +++ b/src/test/fuzz/deserialize.cpp @@ -30,8 +30,6 @@ #include <stdint.h> #include <unistd.h> -#include <vector> - #include <test/fuzz/fuzz.h> void initialize_deserialize() @@ -71,7 +69,7 @@ T Deserialize(CDataStream ds) } template <typename T> -void DeserializeFromFuzzingInput(const std::vector<uint8_t>& buffer, T& obj, const Optional<int> protocol_version = nullopt) +void DeserializeFromFuzzingInput(FuzzBufferType buffer, T& obj, const Optional<int> protocol_version = nullopt) { CDataStream ds(buffer, SER_NETWORK, INIT_PROTO_VERSION); if (protocol_version) { diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp index fd87667755..edb270d437 100644 --- a/src/test/fuzz/fuzz.cpp +++ b/src/test/fuzz/fuzz.cpp @@ -13,15 +13,15 @@ const std::function<void(const std::string&)> G_TEST_LOG_FUN{}; -std::map<std::string_view, std::tuple<TypeTestOneInput, TypeInitialize>>& FuzzTargets() +std::map<std::string_view, std::tuple<TypeTestOneInput, TypeInitialize, TypeHidden>>& FuzzTargets() { - static std::map<std::string_view, std::tuple<TypeTestOneInput, TypeInitialize>> g_fuzz_targets; + static std::map<std::string_view, std::tuple<TypeTestOneInput, TypeInitialize, TypeHidden>> g_fuzz_targets; return g_fuzz_targets; } -void FuzzFrameworkRegisterTarget(std::string_view name, TypeTestOneInput target, TypeInitialize init) +void FuzzFrameworkRegisterTarget(std::string_view name, TypeTestOneInput target, TypeInitialize init, TypeHidden hidden) { - const auto it_ins = FuzzTargets().try_emplace(name, std::move(target), std::move(init)); + const auto it_ins = FuzzTargets().try_emplace(name, std::move(target), std::move(init), hidden); Assert(it_ins.second); } @@ -31,6 +31,7 @@ void initialize() { if (std::getenv("PRINT_ALL_FUZZ_TARGETS_AND_ABORT")) { for (const auto& t : FuzzTargets()) { + if (std::get<2>(t.second)) continue; std::cout << t.first << std::endl; } Assert(false); @@ -59,8 +60,7 @@ static bool read_stdin(std::vector<uint8_t>& data) extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) { static const auto& test_one_input = *Assert(g_test_one_input); - const std::vector<uint8_t> input(data, data + size); - test_one_input(input); + 
test_one_input({data, size}); return 0; } @@ -72,7 +72,7 @@ extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv) } #if defined(PROVIDE_MAIN_FUNCTION) -__attribute__((weak)) int main(int argc, char** argv) +int main(int argc, char** argv) { initialize(); static const auto& test_one_input = *Assert(g_test_one_input); diff --git a/src/test/fuzz/fuzz.h b/src/test/fuzz/fuzz.h index 52841e069a..4abc52c15a 100644 --- a/src/test/fuzz/fuzz.h +++ b/src/test/fuzz/fuzz.h @@ -5,29 +5,36 @@ #ifndef BITCOIN_TEST_FUZZ_FUZZ_H #define BITCOIN_TEST_FUZZ_FUZZ_H +#include <span.h> + #include <cstdint> #include <functional> #include <string_view> -#include <vector> -using TypeTestOneInput = std::function<void(const std::vector<uint8_t>&)>; +using FuzzBufferType = Span<const uint8_t>; + +using TypeTestOneInput = std::function<void(FuzzBufferType)>; using TypeInitialize = std::function<void()>; +using TypeHidden = bool; -void FuzzFrameworkRegisterTarget(std::string_view name, TypeTestOneInput target, TypeInitialize init); +void FuzzFrameworkRegisterTarget(std::string_view name, TypeTestOneInput target, TypeInitialize init, TypeHidden hidden); -inline void FuzzFrameworkEmptyFun() {} +inline void FuzzFrameworkEmptyInitFun() {} #define FUZZ_TARGET(name) \ - FUZZ_TARGET_INIT(name, FuzzFrameworkEmptyFun) - -#define FUZZ_TARGET_INIT(name, init_fun) \ - void name##_fuzz_target(const std::vector<uint8_t>&); \ - struct name##_Before_Main { \ - name##_Before_Main() \ - { \ - FuzzFrameworkRegisterTarget(#name, name##_fuzz_target, init_fun); \ - } \ - } const static g_##name##_before_main; \ - void name##_fuzz_target(const std::vector<uint8_t>& buffer) + FUZZ_TARGET_INIT(name, FuzzFrameworkEmptyInitFun) + +#define FUZZ_TARGET_INIT(name, init_fun) \ + FUZZ_TARGET_INIT_HIDDEN(name, init_fun, false) + +#define FUZZ_TARGET_INIT_HIDDEN(name, init_fun, hidden) \ + void name##_fuzz_target(FuzzBufferType); \ + struct name##_Before_Main { \ + name##_Before_Main() \ + { \ + FuzzFrameworkRegisterTarget(#name, name##_fuzz_target, init_fun, hidden); \ + } \ + } const static g_##name##_before_main; \ + void name##_fuzz_target(FuzzBufferType buffer) #endif // BITCOIN_TEST_FUZZ_FUZZ_H diff --git a/src/test/fuzz/muhash.cpp b/src/test/fuzz/muhash.cpp index 8f843ca773..2d761cef15 100644 --- a/src/test/fuzz/muhash.cpp +++ b/src/test/fuzz/muhash.cpp @@ -41,6 +41,11 @@ FUZZ_TARGET(muhash) muhash.Finalize(out2); assert(out == out2); + MuHash3072 muhash3; + muhash3 *= muhash; + uint256 out3; + muhash3.Finalize(out3); + assert(out == out3); // Test that removing all added elements brings the object back to it's initial state muhash /= muhash; @@ -50,4 +55,9 @@ FUZZ_TARGET(muhash) muhash2.Finalize(out2); assert(out == out2); + + muhash3.Remove(data); + muhash3.Remove(data2); + muhash3.Finalize(out3); + assert(out == out3); } diff --git a/src/test/fuzz/netaddress.cpp b/src/test/fuzz/netaddress.cpp index 6e9bb47ff6..a42080eb66 100644 --- a/src/test/fuzz/netaddress.cpp +++ b/src/test/fuzz/netaddress.cpp @@ -9,7 +9,6 @@ #include <cassert> #include <cstdint> -#include <netinet/in.h> #include <vector> FUZZ_TARGET(netaddress) diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp index e7cc0f5297..442e32d4ca 100644 --- a/src/test/fuzz/process_message.cpp +++ b/src/test/fuzz/process_message.cpp @@ -30,14 +30,32 @@ #include <iostream> #include <memory> #include <string> -#include <vector> namespace { const TestingSetup* g_setup; } // namespace +size_t& GetNumMsgTypes() +{ + static size_t g_num_msg_types{0}; + return 
g_num_msg_types; +} +#define FUZZ_TARGET_MSG(msg_type) \ + struct msg_type##_Count_Before_Main { \ + msg_type##_Count_Before_Main() \ + { \ + ++GetNumMsgTypes(); \ + } \ + } const static g_##msg_type##_count_before_main; \ + FUZZ_TARGET_INIT(process_message_##msg_type, initialize_process_message) \ + { \ + fuzz_target(buffer, #msg_type); \ + } + void initialize_process_message() { + Assert(GetNumMsgTypes() == getAllNetMessageTypes().size()); // If this fails, add or remove the message type below + static const auto testing_setup = MakeFuzzingContext<const TestingSetup>(); g_setup = testing_setup.get(); for (int i = 0; i < 2 * COINBASE_MATURITY; i++) { @@ -46,7 +64,7 @@ void initialize_process_message() SyncWithValidationInterfaceQueue(); } -void fuzz_target(const std::vector<uint8_t>& buffer, const std::string& LIMIT_TO_MESSAGE_TYPE) +void fuzz_target(FuzzBufferType buffer, const std::string& LIMIT_TO_MESSAGE_TYPE) { FuzzedDataProvider fuzzed_data_provider(buffer.data(), buffer.size()); @@ -61,7 +79,7 @@ void fuzz_target(const std::vector<uint8_t>& buffer, const std::string& LIMIT_TO } CNode& p2p_node = *ConsumeNodeAsUniquePtr(fuzzed_data_provider).release(); - const bool successfully_connected{true}; + const bool successfully_connected{fuzzed_data_provider.ConsumeBool()}; p2p_node.fSuccessfullyConnected = successfully_connected; connman.AddTestNode(p2p_node); g_setup->m_node.peerman->InitializeNode(&p2p_node); @@ -87,27 +105,37 @@ void fuzz_target(const std::vector<uint8_t>& buffer, const std::string& LIMIT_TO } FUZZ_TARGET_INIT(process_message, initialize_process_message) { fuzz_target(buffer, ""); } -FUZZ_TARGET_INIT(process_message_addr, initialize_process_message) { fuzz_target(buffer, "addr"); } -FUZZ_TARGET_INIT(process_message_block, initialize_process_message) { fuzz_target(buffer, "block"); } -FUZZ_TARGET_INIT(process_message_blocktxn, initialize_process_message) { fuzz_target(buffer, "blocktxn"); } -FUZZ_TARGET_INIT(process_message_cmpctblock, initialize_process_message) { fuzz_target(buffer, "cmpctblock"); } -FUZZ_TARGET_INIT(process_message_feefilter, initialize_process_message) { fuzz_target(buffer, "feefilter"); } -FUZZ_TARGET_INIT(process_message_filteradd, initialize_process_message) { fuzz_target(buffer, "filteradd"); } -FUZZ_TARGET_INIT(process_message_filterclear, initialize_process_message) { fuzz_target(buffer, "filterclear"); } -FUZZ_TARGET_INIT(process_message_filterload, initialize_process_message) { fuzz_target(buffer, "filterload"); } -FUZZ_TARGET_INIT(process_message_getaddr, initialize_process_message) { fuzz_target(buffer, "getaddr"); } -FUZZ_TARGET_INIT(process_message_getblocks, initialize_process_message) { fuzz_target(buffer, "getblocks"); } -FUZZ_TARGET_INIT(process_message_getblocktxn, initialize_process_message) { fuzz_target(buffer, "getblocktxn"); } -FUZZ_TARGET_INIT(process_message_getdata, initialize_process_message) { fuzz_target(buffer, "getdata"); } -FUZZ_TARGET_INIT(process_message_getheaders, initialize_process_message) { fuzz_target(buffer, "getheaders"); } -FUZZ_TARGET_INIT(process_message_headers, initialize_process_message) { fuzz_target(buffer, "headers"); } -FUZZ_TARGET_INIT(process_message_inv, initialize_process_message) { fuzz_target(buffer, "inv"); } -FUZZ_TARGET_INIT(process_message_mempool, initialize_process_message) { fuzz_target(buffer, "mempool"); } -FUZZ_TARGET_INIT(process_message_notfound, initialize_process_message) { fuzz_target(buffer, "notfound"); } -FUZZ_TARGET_INIT(process_message_ping, initialize_process_message) { 
fuzz_target(buffer, "ping"); } -FUZZ_TARGET_INIT(process_message_pong, initialize_process_message) { fuzz_target(buffer, "pong"); } -FUZZ_TARGET_INIT(process_message_sendcmpct, initialize_process_message) { fuzz_target(buffer, "sendcmpct"); } -FUZZ_TARGET_INIT(process_message_sendheaders, initialize_process_message) { fuzz_target(buffer, "sendheaders"); } -FUZZ_TARGET_INIT(process_message_tx, initialize_process_message) { fuzz_target(buffer, "tx"); } -FUZZ_TARGET_INIT(process_message_verack, initialize_process_message) { fuzz_target(buffer, "verack"); } -FUZZ_TARGET_INIT(process_message_version, initialize_process_message) { fuzz_target(buffer, "version"); } +FUZZ_TARGET_MSG(addr); +FUZZ_TARGET_MSG(addrv2); +FUZZ_TARGET_MSG(block); +FUZZ_TARGET_MSG(blocktxn); +FUZZ_TARGET_MSG(cfcheckpt); +FUZZ_TARGET_MSG(cfheaders); +FUZZ_TARGET_MSG(cfilter); +FUZZ_TARGET_MSG(cmpctblock); +FUZZ_TARGET_MSG(feefilter); +FUZZ_TARGET_MSG(filteradd); +FUZZ_TARGET_MSG(filterclear); +FUZZ_TARGET_MSG(filterload); +FUZZ_TARGET_MSG(getaddr); +FUZZ_TARGET_MSG(getblocks); +FUZZ_TARGET_MSG(getblocktxn); +FUZZ_TARGET_MSG(getcfcheckpt); +FUZZ_TARGET_MSG(getcfheaders); +FUZZ_TARGET_MSG(getcfilters); +FUZZ_TARGET_MSG(getdata); +FUZZ_TARGET_MSG(getheaders); +FUZZ_TARGET_MSG(headers); +FUZZ_TARGET_MSG(inv); +FUZZ_TARGET_MSG(mempool); +FUZZ_TARGET_MSG(merkleblock); +FUZZ_TARGET_MSG(notfound); +FUZZ_TARGET_MSG(ping); +FUZZ_TARGET_MSG(pong); +FUZZ_TARGET_MSG(sendaddrv2); +FUZZ_TARGET_MSG(sendcmpct); +FUZZ_TARGET_MSG(sendheaders); +FUZZ_TARGET_MSG(tx); +FUZZ_TARGET_MSG(verack); +FUZZ_TARGET_MSG(version); +FUZZ_TARGET_MSG(wtxidrelay); diff --git a/src/test/fuzz/process_messages.cpp b/src/test/fuzz/process_messages.cpp index 810f0aac92..ef45196671 100644 --- a/src/test/fuzz/process_messages.cpp +++ b/src/test/fuzz/process_messages.cpp @@ -46,7 +46,7 @@ FUZZ_TARGET_INIT(process_messages, initialize_process_messages) peers.push_back(ConsumeNodeAsUniquePtr(fuzzed_data_provider, i).release()); CNode& p2p_node = *peers.back(); - const bool successfully_connected{true}; + const bool successfully_connected{fuzzed_data_provider.ConsumeBool()}; p2p_node.fSuccessfullyConnected = successfully_connected; p2p_node.fPauseSend = false; g_setup->m_node.peerman->InitializeNode(&p2p_node); diff --git a/src/test/fuzz/script_assets_test_minimizer.cpp b/src/test/fuzz/script_assets_test_minimizer.cpp index 2091ad5d91..8d9a939dab 100644 --- a/src/test/fuzz/script_assets_test_minimizer.cpp +++ b/src/test/fuzz/script_assets_test_minimizer.cpp @@ -28,12 +28,12 @@ // // (normal build) // $ mkdir dump -// $ for N in $(seq 1 10); do TEST_DUMP_DIR=dump test/functional/feature_taproot --dumptests; done +// $ for N in $(seq 1 10); do TEST_DUMP_DIR=dump test/functional/feature_taproot.py --dumptests; done // $ ... 
// -// (fuzz test build) +// (libFuzzer build) // $ mkdir dump-min -// $ ./src/test/fuzz/script_assets_test_minimizer -merge=1 dump-min/ dump/ +// $ FUZZ=script_assets_test_minimizer ./src/test/fuzz/fuzz -merge=1 -use_value_profile=1 dump-min/ dump/ // $ (echo -en '[\n'; cat dump-min/* | head -c -2; echo -en '\n]') >script_assets_test.json namespace { @@ -190,7 +190,7 @@ ECCVerifyHandle handle; } // namespace -FUZZ_TARGET(script_assets_test_minimizer) +FUZZ_TARGET_INIT_HIDDEN(script_assets_test_minimizer, FuzzFrameworkEmptyInitFun, /* hidden */ true) { if (buffer.size() < 2 || buffer.back() != '\n' || buffer[buffer.size() - 2] != ',') return; const std::string str((const char*)buffer.data(), buffer.size() - 2); diff --git a/src/test/fuzz/string.cpp b/src/test/fuzz/string.cpp index 282a2cd8ca..ec8a3b23db 100644 --- a/src/test/fuzz/string.cpp +++ b/src/test/fuzz/string.cpp @@ -67,6 +67,7 @@ FUZZ_TARGET(string) } OutputType output_type; (void)ParseOutputType(random_string_1, output_type); + (void)RemovePrefix(random_string_1, random_string_2); (void)ResolveErrMsg(random_string_1, random_string_2); try { (void)RPCConvertNamedValues(random_string_1, random_string_vector); @@ -78,7 +79,9 @@ FUZZ_TARGET(string) } (void)SanitizeString(random_string_1); (void)SanitizeString(random_string_1, fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 3)); +#ifndef WIN32 (void)ShellEscape(random_string_1); +#endif // WIN32 int port_out; std::string host_out; SplitHostPort(random_string_1, port_out, host_out); diff --git a/src/test/fuzz/system.cpp b/src/test/fuzz/system.cpp index 47b38b6d23..3621702e45 100644 --- a/src/test/fuzz/system.cpp +++ b/src/test/fuzz/system.cpp @@ -54,7 +54,7 @@ FUZZ_TARGET(system) if (args_manager.GetArgFlags(argument_name) != nullopt) { return; } - args_manager.AddArg(argument_name, fuzzed_data_provider.ConsumeRandomLengthString(16), fuzzed_data_provider.ConsumeIntegral<unsigned int>(), options_category); + args_manager.AddArg(argument_name, fuzzed_data_provider.ConsumeRandomLengthString(16), fuzzed_data_provider.ConsumeIntegral<unsigned int>() & ~ArgsManager::COMMAND, options_category); }, [&] { // Avoid hitting: diff --git a/src/test/hash_tests.cpp b/src/test/hash_tests.cpp index 87f6470afa..41a626c0ea 100644 --- a/src/test/hash_tests.cpp +++ b/src/test/hash_tests.cpp @@ -107,14 +107,14 @@ BOOST_AUTO_TEST_CASE(siphash) // Check test vectors from spec, one byte at a time CSipHasher hasher2(0x0706050403020100ULL, 0x0F0E0D0C0B0A0908ULL); - for (uint8_t x=0; x<ARRAYLEN(siphash_4_2_testvec); ++x) + for (uint8_t x=0; x<std::size(siphash_4_2_testvec); ++x) { BOOST_CHECK_EQUAL(hasher2.Finalize(), siphash_4_2_testvec[x]); hasher2.Write(&x, 1); } // Check test vectors from spec, eight bytes at a time CSipHasher hasher3(0x0706050403020100ULL, 0x0F0E0D0C0B0A0908ULL); - for (uint8_t x=0; x<ARRAYLEN(siphash_4_2_testvec); x+=8) + for (uint8_t x=0; x<std::size(siphash_4_2_testvec); x+=8) { BOOST_CHECK_EQUAL(hasher3.Finalize(), siphash_4_2_testvec[x]); hasher3.Write(uint64_t(x)|(uint64_t(x+1)<<8)|(uint64_t(x+2)<<16)|(uint64_t(x+3)<<24)| diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp index e967273636..aa628371e6 100644 --- a/src/test/miner_tests.cpp +++ b/src/test/miner_tests.cpp @@ -28,7 +28,7 @@ struct MinerTestingSetup : public TestingSetup { void TestPackageSelection(const CChainParams& chainparams, const CScript& scriptPubKey, const std::vector<CTransactionRef>& txFirst) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_node.mempool->cs); bool TestSequenceLocks(const CTransaction& tx, 
int flags) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_node.mempool->cs) { - return CheckSequenceLocks(*m_node.mempool, tx, flags); + return CheckSequenceLocks(::ChainstateActive(), *m_node.mempool, tx, flags); } BlockAssembler AssemblerForTest(const CChainParams& params); }; @@ -123,6 +123,7 @@ void MinerTestingSetup::TestPackageSelection(const CChainParams& chainparams, co m_node.mempool->addUnchecked(entry.Fee(50000).Time(GetTime()).SpendsCoinbase(false).FromTx(tx)); std::unique_ptr<CBlockTemplate> pblocktemplate = AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey); + BOOST_REQUIRE_EQUAL(pblocktemplate->block.vtx.size(), 4U); BOOST_CHECK(pblocktemplate->block.vtx[1]->GetHash() == hashParentTx); BOOST_CHECK(pblocktemplate->block.vtx[2]->GetHash() == hashHighFeeTx); BOOST_CHECK(pblocktemplate->block.vtx[3]->GetHash() == hashMediumFeeTx); @@ -157,6 +158,7 @@ void MinerTestingSetup::TestPackageSelection(const CChainParams& chainparams, co hashLowFeeTx = tx.GetHash(); m_node.mempool->addUnchecked(entry.Fee(feeToUse+2).FromTx(tx)); pblocktemplate = AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey); + BOOST_REQUIRE_EQUAL(pblocktemplate->block.vtx.size(), 6U); BOOST_CHECK(pblocktemplate->block.vtx[4]->GetHash() == hashFreeTx); BOOST_CHECK(pblocktemplate->block.vtx[5]->GetHash() == hashLowFeeTx); @@ -191,6 +193,7 @@ void MinerTestingSetup::TestPackageSelection(const CChainParams& chainparams, co tx.vout[0].nValue = 100000000 - 10000; // 10k satoshi fee m_node.mempool->addUnchecked(entry.Fee(10000).FromTx(tx)); pblocktemplate = AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey); + BOOST_REQUIRE_EQUAL(pblocktemplate->block.vtx.size(), 9U); BOOST_CHECK(pblocktemplate->block.vtx[8]->GetHash() == hashLowFeeTx2); } @@ -216,11 +219,10 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) // We can't make transactions until we have inputs // Therefore, load 110 blocks :) - static_assert(sizeof(blockinfo) / sizeof(*blockinfo) == 110, "Should have 110 blocks to import"); + static_assert(std::size(blockinfo) == 110, "Should have 110 blocks to import"); int baseheight = 0; std::vector<CTransactionRef> txFirst; - for (unsigned int i = 0; i < sizeof(blockinfo)/sizeof(*blockinfo); ++i) - { + for (const auto& bi : blockinfo) { CBlock *pblock = &pblocktemplate->block; // pointer for convenience { LOCK(cs_main); @@ -229,7 +231,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) CMutableTransaction txCoinbase(*pblock->vtx[0]); txCoinbase.nVersion = 1; txCoinbase.vin[0].scriptSig = CScript(); - txCoinbase.vin[0].scriptSig.push_back(blockinfo[i].extranonce); + txCoinbase.vin[0].scriptSig.push_back(bi.extranonce); txCoinbase.vin[0].scriptSig.push_back(::ChainActive().Height()); txCoinbase.vout.resize(1); // Ignore the (optional) segwit commitment added by CreateNewBlock (as the hardcoded nonces don't account for this) txCoinbase.vout[0].scriptPubKey = CScript(); @@ -239,7 +241,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) if (txFirst.size() < 4) txFirst.push_back(pblock->vtx[0]); pblock->hashMerkleRoot = BlockMerkleRoot(*pblock); - pblock->nNonce = blockinfo[i].nonce; + pblock->nNonce = bi.nonce; } std::shared_ptr<const CBlock> shared_pblock = std::make_shared<const CBlock>(*pblock); BOOST_CHECK(Assert(m_node.chainman)->ProcessNewBlock(chainparams, shared_pblock, true, nullptr)); @@ -435,7 +437,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) tx.nLockTime = 0; hash = tx.GetHash(); m_node.mempool->addUnchecked(entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); - 
BOOST_CHECK(CheckFinalTx(CTransaction(tx), flags)); // Locktime passes + BOOST_CHECK(CheckFinalTx(::ChainActive().Tip(), CTransaction(tx), flags)); // Locktime passes BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks fail BOOST_CHECK(SequenceLocks(CTransaction(tx), flags, prevheights, CreateBlockIndex(::ChainActive().Tip()->nHeight + 2))); // Sequence locks pass on 2nd block @@ -445,7 +447,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) prevheights[0] = baseheight + 2; hash = tx.GetHash(); m_node.mempool->addUnchecked(entry.Time(GetTime()).FromTx(tx)); - BOOST_CHECK(CheckFinalTx(CTransaction(tx), flags)); // Locktime passes + BOOST_CHECK(CheckFinalTx(::ChainActive().Tip(), CTransaction(tx), flags)); // Locktime passes BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks fail for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) @@ -461,7 +463,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) tx.nLockTime = ::ChainActive().Tip()->nHeight + 1; hash = tx.GetHash(); m_node.mempool->addUnchecked(entry.Time(GetTime()).FromTx(tx)); - BOOST_CHECK(!CheckFinalTx(CTransaction(tx), flags)); // Locktime fails + BOOST_CHECK(!CheckFinalTx(::ChainActive().Tip(), CTransaction(tx), flags)); // Locktime fails BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks pass BOOST_CHECK(IsFinalTx(CTransaction(tx), ::ChainActive().Tip()->nHeight + 2, ::ChainActive().Tip()->GetMedianTimePast())); // Locktime passes on 2nd block @@ -472,7 +474,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) prevheights[0] = baseheight + 4; hash = tx.GetHash(); m_node.mempool->addUnchecked(entry.Time(GetTime()).FromTx(tx)); - BOOST_CHECK(!CheckFinalTx(CTransaction(tx), flags)); // Locktime fails + BOOST_CHECK(!CheckFinalTx(::ChainActive().Tip(), CTransaction(tx), flags)); // Locktime fails BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks pass BOOST_CHECK(IsFinalTx(CTransaction(tx), ::ChainActive().Tip()->nHeight + 2, ::ChainActive().Tip()->GetMedianTimePast() + 1)); // Locktime passes 1 second later @@ -481,7 +483,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) prevheights[0] = ::ChainActive().Tip()->nHeight + 1; tx.nLockTime = 0; tx.vin[0].nSequence = 0; - BOOST_CHECK(CheckFinalTx(CTransaction(tx), flags)); // Locktime passes + BOOST_CHECK(CheckFinalTx(::ChainActive().Tip(), CTransaction(tx), flags)); // Locktime passes BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks pass tx.vin[0].nSequence = 1; BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks fail diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index a1b41e17ed..1c7c35528e 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -192,14 +192,15 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) id++, NODE_NETWORK, hSocket, addr, /* nKeyedNetGroupIn = */ 0, /* nLocalHostNonceIn = */ 0, - CAddress(), pszDest, ConnectionType::OUTBOUND_FULL_RELAY); + CAddress(), pszDest, ConnectionType::OUTBOUND_FULL_RELAY, + /* inbound_onion = */ false); BOOST_CHECK(pnode1->IsFullOutboundConn() == true); BOOST_CHECK(pnode1->IsManualConn() == false); BOOST_CHECK(pnode1->IsBlockOnlyConn() == false); BOOST_CHECK(pnode1->IsFeelerConn() == false); BOOST_CHECK(pnode1->IsAddrFetchConn() == false); BOOST_CHECK(pnode1->IsInboundConn() == false); - BOOST_CHECK(pnode1->IsInboundOnion() == false); + BOOST_CHECK(pnode1->m_inbound_onion == false); BOOST_CHECK_EQUAL(pnode1->ConnectedThroughNetwork(), Network::NET_IPV4); std::unique_ptr<CNode> 
pnode2 = MakeUnique<CNode>( @@ -214,7 +215,7 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) BOOST_CHECK(pnode2->IsFeelerConn() == false); BOOST_CHECK(pnode2->IsAddrFetchConn() == false); BOOST_CHECK(pnode2->IsInboundConn() == true); - BOOST_CHECK(pnode2->IsInboundOnion() == false); + BOOST_CHECK(pnode2->m_inbound_onion == false); BOOST_CHECK_EQUAL(pnode2->ConnectedThroughNetwork(), Network::NET_IPV4); std::unique_ptr<CNode> pnode3 = MakeUnique<CNode>( @@ -229,7 +230,7 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) BOOST_CHECK(pnode3->IsFeelerConn() == false); BOOST_CHECK(pnode3->IsAddrFetchConn() == false); BOOST_CHECK(pnode3->IsInboundConn() == false); - BOOST_CHECK(pnode3->IsInboundOnion() == false); + BOOST_CHECK(pnode3->m_inbound_onion == false); BOOST_CHECK_EQUAL(pnode3->ConnectedThroughNetwork(), Network::NET_IPV4); std::unique_ptr<CNode> pnode4 = MakeUnique<CNode>( @@ -244,7 +245,7 @@ BOOST_AUTO_TEST_CASE(cnode_simple_test) BOOST_CHECK(pnode4->IsFeelerConn() == false); BOOST_CHECK(pnode4->IsAddrFetchConn() == false); BOOST_CHECK(pnode4->IsInboundConn() == true); - BOOST_CHECK(pnode4->IsInboundOnion() == true); + BOOST_CHECK(pnode4->m_inbound_onion == true); BOOST_CHECK_EQUAL(pnode4->ConnectedThroughNetwork(), Network::NET_ONION); } @@ -679,7 +680,7 @@ BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test) in_addr ipv4AddrPeer; ipv4AddrPeer.s_addr = 0xa0b0c001; CAddress addr = CAddress(CService(ipv4AddrPeer, 7777), NODE_NETWORK); - std::unique_ptr<CNode> pnode = MakeUnique<CNode>(0, NODE_NETWORK, INVALID_SOCKET, addr, 0, 0, CAddress{}, std::string{}, ConnectionType::OUTBOUND_FULL_RELAY); + std::unique_ptr<CNode> pnode = MakeUnique<CNode>(0, NODE_NETWORK, INVALID_SOCKET, addr, /* nKeyedNetGroupIn */ 0, /* nLocalHostNonceIn */ 0, CAddress{}, /* pszDest */ std::string{}, ConnectionType::OUTBOUND_FULL_RELAY, /* inbound_onion */ false); pnode->fSuccessfullyConnected.store(true); // the peer claims to be reaching us via IPv6 @@ -690,7 +691,7 @@ BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test) pnode->SetAddrLocal(addrLocal); // before patch, this causes undefined behavior detectable with clang's -fsanitize=memory - AdvertiseLocal(&*pnode); + GetLocalAddrForPeer(&*pnode); // suppress no-checks-run warning; if this test fails, it's by triggering a sanitizer BOOST_CHECK(1); @@ -793,7 +794,7 @@ std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(const int n_c candidates.push_back({ /* id */ id, /* nTimeConnected */ static_cast<int64_t>(random_context.randrange(100)), - /* nMinPingUsecTime */ static_cast<int64_t>(random_context.randrange(100)), + /* m_min_ping_time */ static_cast<int64_t>(random_context.randrange(100)), /* nLastBlockTime */ static_cast<int64_t>(random_context.randrange(100)), /* nLastTXTime */ static_cast<int64_t>(random_context.randrange(100)), /* fRelevantServices */ random_context.randbool(), @@ -853,7 +854,7 @@ BOOST_AUTO_TEST_CASE(node_eviction_test) // from eviction. 
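        // (Assigning m_min_ping_time = candidate.id below gives candidates 0-7 the eight lowest
        // minimum ping times, which is why {0, 1, 2, 3, 4, 5, 6, 7} is the expected protected set.)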
BOOST_CHECK(!IsEvicted( number_of_nodes, [](NodeEvictionCandidate& candidate) { - candidate.nMinPingUsecTime = candidate.id; + candidate.m_min_ping_time = candidate.id; }, {0, 1, 2, 3, 4, 5, 6, 7}, random_context)); @@ -900,7 +901,7 @@ BOOST_AUTO_TEST_CASE(node_eviction_test) BOOST_CHECK(!IsEvicted( number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { candidate.nKeyedNetGroup = number_of_nodes - candidate.id; // 4 protected - candidate.nMinPingUsecTime = candidate.id; // 8 protected + candidate.m_min_ping_time = candidate.id; // 8 protected candidate.nLastTXTime = number_of_nodes - candidate.id; // 4 protected candidate.nLastBlockTime = number_of_nodes - candidate.id; // 4 protected }, diff --git a/src/test/sanity_tests.cpp b/src/test/sanity_tests.cpp index 740b2c72db..3e4b963fe3 100644 --- a/src/test/sanity_tests.cpp +++ b/src/test/sanity_tests.cpp @@ -5,6 +5,7 @@ #include <compat/sanity.h> #include <key.h> #include <test/util/setup_common.h> +#include <util/time.h> #include <boost/test/unit_test.hpp> @@ -15,6 +16,7 @@ BOOST_AUTO_TEST_CASE(basic_sanity) BOOST_CHECK_MESSAGE(glibc_sanity_test() == true, "libc sanity test"); BOOST_CHECK_MESSAGE(glibcxx_sanity_test() == true, "stdlib sanity test"); BOOST_CHECK_MESSAGE(ECC_InitSanityCheck() == true, "secp256k1 sanity test"); + BOOST_CHECK_MESSAGE(ChronoSanityCheck() == true, "chrono epoch test"); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/script_standard_tests.cpp b/src/test/script_standard_tests.cpp index 366385b619..4dc0dd5f51 100644 --- a/src/test/script_standard_tests.cpp +++ b/src/test/script_standard_tests.cpp @@ -107,6 +107,22 @@ BOOST_AUTO_TEST_CASE(script_standard_Solver_success) BOOST_CHECK_EQUAL(solutions.size(), 1U); BOOST_CHECK(solutions[0] == ToByteVector(scriptHash)); + // TxoutType::WITNESS_V1_TAPROOT + s.clear(); + s << OP_1 << ToByteVector(uint256::ZERO); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::WITNESS_V1_TAPROOT); + BOOST_CHECK_EQUAL(solutions.size(), 2U); + BOOST_CHECK(solutions[0] == std::vector<unsigned char>{1}); + BOOST_CHECK(solutions[1] == ToByteVector(uint256::ZERO)); + + // TxoutType::WITNESS_UNKNOWN + s.clear(); + s << OP_16 << ToByteVector(uint256::ONE); + BOOST_CHECK_EQUAL(Solver(s, solutions), TxoutType::WITNESS_UNKNOWN); + BOOST_CHECK_EQUAL(solutions.size(), 2U); + BOOST_CHECK(solutions[0] == std::vector<unsigned char>{16}); + BOOST_CHECK(solutions[1] == ToByteVector(uint256::ONE)); + // TxoutType::NONSTANDARD s.clear(); s << OP_9 << OP_ADD << OP_11 << OP_EQUAL; diff --git a/src/test/scriptnum_tests.cpp b/src/test/scriptnum_tests.cpp index 281018be9f..746d4d3c6b 100644 --- a/src/test/scriptnum_tests.cpp +++ b/src/test/scriptnum_tests.cpp @@ -164,9 +164,9 @@ static void RunOperators(const int64_t& num1, const int64_t& num2) BOOST_AUTO_TEST_CASE(creation) { - for(size_t i = 0; i < sizeof(values) / sizeof(values[0]); ++i) + for(size_t i = 0; i < std::size(values); ++i) { - for(size_t j = 0; j < sizeof(offsets) / sizeof(offsets[0]); ++j) + for(size_t j = 0; j < std::size(offsets); ++j) { RunCreate(values[i]); RunCreate(values[i] + offsets[j]); @@ -177,9 +177,9 @@ BOOST_AUTO_TEST_CASE(creation) BOOST_AUTO_TEST_CASE(operators) { - for(size_t i = 0; i < sizeof(values) / sizeof(values[0]); ++i) + for(size_t i = 0; i < std::size(values); ++i) { - for(size_t j = 0; j < sizeof(offsets) / sizeof(offsets[0]); ++j) + for(size_t j = 0; j < std::size(offsets); ++j) { RunOperators(values[i], values[i]); RunOperators(values[i], -values[i]); diff --git a/src/test/sighash_tests.cpp 
b/src/test/sighash_tests.cpp index bc862de78a..2eb980e8cd 100644 --- a/src/test/sighash_tests.cpp +++ b/src/test/sighash_tests.cpp @@ -88,7 +88,7 @@ void static RandomScript(CScript &script) { script = CScript(); int ops = (InsecureRandRange(10)); for (int i=0; i<ops; i++) - script << oplist[InsecureRandRange(sizeof(oplist)/sizeof(oplist[0]))]; + script << oplist[InsecureRandRange(std::size(oplist))]; } void static RandomTransaction(CMutableTransaction &tx, bool fSingle) { diff --git a/src/test/sock_tests.cpp b/src/test/sock_tests.cpp new file mode 100644 index 0000000000..ed9780dfb5 --- /dev/null +++ b/src/test/sock_tests.cpp @@ -0,0 +1,149 @@ +// Copyright (c) 2021-2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <compat.h> +#include <test/util/setup_common.h> +#include <util/sock.h> +#include <util/system.h> + +#include <boost/test/unit_test.hpp> + +#include <thread> + +using namespace std::chrono_literals; + +BOOST_FIXTURE_TEST_SUITE(sock_tests, BasicTestingSetup) + +static bool SocketIsClosed(const SOCKET& s) +{ + // Notice that if another thread is running and creates its own socket after `s` has been + // closed, it may be assigned the same file descriptor number. In this case, our test will + // wrongly pretend that the socket is not closed. + int type; + socklen_t len = sizeof(type); + return getsockopt(s, SOL_SOCKET, SO_TYPE, (sockopt_arg_type)&type, &len) == SOCKET_ERROR; +} + +static SOCKET CreateSocket() +{ + const SOCKET s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP); + BOOST_REQUIRE(s != static_cast<SOCKET>(SOCKET_ERROR)); + return s; +} + +BOOST_AUTO_TEST_CASE(constructor_and_destructor) +{ + const SOCKET s = CreateSocket(); + Sock* sock = new Sock(s); + BOOST_CHECK_EQUAL(sock->Get(), s); + BOOST_CHECK(!SocketIsClosed(s)); + delete sock; + BOOST_CHECK(SocketIsClosed(s)); +} + +BOOST_AUTO_TEST_CASE(move_constructor) +{ + const SOCKET s = CreateSocket(); + Sock* sock1 = new Sock(s); + Sock* sock2 = new Sock(std::move(*sock1)); + delete sock1; + BOOST_CHECK(!SocketIsClosed(s)); + BOOST_CHECK_EQUAL(sock2->Get(), s); + delete sock2; + BOOST_CHECK(SocketIsClosed(s)); +} + +BOOST_AUTO_TEST_CASE(move_assignment) +{ + const SOCKET s = CreateSocket(); + Sock* sock1 = new Sock(s); + Sock* sock2 = new Sock(); + *sock2 = std::move(*sock1); + delete sock1; + BOOST_CHECK(!SocketIsClosed(s)); + BOOST_CHECK_EQUAL(sock2->Get(), s); + delete sock2; + BOOST_CHECK(SocketIsClosed(s)); +} + +BOOST_AUTO_TEST_CASE(release) +{ + SOCKET s = CreateSocket(); + Sock* sock = new Sock(s); + BOOST_CHECK_EQUAL(sock->Release(), s); + delete sock; + BOOST_CHECK(!SocketIsClosed(s)); + BOOST_REQUIRE(CloseSocket(s)); +} + +BOOST_AUTO_TEST_CASE(reset) +{ + const SOCKET s = CreateSocket(); + Sock sock(s); + sock.Reset(); + BOOST_CHECK(SocketIsClosed(s)); +} + +#ifndef WIN32 // Windows does not have socketpair(2). 
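// A minimal sketch of the ownership pattern the socketpair-based tests below verify: each
// descriptor is handed to a Sock, which closes it automatically when it goes out of scope.
// (Illustrative helper only; the names and the "ping" payload are arbitrary, and the real
// tests use CreateSocketPair()/SendAndRecvMessage() instead.)
static void SketchSockOwnership()
{
    int fds[2];
    if (socketpair(AF_UNIX, SOCK_STREAM, 0, fds) != 0) return;
    Sock writer{fds[0]};                       // takes ownership of fds[0]
    Sock reader{fds[1]};                       // takes ownership of fds[1]
    (void)writer.Send("ping", 4, 0);           // send(2) wrapper
    char buf[4];
    (void)reader.Recv(buf, sizeof(buf), 0);    // recv(2) wrapper
}                                              // ~Sock() closes both descriptors here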
+ +static void CreateSocketPair(int s[2]) +{ + BOOST_REQUIRE_EQUAL(socketpair(AF_UNIX, SOCK_STREAM, 0, s), 0); +} + +static void SendAndRecvMessage(const Sock& sender, const Sock& receiver) +{ + const char* msg = "abcd"; + constexpr ssize_t msg_len = 4; + char recv_buf[10]; + + BOOST_CHECK_EQUAL(sender.Send(msg, msg_len, 0), msg_len); + BOOST_CHECK_EQUAL(receiver.Recv(recv_buf, sizeof(recv_buf), 0), msg_len); + BOOST_CHECK_EQUAL(strncmp(msg, recv_buf, msg_len), 0); +} + +BOOST_AUTO_TEST_CASE(send_and_receive) +{ + int s[2]; + CreateSocketPair(s); + + Sock* sock0 = new Sock(s[0]); + Sock* sock1 = new Sock(s[1]); + + SendAndRecvMessage(*sock0, *sock1); + + Sock* sock0moved = new Sock(std::move(*sock0)); + Sock* sock1moved = new Sock(); + *sock1moved = std::move(*sock1); + + delete sock0; + delete sock1; + + SendAndRecvMessage(*sock1moved, *sock0moved); + + delete sock0moved; + delete sock1moved; + + BOOST_CHECK(SocketIsClosed(s[0])); + BOOST_CHECK(SocketIsClosed(s[1])); +} + +BOOST_AUTO_TEST_CASE(wait) +{ + int s[2]; + CreateSocketPair(s); + + Sock sock0(s[0]); + Sock sock1(s[1]); + + std::thread waiter([&sock0]() { sock0.Wait(24h, Sock::RECV); }); + + BOOST_REQUIRE_EQUAL(sock1.Send("a", 1, 0), 1); + + waiter.join(); +} + +#endif /* WIN32 */ + +BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/txvalidation_tests.cpp b/src/test/txvalidation_tests.cpp index 7e6246d68f..8d14071297 100644 --- a/src/test/txvalidation_tests.cpp +++ b/src/test/txvalidation_tests.cpp @@ -30,25 +30,21 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_reject_coinbase, TestChain100Setup) BOOST_CHECK(CTransaction(coinbaseTx).IsCoinBase()); - TxValidationState state; - LOCK(cs_main); unsigned int initialPoolSize = m_node.mempool->size(); + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *m_node.mempool, MakeTransactionRef(coinbaseTx), + true /* bypass_limits */); - BOOST_CHECK_EQUAL( - false, - AcceptToMemoryPool(*m_node.mempool, state, MakeTransactionRef(coinbaseTx), - nullptr /* plTxnReplaced */, - true /* bypass_limits */)); + BOOST_CHECK(result.m_result_type == MempoolAcceptResult::ResultType::INVALID); // Check that the transaction hasn't been added to mempool. BOOST_CHECK_EQUAL(m_node.mempool->size(), initialPoolSize); // Check that the validation state reflects the unsuccessful attempt. 
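// AcceptToMemoryPool() now returns a MempoolAcceptResult instead of filling a caller-provided
// TxValidationState. A sketch of the new calling pattern (the names active_chainstate, pool and
// ptx are placeholders, and cs_main is assumed to be held):
const MempoolAcceptResult res = AcceptToMemoryPool(active_chainstate, pool, ptx, /* bypass_limits */ false);
if (res.m_result_type == MempoolAcceptResult::ResultType::VALID) {
    // the transaction is now in the mempool
} else {
    // rejection details live in the embedded validation state
    LogPrintf("tx rejected: %s\n", res.m_state.GetRejectReason());
}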
- BOOST_CHECK(state.IsInvalid()); - BOOST_CHECK_EQUAL(state.GetRejectReason(), "coinbase"); - BOOST_CHECK(state.GetResult() == TxValidationResult::TX_CONSENSUS); + BOOST_CHECK(result.m_state.IsInvalid()); + BOOST_CHECK_EQUAL(result.m_state.GetRejectReason(), "coinbase"); + BOOST_CHECK(result.m_state.GetResult() == TxValidationResult::TX_CONSENSUS); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp index bed2ba3608..3244b58082 100644 --- a/src/test/txvalidationcache_tests.cpp +++ b/src/test/txvalidationcache_tests.cpp @@ -13,7 +13,10 @@ #include <boost/test/unit_test.hpp> -bool CheckInputScripts(const CTransaction& tx, TxValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks); +bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, + const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore, + bool cacheFullScriptStore, PrecomputedTransactionData& txdata, + std::vector<CScriptCheck>* pvChecks) EXCLUSIVE_LOCKS_REQUIRED(cs_main); BOOST_AUTO_TEST_SUITE(txvalidationcache_tests) @@ -28,9 +31,9 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup) const auto ToMemPool = [this](const CMutableTransaction& tx) { LOCK(cs_main); - TxValidationState state; - return AcceptToMemoryPool(*m_node.mempool, state, MakeTransactionRef(tx), - nullptr /* plTxnReplaced */, true /* bypass_limits */); + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *m_node.mempool, MakeTransactionRef(tx), + true /* bypass_limits */); + return result.m_result_type == MempoolAcceptResult::ResultType::VALID; }; // Create a double-spend of mature coinbase txn: diff --git a/src/test/util/script.h b/src/test/util/script.h new file mode 100644 index 0000000000..abd14c2067 --- /dev/null +++ b/src/test/util/script.h @@ -0,0 +1,21 @@ +// Copyright (c) 2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+ +#ifndef BITCOIN_TEST_UTIL_SCRIPT_H +#define BITCOIN_TEST_UTIL_SCRIPT_H + +#include <crypto/sha256.h> +#include <script/script.h> + +static const std::vector<uint8_t> WITNESS_STACK_ELEM_OP_TRUE{uint8_t{OP_TRUE}}; +static const CScript P2WSH_OP_TRUE{ + CScript{} + << OP_0 + << ToByteVector([] { + uint256 hash; + CSHA256().Write(WITNESS_STACK_ELEM_OP_TRUE.data(), WITNESS_STACK_ELEM_OP_TRUE.size()).Finalize(hash.begin()); + return hash; + }())}; + +#endif // BITCOIN_TEST_UTIL_SCRIPT_H diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index b9f3f8c955..1ffe435531 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -77,6 +77,7 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve { "dummy", "-printtoconsole=0", + "-logsourcelocations", "-logtimemicros", "-logthreadnames", "-debug", @@ -199,14 +200,43 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const } } -TestChain100Setup::TestChain100Setup() +TestChain100Setup::TestChain100Setup(bool deterministic) { + m_deterministic = deterministic; + + if (m_deterministic) { + SetMockTime(1598887952); + constexpr std::array<unsigned char, 32> vchKey = { + { + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1 + } + }; + coinbaseKey.Set(vchKey.begin(), vchKey.end(), false); + } else { + coinbaseKey.MakeNewKey(true); + } + // Generate a 100-block chain: - coinbaseKey.MakeNewKey(true); + this->mineBlocks(COINBASE_MATURITY); + + if (m_deterministic) { + LOCK(::cs_main); + assert( + m_node.chainman->ActiveChain().Tip()->GetBlockHash().ToString() == + "49c95db1e470fed04496d801c9d8fbb78155d2c7f855232c918823d2c17d0cf6"); + } +} + +void TestChain100Setup::mineBlocks(int num_blocks) +{ CScript scriptPubKey = CScript() << ToByteVector(coinbaseKey.GetPubKey()) << OP_CHECKSIG; - for (int i = 0; i < COINBASE_MATURITY; i++) { + for (int i = 0; i < num_blocks; i++) + { std::vector<CMutableTransaction> noTxns; CBlock b = CreateAndProcessBlock(noTxns, scriptPubKey); + if (m_deterministic) { + SetMockTime(GetTime() + 1); + } m_coinbase_txns.push_back(b.vtx[0]); } } @@ -231,9 +261,61 @@ CBlock TestChain100Setup::CreateAndProcessBlock(const std::vector<CMutableTransa return block; } + +CMutableTransaction TestChain100Setup::CreateValidMempoolTransaction(CTransactionRef input_transaction, + int input_vout, + int input_height, + CKey input_signing_key, + CScript output_destination, + CAmount output_amount) +{ + // Transaction we will submit to the mempool + CMutableTransaction mempool_txn; + + // Create an input + COutPoint outpoint_to_spend(input_transaction->GetHash(), input_vout); + CTxIn input(outpoint_to_spend); + mempool_txn.vin.push_back(input); + + // Create an output + CTxOut output(output_amount, output_destination); + mempool_txn.vout.push_back(output); + + // Sign the transaction + // - Add the signing key to a keystore + FillableSigningProvider keystore; + keystore.AddKey(input_signing_key); + // - Populate a CoinsViewCache with the unspent output + CCoinsView coins_view; + CCoinsViewCache coins_cache(&coins_view); + AddCoins(coins_cache, *input_transaction.get(), input_height); + // - Use GetCoin to properly populate utxo_to_spend, + Coin utxo_to_spend; + assert(coins_cache.GetCoin(outpoint_to_spend, utxo_to_spend)); + // - Then add it to a map to pass in to SignTransaction + std::map<COutPoint, Coin> input_coins; + input_coins.insert({outpoint_to_spend, utxo_to_spend}); + // - Default signature 
hashing type + int nHashType = SIGHASH_ALL; + std::map<int, std::string> input_errors; + assert(SignTransaction(mempool_txn, &keystore, input_coins, nHashType, input_errors)); + + // Add transaction to the mempool + { + LOCK(cs_main); + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *m_node.mempool.get(), MakeTransactionRef(mempool_txn), /* bypass_limits */ false); + assert(result.m_result_type == MempoolAcceptResult::ResultType::VALID); + } + + return mempool_txn; +} + TestChain100Setup::~TestChain100Setup() { gArgs.ForceSetArg("-segwitheight", "0"); + if (m_deterministic) { + SetMockTime(0); + } } CTxMemPoolEntry TestMemPoolEntryHelper::FromTx(const CMutableTransaction& tx) const diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h index 331c1235cb..33f24e7c44 100644 --- a/src/test/util/setup_common.h +++ b/src/test/util/setup_common.h @@ -78,7 +78,6 @@ struct BasicTestingSetup { explicit BasicTestingSetup(const std::string& chainName = CBaseChainParams::MAIN, const std::vector<const char*>& extra_args = {}); ~BasicTestingSetup(); -private: const fs::path m_path_root; }; @@ -112,7 +111,7 @@ class CScript; * Testing fixture that pre-creates a 100-block REGTEST-mode block chain */ struct TestChain100Setup : public RegTestingSetup { - TestChain100Setup(); + TestChain100Setup(bool deterministic = false); /** * Create a new block with just given transactions, coinbase paying to @@ -121,12 +120,38 @@ struct TestChain100Setup : public RegTestingSetup { CBlock CreateAndProcessBlock(const std::vector<CMutableTransaction>& txns, const CScript& scriptPubKey); + //! Mine a series of new blocks on the active chain. + void mineBlocks(int num_blocks); + + /** + * Create a transaction and submit to the mempool. + * + * @param input_transaction The transaction to spend + * @param input_vout The vout to spend from the input_transaction + * @param input_height The height of the block that included the input_transaction + * @param input_signing_key The key to spend the input_transaction + * @param output_destination Where to send the output + * @param output_amount How much to send + */ + CMutableTransaction CreateValidMempoolTransaction(CTransactionRef input_transaction, + int input_vout, + int input_height, + CKey input_signing_key, + CScript output_destination, + CAmount output_amount = CAmount(1 * COIN)); + ~TestChain100Setup(); + bool m_deterministic; std::vector<CTransactionRef> m_coinbase_txns; // For convenience, coinbase transactions CKey coinbaseKey; // private/public key needed to spend coinbase transactions }; + +struct TestChain100DeterministicSetup : public TestChain100Setup { + TestChain100DeterministicSetup() : TestChain100Setup(true) { } +}; + class CTxMemPoolEntry; struct TestMemPoolEntryHelper diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp index 4133f2623b..845854bd4b 100644 --- a/src/test/util_tests.cpp +++ b/src/test/util_tests.cpp @@ -13,6 +13,7 @@ #include <test/util/setup_common.h> #include <test/util/str.h> #include <uint256.h> +#include <util/getuniquepath.h> #include <util/message.h> // For MessageSign(), MessageVerify(), MESSAGE_MAGIC #include <util/moneystr.h> #include <util/spanparsing.h> @@ -1816,7 +1817,7 @@ BOOST_AUTO_TEST_CASE(test_DirIsWritable) BOOST_CHECK_EQUAL(DirIsWritable(tmpdirname), true); // Should not be able to write to a non-existent dir. 
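// The CreateValidMempoolTransaction() helper documented above can be used from a
// TestChain100Setup-based test roughly as follows (a sketch only; the destination script and
// the output amount are arbitrary illustrative choices):
const CScript dest = GetScriptForDestination(PKHash(coinbaseKey.GetPubKey()));
const CMutableTransaction spend = CreateValidMempoolTransaction(
    /* input_transaction */ m_coinbase_txns[0],
    /* input_vout */ 0,
    /* input_height */ 1,
    /* input_signing_key */ coinbaseKey,
    /* output_destination */ dest,
    /* output_amount */ CAmount(48 * COIN));
// On return the spending transaction has been accepted into m_node.mempool.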
- tmpdirname = tmpdirname / fs::unique_path(); + tmpdirname = GetUniquePath(tmpdirname); BOOST_CHECK_EQUAL(DirIsWritable(tmpdirname), false); fs::create_directory(tmpdirname); @@ -2200,4 +2201,17 @@ BOOST_AUTO_TEST_CASE(message_hash) BOOST_CHECK_NE(message_hash1, signature_hash); } +BOOST_AUTO_TEST_CASE(remove_prefix) +{ + BOOST_CHECK_EQUAL(RemovePrefix("./util/system.h", "./"), "util/system.h"); + BOOST_CHECK_EQUAL(RemovePrefix("foo", "foo"), ""); + BOOST_CHECK_EQUAL(RemovePrefix("foo", "fo"), "o"); + BOOST_CHECK_EQUAL(RemovePrefix("foo", "f"), "oo"); + BOOST_CHECK_EQUAL(RemovePrefix("foo", ""), "foo"); + BOOST_CHECK_EQUAL(RemovePrefix("fo", "foo"), "fo"); + BOOST_CHECK_EQUAL(RemovePrefix("f", "foo"), "f"); + BOOST_CHECK_EQUAL(RemovePrefix("", "foo"), ""); + BOOST_CHECK_EQUAL(RemovePrefix("", ""), ""); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp index ec45d9a434..0c87c4d360 100644 --- a/src/test/validation_block_tests.cpp +++ b/src/test/validation_block_tests.cpp @@ -11,6 +11,7 @@ #include <pow.h> #include <random.h> #include <script/standard.h> +#include <test/util/script.h> #include <test/util/setup_common.h> #include <util/time.h> #include <validation.h> @@ -18,8 +19,6 @@ #include <thread> -static const std::vector<unsigned char> V_OP_TRUE{OP_TRUE}; - namespace validation_block_tests { struct MinerTestingSetup : public RegTestingSetup { std::shared_ptr<CBlock> Block(const uint256& prev_hash); @@ -64,27 +63,17 @@ std::shared_ptr<CBlock> MinerTestingSetup::Block(const uint256& prev_hash) static int i = 0; static uint64_t time = Params().GenesisBlock().nTime; - CScript pubKey; - pubKey << i++ << OP_TRUE; - - auto ptemplate = BlockAssembler(*m_node.mempool, Params()).CreateNewBlock(pubKey); + auto ptemplate = BlockAssembler(*m_node.mempool, Params()).CreateNewBlock(CScript{} << i++ << OP_TRUE); auto pblock = std::make_shared<CBlock>(ptemplate->block); pblock->hashPrevBlock = prev_hash; pblock->nTime = ++time; - pubKey.clear(); - { - WitnessV0ScriptHash witness_program; - CSHA256().Write(&V_OP_TRUE[0], V_OP_TRUE.size()).Finalize(witness_program.begin()); - pubKey << OP_0 << ToByteVector(witness_program); - } - // Make the coinbase transaction with two outputs: // One zero-value one that has a unique pubkey to make sure that blocks at the same height can have a different hash // Another one that has the coinbase reward in a P2WSH with OP_TRUE as witness program to make it easy to spend CMutableTransaction txCoinbase(*pblock->vtx[0]); txCoinbase.vout.resize(2); - txCoinbase.vout[1].scriptPubKey = pubKey; + txCoinbase.vout[1].scriptPubKey = P2WSH_OP_TRUE; txCoinbase.vout[1].nValue = txCoinbase.vout[0].nValue; txCoinbase.vout[0].nValue = 0; txCoinbase.vin[0].scriptWitness.SetNull(); @@ -254,7 +243,7 @@ BOOST_AUTO_TEST_CASE(mempool_locks_reorg) for (int num_txs = 22; num_txs > 0; --num_txs) { CMutableTransaction mtx; mtx.vin.push_back(CTxIn{COutPoint{last_mined->vtx[0]->GetHash(), 1}, CScript{}}); - mtx.vin[0].scriptWitness.stack.push_back(V_OP_TRUE); + mtx.vin[0].scriptWitness.stack.push_back(WITNESS_STACK_ELEM_OP_TRUE); mtx.vout.push_back(last_mined->vtx[0]->vout[1]); mtx.vout[0].nValue -= 1000; txs.push_back(MakeTransactionRef(mtx)); @@ -283,15 +272,9 @@ BOOST_AUTO_TEST_CASE(mempool_locks_reorg) // Add the txs to the tx pool { LOCK(cs_main); - TxValidationState state; - std::list<CTransactionRef> plTxnReplaced; for (const auto& tx : txs) { - BOOST_REQUIRE(AcceptToMemoryPool( - *m_node.mempool, - state, - tx, - 
&plTxnReplaced, - /* bypass_limits */ false)); + const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *m_node.mempool, tx, false /* bypass_limits */); + BOOST_REQUIRE(result.m_result_type == MempoolAcceptResult::ResultType::VALID); } } diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp index 3d8570e27c..94d4277019 100644 --- a/src/test/validation_chainstatemanager_tests.cpp +++ b/src/test/validation_chainstatemanager_tests.cpp @@ -4,13 +4,18 @@ // #include <chainparams.h> #include <consensus/validation.h> +#include <node/utxo_snapshot.h> #include <random.h> +#include <rpc/blockchain.h> #include <sync.h> #include <test/util/setup_common.h> #include <uint256.h> #include <validation.h> #include <validationinterface.h> +#include <tinyformat.h> +#include <univalue.h> + #include <vector> #include <boost/test/unit_test.hpp> @@ -28,6 +33,8 @@ BOOST_AUTO_TEST_CASE(chainstatemanager) std::vector<CChainState*> chainstates; const CChainParams& chainparams = Params(); + BOOST_CHECK(!manager.SnapshotBlockhash().has_value()); + // Create a legacy (IBD) chainstate. // CChainState& c1 = WITH_LOCK(::cs_main, return manager.InitializeChainstate(mempool)); @@ -54,10 +61,17 @@ BOOST_AUTO_TEST_CASE(chainstatemanager) auto& validated_cs = manager.ValidatedChainstate(); BOOST_CHECK_EQUAL(&validated_cs, &c1); + BOOST_CHECK(!manager.SnapshotBlockhash().has_value()); + // Create a snapshot-based chainstate. // - CChainState& c2 = WITH_LOCK(::cs_main, return manager.InitializeChainstate(mempool, GetRandHash())); + const uint256 snapshot_blockhash = GetRandHash(); + CChainState& c2 = WITH_LOCK(::cs_main, return manager.InitializeChainstate( + mempool, snapshot_blockhash)); chainstates.push_back(&c2); + + BOOST_CHECK_EQUAL(manager.SnapshotBlockhash().value(), snapshot_blockhash); + c2.InitCoinsDB( /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false); WITH_LOCK(::cs_main, c2.InitCoinsCache(1 << 23)); @@ -155,4 +169,175 @@ BOOST_AUTO_TEST_CASE(chainstatemanager_rebalance_caches) BOOST_CHECK_CLOSE(c2.m_coinsdb_cache_size_bytes, max_cache * 0.95, 1); } +auto NoMalleation = [](CAutoFile& file, SnapshotMetadata& meta){}; + +template<typename F = decltype(NoMalleation)> +static bool +CreateAndActivateUTXOSnapshot(NodeContext& node, const fs::path root, F malleation = NoMalleation) +{ + // Write out a snapshot to the test's tempdir. + // + int height; + WITH_LOCK(::cs_main, height = node.chainman->ActiveHeight()); + fs::path snapshot_path = root / tfm::format("test_snapshot.%d.dat", height); + FILE* outfile{fsbridge::fopen(snapshot_path, "wb")}; + CAutoFile auto_outfile{outfile, SER_DISK, CLIENT_VERSION}; + + UniValue result = CreateUTXOSnapshot(node, node.chainman->ActiveChainstate(), auto_outfile); + BOOST_TEST_MESSAGE( + "Wrote UTXO snapshot to " << snapshot_path.make_preferred().string() << ": " << result.write()); + + // Read the written snapshot in and then activate it. + // + FILE* infile{fsbridge::fopen(snapshot_path, "rb")}; + CAutoFile auto_infile{infile, SER_DISK, CLIENT_VERSION}; + SnapshotMetadata metadata; + auto_infile >> metadata; + + malleation(auto_infile, metadata); + + return node.chainman->ActivateSnapshot(auto_infile, metadata, /*in_memory*/ true); +} + +//! Test basic snapshot activation. 
+BOOST_FIXTURE_TEST_CASE(chainstatemanager_activate_snapshot, TestChain100DeterministicSetup) +{ + ChainstateManager& chainman = *Assert(m_node.chainman); + + size_t initial_size; + size_t initial_total_coins{100}; + + // Make some initial assertions about the contents of the chainstate. + { + LOCK(::cs_main); + CCoinsViewCache& ibd_coinscache = chainman.ActiveChainstate().CoinsTip(); + initial_size = ibd_coinscache.GetCacheSize(); + size_t total_coins{0}; + + for (CTransactionRef& txn : m_coinbase_txns) { + COutPoint op{txn->GetHash(), 0}; + BOOST_CHECK(ibd_coinscache.HaveCoin(op)); + total_coins++; + } + + BOOST_CHECK_EQUAL(total_coins, initial_total_coins); + BOOST_CHECK_EQUAL(initial_size, initial_total_coins); + } + + // Snapshot should refuse to load at this height. + BOOST_REQUIRE(!CreateAndActivateUTXOSnapshot(m_node, m_path_root)); + BOOST_CHECK(chainman.ActiveChainstate().m_from_snapshot_blockhash.IsNull()); + BOOST_CHECK_EQUAL( + chainman.ActiveChainstate().m_from_snapshot_blockhash, + chainman.SnapshotBlockhash().value_or(uint256())); + + // Mine 10 more blocks, putting at us height 110 where a valid assumeutxo value can + // be found. + mineBlocks(10); + initial_size += 10; + initial_total_coins += 10; + + // Should not load malleated snapshots + BOOST_REQUIRE(!CreateAndActivateUTXOSnapshot( + m_node, m_path_root, [](CAutoFile& auto_infile, SnapshotMetadata& metadata) { + // A UTXO is missing but count is correct + metadata.m_coins_count -= 1; + + COutPoint outpoint; + Coin coin; + + auto_infile >> outpoint; + auto_infile >> coin; + })); + BOOST_REQUIRE(!CreateAndActivateUTXOSnapshot( + m_node, m_path_root, [](CAutoFile& auto_infile, SnapshotMetadata& metadata) { + // Coins count is larger than coins in file + metadata.m_coins_count += 1; + })); + BOOST_REQUIRE(!CreateAndActivateUTXOSnapshot( + m_node, m_path_root, [](CAutoFile& auto_infile, SnapshotMetadata& metadata) { + // Coins count is smaller than coins in file + metadata.m_coins_count -= 1; + })); + + BOOST_REQUIRE(CreateAndActivateUTXOSnapshot(m_node, m_path_root)); + + // Ensure our active chain is the snapshot chainstate. + BOOST_CHECK(!chainman.ActiveChainstate().m_from_snapshot_blockhash.IsNull()); + BOOST_CHECK_EQUAL( + chainman.ActiveChainstate().m_from_snapshot_blockhash, + *chainman.SnapshotBlockhash()); + + // To be checked against later when we try loading a subsequent snapshot. + uint256 loaded_snapshot_blockhash{*chainman.SnapshotBlockhash()}; + + // Make some assertions about the both chainstates. These checks ensure the + // legacy chainstate hasn't changed and that the newly created chainstate + // reflects the expected content. + { + LOCK(::cs_main); + int chains_tested{0}; + + for (CChainState* chainstate : chainman.GetAll()) { + BOOST_TEST_MESSAGE("Checking coins in " << chainstate->ToString()); + CCoinsViewCache& coinscache = chainstate->CoinsTip(); + + // Both caches will be empty initially. + BOOST_CHECK_EQUAL((unsigned int)0, coinscache.GetCacheSize()); + + size_t total_coins{0}; + + for (CTransactionRef& txn : m_coinbase_txns) { + COutPoint op{txn->GetHash(), 0}; + BOOST_CHECK(coinscache.HaveCoin(op)); + total_coins++; + } + + BOOST_CHECK_EQUAL(initial_size , coinscache.GetCacheSize()); + BOOST_CHECK_EQUAL(total_coins, initial_total_coins); + chains_tested++; + } + + BOOST_CHECK_EQUAL(chains_tested, 2); + } + + // Mine some new blocks on top of the activated snapshot chainstate. + constexpr size_t new_coins{100}; + mineBlocks(new_coins); // Defined in TestChain100Setup. 
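    // The background (IBD) chainstate has not connected the newly mined blocks, so it should
    // still contain only the original 110 coinbase outputs, while the active snapshot chainstate
    // should additionally see the 100 outputs just mined on top of it. The counts below check
    // exactly that.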
+ + { + LOCK(::cs_main); + size_t coins_in_active{0}; + size_t coins_in_ibd{0}; + size_t coins_missing_ibd{0}; + + for (CChainState* chainstate : chainman.GetAll()) { + BOOST_TEST_MESSAGE("Checking coins in " << chainstate->ToString()); + CCoinsViewCache& coinscache = chainstate->CoinsTip(); + bool is_ibd = chainman.IsBackgroundIBD(chainstate); + + for (CTransactionRef& txn : m_coinbase_txns) { + COutPoint op{txn->GetHash(), 0}; + if (coinscache.HaveCoin(op)) { + (is_ibd ? coins_in_ibd : coins_in_active)++; + } else if (is_ibd) { + coins_missing_ibd++; + } + } + } + + BOOST_CHECK_EQUAL(coins_in_active, initial_total_coins + new_coins); + BOOST_CHECK_EQUAL(coins_in_ibd, initial_total_coins); + BOOST_CHECK_EQUAL(coins_missing_ibd, new_coins); + } + + // Snapshot should refuse to load after one has already loaded. + BOOST_REQUIRE(!CreateAndActivateUTXOSnapshot(m_node, m_path_root)); + + // Snapshot blockhash should be unchanged. + BOOST_CHECK_EQUAL( + chainman.ActiveChainstate().m_from_snapshot_blockhash, + loaded_snapshot_blockhash); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/validation_tests.cpp b/src/test/validation_tests.cpp index 9e37f14921..ecf9453094 100644 --- a/src/test/validation_tests.cpp +++ b/src/test/validation_tests.cpp @@ -5,6 +5,7 @@ #include <chainparams.h> #include <net.h> #include <signet.h> +#include <uint256.h> #include <validation.h> #include <test/util/setup_common.h> @@ -119,4 +120,27 @@ BOOST_AUTO_TEST_CASE(signet_parse_tests) BOOST_CHECK(!CheckSignetBlockSolution(block, signet_params->GetConsensus())); } +//! Test retrieval of valid assumeutxo values. +BOOST_AUTO_TEST_CASE(test_assumeutxo) +{ + const auto params = CreateChainParams(*m_node.args, CBaseChainParams::REGTEST); + + // These heights don't have assumeutxo configurations associated, per the contents + // of chainparams.cpp. + std::vector<int> bad_heights{0, 100, 111, 115, 209, 211}; + + for (auto empty : bad_heights) { + const auto out = ExpectedAssumeutxo(empty, *params); + BOOST_CHECK(!out); + } + + const auto out110 = *ExpectedAssumeutxo(110, *params); + BOOST_CHECK_EQUAL(out110.hash_serialized, uint256S("76fd7334ac7c1baf57ddc0c626f073a655a35d98a4258cd1382c8cc2b8392e10")); + BOOST_CHECK_EQUAL(out110.nChainTx, (unsigned int)110); + + const auto out210 = *ExpectedAssumeutxo(210, *params); + BOOST_CHECK_EQUAL(out210.hash_serialized, uint256S("9c5ed99ef98544b34f8920b6d1802f72ac28ae6e2bd2bd4c316ff10c230df3f2")); + BOOST_CHECK_EQUAL(out210.nChainTx, (unsigned int)210); +} + BOOST_AUTO_TEST_SUITE_END() diff --git a/src/torcontrol.cpp b/src/torcontrol.cpp index 908ad35e1b..605c77fc3a 100644 --- a/src/torcontrol.cpp +++ b/src/torcontrol.cpp @@ -14,6 +14,7 @@ #include <netbase.h> #include <util/strencodings.h> #include <util/system.h> +#include <util/time.h> #include <deque> #include <functional> diff --git a/src/txdb.cpp b/src/txdb.cpp index 72460e7c69..4b4766e1ba 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -47,11 +47,15 @@ CCoinsViewDB::CCoinsViewDB(fs::path ldb_path, size_t nCacheSize, bool fMemory, b void CCoinsViewDB::ResizeCache(size_t new_cache_size) { - // Have to do a reset first to get the original `m_db` state to release its - // filesystem lock. - m_db.reset(); - m_db = MakeUnique<CDBWrapper>( - m_ldb_path, new_cache_size, m_is_memory, /*fWipe*/ false, /*obfuscate*/ true); + // We can't do this operation with an in-memory DB since we'll lose all the coins upon + // reset. 
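    // (An in-memory CDBWrapper is backed by a temporary leveldb MemEnv, so destroying and
    // re-creating it would come back empty and silently drop the entire UTXO set.)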
+ if (!m_is_memory) { + // Have to do a reset first to get the original `m_db` state to release its + // filesystem lock. + m_db.reset(); + m_db = MakeUnique<CDBWrapper>( + m_ldb_path, new_cache_size, m_is_memory, /*fWipe*/ false, /*obfuscate*/ true); + } } bool CCoinsViewDB::GetCoin(const COutPoint &outpoint, Coin &coin) const { diff --git a/src/txmempool.cpp b/src/txmempool.cpp index c370f9e981..899835019a 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -9,14 +9,14 @@ #include <consensus/tx_verify.h> #include <consensus/validation.h> #include <optional.h> -#include <validation.h> -#include <policy/policy.h> #include <policy/fees.h> +#include <policy/policy.h> #include <policy/settings.h> #include <reverse_iterator.h> -#include <util/system.h> #include <util/moneystr.h> +#include <util/system.h> #include <util/time.h> +#include <validation.h> #include <validationinterface.h> CTxMemPoolEntry::CTxMemPoolEntry(const CTransactionRef& _tx, const CAmount& _nFee, @@ -396,7 +396,10 @@ void CTxMemPool::addUnchecked(const CTxMemPoolEntry &entry, setEntries &setAnces nTransactionsUpdated++; totalTxSize += entry.GetTxSize(); - if (minerPolicyEstimator) {minerPolicyEstimator->processTransaction(entry, validFeeEstimate);} + m_total_fee += entry.GetFee(); + if (minerPolicyEstimator) { + minerPolicyEstimator->processTransaction(entry, validFeeEstimate); + } vTxHashes.emplace_back(tx.GetWitnessHash(), newit); newit->vTxHashesIdx = vTxHashes.size() - 1; @@ -432,6 +435,7 @@ void CTxMemPool::removeUnchecked(txiter it, MemPoolRemovalReason reason) vTxHashes.clear(); totalTxSize -= it->GetTxSize(); + m_total_fee -= it->GetFee(); cachedInnerUsage -= it->DynamicMemoryUsage(); cachedInnerUsage -= memusage::DynamicUsage(it->GetMemPoolParentsConst()) + memusage::DynamicUsage(it->GetMemPoolChildrenConst()); mapTx.erase(it); @@ -499,7 +503,7 @@ void CTxMemPool::removeRecursive(const CTransaction &origTx, MemPoolRemovalReaso RemoveStaged(setAllRemoves, false, reason); } -void CTxMemPool::removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMemPoolHeight, int flags) +void CTxMemPool::removeForReorg(CChainState& active_chainstate, int flags) { // Remove transactions spending a coinbase which are now immature and no-longer-final transactions AssertLockHeld(cs); @@ -507,8 +511,9 @@ void CTxMemPool::removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMem for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { const CTransaction& tx = it->GetTx(); LockPoints lp = it->GetLockPoints(); - bool validLP = TestLockPointValidity(&lp); - if (!CheckFinalTx(tx, flags) || !CheckSequenceLocks(*this, tx, flags, &lp, validLP)) { + assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); + bool validLP = TestLockPointValidity(active_chainstate.m_chain, &lp); + if (!CheckFinalTx(active_chainstate.m_chain.Tip(), tx, flags) || !CheckSequenceLocks(active_chainstate, *this, tx, flags, &lp, validLP)) { // Note if CheckSequenceLocks fails the LockPoints may still be invalid // So it's critical that we remove the tx and not depend on the LockPoints. 
txToRemove.insert(it); @@ -517,8 +522,9 @@ void CTxMemPool::removeForReorg(const CCoinsViewCache *pcoins, unsigned int nMem indexed_transaction_set::const_iterator it2 = mapTx.find(txin.prevout.hash); if (it2 != mapTx.end()) continue; - const Coin &coin = pcoins->AccessCoin(txin.prevout); + const Coin &coin = active_chainstate.CoinsTip().AccessCoin(txin.prevout); if (m_check_ratio != 0) assert(!coin.IsSpent()); + unsigned int nMemPoolHeight = active_chainstate.m_chain.Tip()->nHeight + 1; if (coin.IsSpent() || (coin.IsCoinBase() && ((signed long)nMemPoolHeight) - coin.nHeight < COINBASE_MATURITY)) { txToRemove.insert(it); break; @@ -590,6 +596,7 @@ void CTxMemPool::_clear() mapTx.clear(); mapNextTx.clear(); totalTxSize = 0; + m_total_fee = 0; cachedInnerUsage = 0; lastRollingFeeUpdate = GetTime(); blockSinceLastRollingFeeBump = false; @@ -623,6 +630,7 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const LogPrint(BCLog::MEMPOOL, "Checking mempool with %u transactions and %u inputs\n", (unsigned int)mapTx.size(), (unsigned int)mapNextTx.size()); uint64_t checkTotal = 0; + CAmount check_total_fee{0}; uint64_t innerUsage = 0; CCoinsViewCache mempoolDuplicate(const_cast<CCoinsViewCache*>(pcoins)); @@ -632,6 +640,7 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { unsigned int i = 0; checkTotal += it->GetTxSize(); + check_total_fee += it->GetFee(); innerUsage += it->DynamicMemoryUsage(); const CTransaction& tx = it->GetTx(); innerUsage += memusage::DynamicUsage(it->GetMemPoolParentsConst()) + memusage::DynamicUsage(it->GetMemPoolChildrenConst()); @@ -726,6 +735,7 @@ void CTxMemPool::check(const CCoinsViewCache *pcoins) const } assert(totalTxSize == checkTotal); + assert(m_total_fee == check_total_fee); assert(innerUsage == cachedInnerUsage); } diff --git a/src/txmempool.h b/src/txmempool.h index 0a9cd81ff5..b8de326737 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -19,8 +19,8 @@ #include <optional.h> #include <policy/feerate.h> #include <primitives/transaction.h> -#include <sync.h> #include <random.h> +#include <sync.h> #include <util/hasher.h> #include <boost/multi_index_container.hpp> @@ -29,6 +29,7 @@ #include <boost/multi_index/sequenced_index.hpp> class CBlockIndex; +class CChainState; extern RecursiveMutex cs_main; /** Fake height value used in Coin to signify they are only in the memory pool (since 0.8) */ @@ -478,8 +479,9 @@ private: std::atomic<unsigned int> nTransactionsUpdated{0}; //!< Used by getblocktemplate to trigger CreateNewBlock() invocation CBlockPolicyEstimator* minerPolicyEstimator; - uint64_t totalTxSize; //!< sum of all mempool tx's virtual sizes. Differs from serialized tx size since witness data is discounted. Defined in BIP 141. - uint64_t cachedInnerUsage; //!< sum of dynamic memory usage of all the map elements (NOT the maps themselves) + uint64_t totalTxSize GUARDED_BY(cs); //!< sum of all mempool tx's virtual sizes. Differs from serialized tx size since witness data is discounted. Defined in BIP 141. 
+ CAmount m_total_fee GUARDED_BY(cs); //!< sum of all mempool tx's fees (NOT modified fee) + uint64_t cachedInnerUsage GUARDED_BY(cs); //!< sum of dynamic memory usage of all the map elements (NOT the maps themselves) mutable int64_t lastRollingFeeUpdate; mutable bool blockSinceLastRollingFeeBump; @@ -615,7 +617,7 @@ public: void addUnchecked(const CTxMemPoolEntry& entry, setEntries& setAncestors, bool validFeeEstimate = true) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main); void removeRecursive(const CTransaction& tx, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs); - void removeForReorg(const CCoinsViewCache* pcoins, unsigned int nMemPoolHeight, int flags) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main); + void removeForReorg(CChainState& active_chainstate, int flags) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main); void removeConflicts(const CTransaction& tx) EXCLUSIVE_LOCKS_REQUIRED(cs); void removeForBlock(const std::vector<CTransactionRef>& vtx, unsigned int nBlockHeight) EXCLUSIVE_LOCKS_REQUIRED(cs); @@ -724,6 +726,12 @@ public: return totalTxSize; } + CAmount GetTotalFee() const EXCLUSIVE_LOCKS_REQUIRED(cs) + { + AssertLockHeld(cs); + return m_total_fee; + } + bool exists(const GenTxid& gtxid) const { LOCK(cs); diff --git a/src/util/getuniquepath.cpp b/src/util/getuniquepath.cpp new file mode 100644 index 0000000000..9839d2f624 --- /dev/null +++ b/src/util/getuniquepath.cpp @@ -0,0 +1,10 @@ +#include <random.h> +#include <fs.h> +#include <util/strencodings.h> + +fs::path GetUniquePath(const fs::path& base) +{ + FastRandomContext rnd; + fs::path tmpFile = base / HexStr(rnd.randbytes(8)); + return tmpFile; +}
\ No newline at end of file diff --git a/src/util/getuniquepath.h b/src/util/getuniquepath.h new file mode 100644 index 0000000000..e0c6147876 --- /dev/null +++ b/src/util/getuniquepath.h @@ -0,0 +1,19 @@ +// Copyright (c) 2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_UTIL_GETUNIQUEPATH_H +#define BITCOIN_UTIL_GETUNIQUEPATH_H + +#include <fs.h> + +/** + * Helper function for getting a unique path + * + * @param[in] base Base path + * @returns base joined with a random 16-character hex string (8 random bytes, hex-encoded). + * @post Returned path is unique with high probability. + */ +fs::path GetUniquePath(const fs::path& base); + +#endif // BITCOIN_UTIL_GETUNIQUEPATH_H
\ No newline at end of file diff --git a/src/util/macros.h b/src/util/macros.h index 36ea87c0fe..0887c80fd7 100644 --- a/src/util/macros.h +++ b/src/util/macros.h @@ -8,4 +8,11 @@ #define PASTE(x, y) x ## y #define PASTE2(x, y) PASTE(x, y) +/** + * Converts the parameter X to a string after macro replacement on X has been performed. + * Don't merge these into one macro! + */ +#define STRINGIZE(X) DO_STRINGIZE(X) +#define DO_STRINGIZE(X) #X + #endif // BITCOIN_UTIL_MACROS_H diff --git a/src/util/sock.cpp b/src/util/sock.cpp new file mode 100644 index 0000000000..4c65b5b680 --- /dev/null +++ b/src/util/sock.cpp @@ -0,0 +1,149 @@ +// Copyright (c) 2020-2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#include <compat.h> +#include <logging.h> +#include <tinyformat.h> +#include <util/sock.h> +#include <util/system.h> +#include <util/time.h> + +#include <codecvt> +#include <cwchar> +#include <locale> +#include <string> + +#ifdef USE_POLL +#include <poll.h> +#endif + +Sock::Sock() : m_socket(INVALID_SOCKET) {} + +Sock::Sock(SOCKET s) : m_socket(s) {} + +Sock::Sock(Sock&& other) +{ + m_socket = other.m_socket; + other.m_socket = INVALID_SOCKET; +} + +Sock::~Sock() { Reset(); } + +Sock& Sock::operator=(Sock&& other) +{ + Reset(); + m_socket = other.m_socket; + other.m_socket = INVALID_SOCKET; + return *this; +} + +SOCKET Sock::Get() const { return m_socket; } + +SOCKET Sock::Release() +{ + const SOCKET s = m_socket; + m_socket = INVALID_SOCKET; + return s; +} + +void Sock::Reset() { CloseSocket(m_socket); } + +ssize_t Sock::Send(const void* data, size_t len, int flags) const +{ + return send(m_socket, static_cast<const char*>(data), len, flags); +} + +ssize_t Sock::Recv(void* buf, size_t len, int flags) const +{ + return recv(m_socket, static_cast<char*>(buf), len, flags); +} + +bool Sock::Wait(std::chrono::milliseconds timeout, Event requested) const +{ +#ifdef USE_POLL + pollfd fd; + fd.fd = m_socket; + fd.events = 0; + if (requested & RECV) { + fd.events |= POLLIN; + } + if (requested & SEND) { + fd.events |= POLLOUT; + } + + return poll(&fd, 1, count_milliseconds(timeout)) != SOCKET_ERROR; +#else + if (!IsSelectableSocket(m_socket)) { + return false; + } + + fd_set fdset_recv; + fd_set fdset_send; + FD_ZERO(&fdset_recv); + FD_ZERO(&fdset_send); + + if (requested & RECV) { + FD_SET(m_socket, &fdset_recv); + } + + if (requested & SEND) { + FD_SET(m_socket, &fdset_send); + } + + timeval timeout_struct = MillisToTimeval(timeout); + + return select(m_socket + 1, &fdset_recv, &fdset_send, nullptr, &timeout_struct) != SOCKET_ERROR; +#endif /* USE_POLL */ +} + +#ifdef WIN32 +std::string NetworkErrorString(int err) +{ + wchar_t buf[256]; + buf[0] = 0; + if(FormatMessageW(FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS | FORMAT_MESSAGE_MAX_WIDTH_MASK, + nullptr, err, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), + buf, ARRAYSIZE(buf), nullptr)) + { + return strprintf("%s (%d)", std::wstring_convert<std::codecvt_utf8_utf16<wchar_t>,wchar_t>().to_bytes(buf), err); + } + else + { + return strprintf("Unknown error (%d)", err); + } +} +#else +std::string NetworkErrorString(int err) +{ + char buf[256]; + buf[0] = 0; + /* Too bad there are two incompatible implementations of the + * thread-safe strerror. 
*/ + const char *s; +#ifdef STRERROR_R_CHAR_P /* GNU variant can return a pointer outside the passed buffer */ + s = strerror_r(err, buf, sizeof(buf)); +#else /* POSIX variant always returns message in buffer */ + s = buf; + if (strerror_r(err, buf, sizeof(buf))) + buf[0] = 0; +#endif + return strprintf("%s (%d)", s, err); +} +#endif + +bool CloseSocket(SOCKET& hSocket) +{ + if (hSocket == INVALID_SOCKET) + return false; +#ifdef WIN32 + int ret = closesocket(hSocket); +#else + int ret = close(hSocket); +#endif + if (ret) { + LogPrintf("Socket close failed: %d. Error: %s\n", hSocket, NetworkErrorString(WSAGetLastError())); + } + hSocket = INVALID_SOCKET; + return ret != SOCKET_ERROR; +} diff --git a/src/util/sock.h b/src/util/sock.h new file mode 100644 index 0000000000..26fe60f18f --- /dev/null +++ b/src/util/sock.h @@ -0,0 +1,118 @@ +// Copyright (c) 2020-2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or http://www.opensource.org/licenses/mit-license.php. + +#ifndef BITCOIN_UTIL_SOCK_H +#define BITCOIN_UTIL_SOCK_H + +#include <compat.h> + +#include <chrono> +#include <string> + +/** + * RAII helper class that manages a socket. Mimics `std::unique_ptr`, but instead of a pointer it + * contains a socket and closes it automatically when it goes out of scope. + */ +class Sock +{ +public: + /** + * Default constructor, creates an empty object that does nothing when destroyed. + */ + Sock(); + + /** + * Take ownership of an existent socket. + */ + explicit Sock(SOCKET s); + + /** + * Copy constructor, disabled because closing the same socket twice is undesirable. + */ + Sock(const Sock&) = delete; + + /** + * Move constructor, grab the socket from another object and close ours (if set). + */ + Sock(Sock&& other); + + /** + * Destructor, close the socket or do nothing if empty. + */ + virtual ~Sock(); + + /** + * Copy assignment operator, disabled because closing the same socket twice is undesirable. + */ + Sock& operator=(const Sock&) = delete; + + /** + * Move assignment operator, grab the socket from another object and close ours (if set). + */ + virtual Sock& operator=(Sock&& other); + + /** + * Get the value of the contained socket. + * @return socket or INVALID_SOCKET if empty + */ + virtual SOCKET Get() const; + + /** + * Get the value of the contained socket and drop ownership. It will not be closed by the + * destructor after this call. + * @return socket or INVALID_SOCKET if empty + */ + virtual SOCKET Release(); + + /** + * Close if non-empty. + */ + virtual void Reset(); + + /** + * send(2) wrapper. Equivalent to `send(this->Get(), data, len, flags);`. Code that uses this + * wrapper can be unit-tested if this method is overridden by a mock Sock implementation. + */ + virtual ssize_t Send(const void* data, size_t len, int flags) const; + + /** + * recv(2) wrapper. Equivalent to `recv(this->Get(), buf, len, flags);`. Code that uses this + * wrapper can be unit-tested if this method is overridden by a mock Sock implementation. + */ + virtual ssize_t Recv(void* buf, size_t len, int flags) const; + + using Event = uint8_t; + + /** + * If passed to `Wait()`, then it will wait for readiness to read from the socket. + */ + static constexpr Event RECV = 0b01; + + /** + * If passed to `Wait()`, then it will wait for readiness to send to the socket. + */ + static constexpr Event SEND = 0b10; + + /** + * Wait for readiness for input (recv) or output (send). 
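 * A caller-side sketch (the descriptor `s` and the 50ms timeout are illustrative):
 *
 *   Sock sock(s);                               // takes ownership; closed when sock leaves scope
 *   if (sock.Wait(std::chrono::milliseconds{50}, Sock::RECV)) {
 *       char buf[0x1000];
 *       const ssize_t n = sock.Recv(buf, sizeof(buf), 0);
 *       if (n > 0) (void)sock.Send(buf, n, 0);  // e.g. echo back whatever arrived
 *   }
 *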
+ * @param[in] timeout Wait this much for at least one of the requested events to occur. + * @param[in] requested Wait for those events, bitwise-or of `RECV` and `SEND`. + * @return true on success and false otherwise + */ + virtual bool Wait(std::chrono::milliseconds timeout, Event requested) const; + +private: + /** + * Contained socket. `INVALID_SOCKET` designates the object is empty. + */ + SOCKET m_socket; +}; + +/** Return readable error string for a network error code */ +std::string NetworkErrorString(int err); + +/** Close socket and set hSocket to INVALID_SOCKET */ +bool CloseSocket(SOCKET& hSocket); + +#endif // BITCOIN_UTIL_SOCK_H diff --git a/src/util/strencodings.h b/src/util/strencodings.h index b4a61202ef..98379e9138 100644 --- a/src/util/strencodings.h +++ b/src/util/strencodings.h @@ -17,8 +17,6 @@ #include <string> #include <vector> -#define ARRAYLEN(array) (sizeof(array)/sizeof((array)[0])) - /** Used by SanitizeString() */ enum SafeChars { diff --git a/src/util/string.h b/src/util/string.h index 5ffdc80d88..b26facc502 100644 --- a/src/util/string.h +++ b/src/util/string.h @@ -25,6 +25,14 @@ return str.substr(front, end - front + 1); } +[[nodiscard]] inline std::string RemovePrefix(const std::string& str, const std::string& prefix) +{ + if (str.substr(0, prefix.size()) == prefix) { + return str.substr(prefix.size()); + } + return str; +} + /** * Join a list of items * diff --git a/src/util/system.cpp b/src/util/system.cpp index d1fb921642..9a2e719bbc 100644 --- a/src/util/system.cpp +++ b/src/util/system.cpp @@ -3,7 +3,6 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. -#include <sync.h> #include <util/system.h> #ifdef HAVE_BOOST_PROCESS @@ -11,6 +10,9 @@ #endif // HAVE_BOOST_PROCESS #include <chainparamsbase.h> +#include <sync.h> +#include <util/check.h> +#include <util/getuniquepath.h> #include <util/strencodings.h> #include <util/string.h> #include <util/translation.h> @@ -123,7 +125,7 @@ void ReleaseDirectoryLocks() bool DirIsWritable(const fs::path& directory) { - fs::path tmpFile = directory / fs::unique_path(); + fs::path tmpFile = GetUniquePath(directory); FILE* file = fsbridge::fopen(tmpFile, "a"); if (!file) return false; @@ -310,8 +312,22 @@ bool ArgsManager::ParseParameters(int argc, const char* const argv[], std::strin key[0] = '-'; #endif - if (key[0] != '-') + if (key[0] != '-') { + if (!m_accept_any_command && m_command.empty()) { + // The first non-dash arg is a registered command + Optional<unsigned int> flags = GetArgFlags(key); + if (!flags || !(*flags & ArgsManager::COMMAND)) { + error = strprintf("Invalid command '%s'", argv[i]); + return false; + } + } + m_command.push_back(key); + while (++i < argc) { + // The remaining args are command args + m_command.push_back(argv[i]); + } break; + } // Transform --foo to -foo if (key.length() > 1 && key[1] == '-') @@ -359,6 +375,26 @@ Optional<unsigned int> ArgsManager::GetArgFlags(const std::string& name) const return nullopt; } +std::optional<const ArgsManager::Command> ArgsManager::GetCommand() const +{ + Command ret; + LOCK(cs_args); + auto it = m_command.begin(); + if (it == m_command.end()) { + // No command was passed + return std::nullopt; + } + if (!m_accept_any_command) { + // The registered command + ret.command = *(it++); + } + while (it != m_command.end()) { + // The unregistered command and args (if any) + ret.args.push_back(*(it++)); + } + return ret; +} + std::vector<std::string> 
ArgsManager::GetArgs(const std::string& strArg) const { std::vector<std::string> result; @@ -504,8 +540,22 @@ void ArgsManager::ForceSetArg(const std::string& strArg, const std::string& strV m_settings.forced_settings[SettingName(strArg)] = strValue; } +void ArgsManager::AddCommand(const std::string& cmd, const std::string& help, const OptionsCategory& cat) +{ + Assert(cmd.find('=') == std::string::npos); + Assert(cmd.at(0) != '-'); + + LOCK(cs_args); + m_accept_any_command = false; // latch to false + std::map<std::string, Arg>& arg_map = m_available_args[cat]; + auto ret = arg_map.emplace(cmd, Arg{"", help, ArgsManager::COMMAND}); + Assert(ret.second); // Fail on duplicate commands +} + void ArgsManager::AddArg(const std::string& name, const std::string& help, unsigned int flags, const OptionsCategory& cat) { + Assert((flags & ArgsManager::COMMAND) == 0); // use AddCommand + // Split arg name from its help param size_t eq_index = name.find('='); if (eq_index == std::string::npos) { diff --git a/src/util/system.h b/src/util/system.h index d06c30bfa7..5959bc4196 100644 --- a/src/util/system.h +++ b/src/util/system.h @@ -166,7 +166,7 @@ struct SectionInfo class ArgsManager { public: - enum Flags { + enum Flags : uint32_t { // Boolean options can accept negation syntax -noOPTION or -noOPTION=1 ALLOW_BOOL = 0x01, ALLOW_INT = 0x02, @@ -181,6 +181,7 @@ public: NETWORK_ONLY = 0x200, // This argument's value is sensitive (such as a password). SENSITIVE = 0x400, + COMMAND = 0x800, }; protected: @@ -193,9 +194,11 @@ protected: mutable RecursiveMutex cs_args; util::Settings m_settings GUARDED_BY(cs_args); + std::vector<std::string> m_command GUARDED_BY(cs_args); std::string m_network GUARDED_BY(cs_args); std::set<std::string> m_network_only_args GUARDED_BY(cs_args); std::map<OptionsCategory, std::map<std::string, Arg>> m_available_args GUARDED_BY(cs_args); + bool m_accept_any_command GUARDED_BY(cs_args){true}; std::list<SectionInfo> m_config_sections GUARDED_BY(cs_args); [[nodiscard]] bool ReadConfigStream(std::istream& stream, const std::string& filepath, std::string& error, bool ignore_invalid_keys = false); @@ -246,6 +249,20 @@ public: */ const std::list<SectionInfo> GetUnrecognizedSections() const; + struct Command { + /** The command (if one has been registered with AddCommand), or empty */ + std::string command; + /** + * If command is non-empty: Any args that followed it + * If command is empty: The unregistered command and any args that followed it + */ + std::vector<std::string> args; + }; + /** + * Get the command and command args (returns std::nullopt if no command provided) + */ + std::optional<const Command> GetCommand() const; + /** * Return a vector of strings of the given argument * @@ -332,6 +349,11 @@ public: void AddArg(const std::string& name, const std::string& help, unsigned int flags, const OptionsCategory& cat); /** + * Add subcommand + */ + void AddCommand(const std::string& cmd, const std::string& help, const OptionsCategory& cat); + + /** * Add many hidden arguments */ void AddHiddenArgs(const std::vector<std::string>& args); diff --git a/src/util/time.cpp b/src/util/time.cpp index e96972fe12..e6f0986a39 100644 --- a/src/util/time.cpp +++ b/src/util/time.cpp @@ -7,8 +7,11 @@ #include <config/bitcoin-config.h> #endif +#include <compat.h> #include <util/time.h> +#include <util/check.h> + #include <atomic> #include <boost/date_time/posix_time/posix_time.hpp> #include <ctime> @@ -18,7 +21,7 @@ void UninterruptibleSleep(const std::chrono::microseconds& n) { 
std::this_thread::sleep_for(n); } -static std::atomic<int64_t> nMockTime(0); //!< For unit testing +static std::atomic<int64_t> nMockTime(0); //!< For testing int64_t GetTime() { @@ -30,6 +33,49 @@ int64_t GetTime() return now; } +bool ChronoSanityCheck() +{ + // std::chrono::system_clock.time_since_epoch and time_t(0) are not guaranteed + // to use the Unix epoch timestamp, prior to C++20, but in practice they almost + // certainly will. Any differing behavior will be assumed to be an error, unless + // certain platforms prove to consistently deviate, at which point we'll cope + // with it by adding offsets. + + // Create a new clock from time_t(0) and make sure that it represents 0 + // seconds from the system_clock's time_since_epoch. Then convert that back + // to a time_t and verify that it's the same as before. + const time_t time_t_epoch{}; + auto clock = std::chrono::system_clock::from_time_t(time_t_epoch); + if (std::chrono::duration_cast<std::chrono::seconds>(clock.time_since_epoch()).count() != 0) { + return false; + } + + time_t time_val = std::chrono::system_clock::to_time_t(clock); + if (time_val != time_t_epoch) { + return false; + } + + // Check that the above zero time is actually equal to the known unix timestamp. + struct tm epoch; +#ifdef HAVE_GMTIME_R + if (gmtime_r(&time_val, &epoch) == nullptr) { +#else + if (gmtime_s(&epoch, &time_val) != 0) { +#endif + return false; + } + + if ((epoch.tm_sec != 0) || + (epoch.tm_min != 0) || + (epoch.tm_hour != 0) || + (epoch.tm_mday != 1) || + (epoch.tm_mon != 0) || + (epoch.tm_year != 70)) { + return false; + } + return true; +} + template <typename T> T GetTime() { @@ -44,35 +90,43 @@ template std::chrono::seconds GetTime(); template std::chrono::milliseconds GetTime(); template std::chrono::microseconds GetTime(); +template <typename T> +static T GetSystemTime() +{ + const auto now = std::chrono::duration_cast<T>(std::chrono::system_clock::now().time_since_epoch()); + assert(now.count() > 0); + return now; +} + void SetMockTime(int64_t nMockTimeIn) { + Assert(nMockTimeIn >= 0); nMockTime.store(nMockTimeIn, std::memory_order_relaxed); } -int64_t GetMockTime() +void SetMockTime(std::chrono::seconds mock_time_in) +{ + nMockTime.store(mock_time_in.count(), std::memory_order_relaxed); +} + +std::chrono::seconds GetMockTime() { - return nMockTime.load(std::memory_order_relaxed); + return std::chrono::seconds(nMockTime.load(std::memory_order_relaxed)); } int64_t GetTimeMillis() { - int64_t now = (boost::posix_time::microsec_clock::universal_time() - - boost::posix_time::ptime(boost::gregorian::date(1970,1,1))).total_milliseconds(); - assert(now > 0); - return now; + return int64_t{GetSystemTime<std::chrono::milliseconds>().count()}; } int64_t GetTimeMicros() { - int64_t now = (boost::posix_time::microsec_clock::universal_time() - - boost::posix_time::ptime(boost::gregorian::date(1970,1,1))).total_microseconds(); - assert(now > 0); - return now; + return int64_t{GetSystemTime<std::chrono::microseconds>().count()}; } int64_t GetSystemTimeInSeconds() { - return GetTimeMicros()/1000000; + return int64_t{GetSystemTime<std::chrono::seconds>().count()}; } std::string FormatISO8601DateTime(int64_t nTime) { @@ -114,3 +168,16 @@ int64_t ParseISO8601DateTime(const std::string& str) return 0; return (ptime - epoch).total_seconds(); } + +struct timeval MillisToTimeval(int64_t nTimeout) +{ + struct timeval timeout; + timeout.tv_sec = nTimeout / 1000; + timeout.tv_usec = (nTimeout % 1000) * 1000; + return timeout; +} + +struct timeval 
MillisToTimeval(std::chrono::milliseconds ms) +{ + return MillisToTimeval(count_milliseconds(ms)); +} diff --git a/src/util/time.h b/src/util/time.h index c69f604dc6..56131ce0fe 100644 --- a/src/util/time.h +++ b/src/util/time.h @@ -6,6 +6,8 @@ #ifndef BITCOIN_UTIL_TIME_H #define BITCOIN_UTIL_TIME_H +#include <compat.h> + #include <chrono> #include <stdint.h> #include <string> @@ -25,6 +27,7 @@ void UninterruptibleSleep(const std::chrono::microseconds& n); * interface that doesn't support std::chrono (e.g. RPC, debug log, or the GUI) */ inline int64_t count_seconds(std::chrono::seconds t) { return t.count(); } +inline int64_t count_milliseconds(std::chrono::milliseconds t) { return t.count(); } inline int64_t count_microseconds(std::chrono::microseconds t) { return t.count(); } /** @@ -40,10 +43,19 @@ int64_t GetTimeMicros(); /** Returns the system time (not mockable) */ int64_t GetSystemTimeInSeconds(); // Like GetTime(), but not mockable -/** For testing. Set e.g. with the setmocktime rpc, or -mocktime argument */ +/** + * DEPRECATED + * Use SetMockTime with chrono type + * + * @param[in] nMockTimeIn Time in seconds. + */ void SetMockTime(int64_t nMockTimeIn); + +/** For testing. Set e.g. with the setmocktime rpc, or -mocktime argument */ +void SetMockTime(std::chrono::seconds mock_time_in); + /** For testing */ -int64_t GetMockTime(); +std::chrono::seconds GetMockTime(); /** Return system time (or mocked time, if set) */ template <typename T> @@ -57,4 +69,17 @@ std::string FormatISO8601DateTime(int64_t nTime); std::string FormatISO8601Date(int64_t nTime); int64_t ParseISO8601DateTime(const std::string& str); +/** + * Convert milliseconds to a struct timeval for e.g. select. + */ +struct timeval MillisToTimeval(int64_t nTimeout); + +/** + * Convert milliseconds to a struct timeval for e.g. select. 
+ */ +struct timeval MillisToTimeval(std::chrono::milliseconds ms); + +/** Sanity check epoch match normal Unix epoch */ +bool ChronoSanityCheck(); + #endif // BITCOIN_UTIL_TIME_H diff --git a/src/validation.cpp b/src/validation.cpp index 38df71b994..0b2ca4b422 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -17,9 +17,11 @@ #include <cuckoocache.h> #include <flatfile.h> #include <hash.h> +#include <index/blockfilterindex.h> #include <index/txindex.h> #include <logging.h> #include <logging/timer.h> +#include <node/coinstats.h> #include <node/ui_interface.h> #include <optional.h> #include <policy/policy.h> @@ -197,14 +199,19 @@ CBlockIndex* BlockManager::FindForkInGlobalIndex(const CChain& chain, const CBlo std::unique_ptr<CBlockTreeDB> pblocktree; -bool CheckInputScripts(const CTransaction& tx, TxValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks = nullptr); +bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, + const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore, + bool cacheFullScriptStore, PrecomputedTransactionData& txdata, + std::vector<CScriptCheck>* pvChecks = nullptr) + EXCLUSIVE_LOCKS_REQUIRED(cs_main); static FILE* OpenUndoFile(const FlatFilePos &pos, bool fReadOnly = false); static FlatFileSeq BlockFileSeq(); static FlatFileSeq UndoFileSeq(); -bool CheckFinalTx(const CTransaction &tx, int flags) +bool CheckFinalTx(const CBlockIndex* active_chain_tip, const CTransaction &tx, int flags) { AssertLockHeld(cs_main); + assert(std::addressof(*::ChainActive().Tip()) == std::addressof(*active_chain_tip)); // By convention a negative value for flags indicates that the // current network-enforced consensus rules should be used. In @@ -220,7 +227,7 @@ bool CheckFinalTx(const CTransaction &tx, int flags) // evaluated is what is used. Thus if we want to know if a // transaction can be part of the *next* block, we need to call // IsFinalTx() with one more than ::ChainActive().Height(). - const int nBlockHeight = ::ChainActive().Height() + 1; + const int nBlockHeight = active_chain_tip->nHeight + 1; // BIP113 requires that time-locked transactions have nLockTime set to // less than the median time of the previous block they're contained in. @@ -228,13 +235,13 @@ bool CheckFinalTx(const CTransaction &tx, int flags) // chain tip, so we use that to calculate the median time passed to // IsFinalTx() if LOCKTIME_MEDIAN_TIME_PAST is set. const int64_t nBlockTime = (flags & LOCKTIME_MEDIAN_TIME_PAST) - ? ::ChainActive().Tip()->GetMedianTimePast() + ? active_chain_tip->GetMedianTimePast() : GetAdjustedTime(); return IsFinalTx(tx, nBlockHeight, nBlockTime); } -bool TestLockPointValidity(const LockPoints* lp) +bool TestLockPointValidity(CChain& active_chain, const LockPoints* lp) { AssertLockHeld(cs_main); assert(lp); @@ -243,7 +250,8 @@ bool TestLockPointValidity(const LockPoints* lp) if (lp->maxInputBlock) { // Check whether ::ChainActive() is an extension of the block at which the LockPoints // calculation was valid. 
If not LockPoints are no longer valid - if (!::ChainActive().Contains(lp->maxInputBlock)) { + assert(std::addressof(::ChainActive()) == std::addressof(active_chain)); + if (!active_chain.Contains(lp->maxInputBlock)) { return false; } } @@ -252,22 +260,28 @@ bool TestLockPointValidity(const LockPoints* lp) return true; } -bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flags, LockPoints* lp, bool useExistingLockPoints) +bool CheckSequenceLocks(CChainState& active_chainstate, + const CTxMemPool& pool, + const CTransaction& tx, + int flags, + LockPoints* lp, + bool useExistingLockPoints) { AssertLockHeld(cs_main); AssertLockHeld(pool.cs); + assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); - CBlockIndex* tip = ::ChainActive().Tip(); + CBlockIndex* tip = active_chainstate.m_chain.Tip(); assert(tip != nullptr); CBlockIndex index; index.pprev = tip; - // CheckSequenceLocks() uses ::ChainActive().Height()+1 to evaluate + // CheckSequenceLocks() uses active_chainstate.m_chain.Height()+1 to evaluate // height based locks because when SequenceLocks() is called within // ConnectBlock(), the height of the block *being* // evaluated is what is used. // Thus if we want to know if a transaction can be part of the - // *next* block, we need to use one more than ::ChainActive().Height() + // *next* block, we need to use one more than active_chainstate.m_chain.Height() index.nHeight = tip->nHeight + 1; std::pair<int, int64_t> lockPair; @@ -277,8 +291,8 @@ bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flag lockPair.second = lp->time; } else { - // CoinsTip() contains the UTXO set for ::ChainActive().Tip() - CCoinsViewMemPool viewMemPool(&::ChainstateActive().CoinsTip(), pool); + // CoinsTip() contains the UTXO set for active_chainstate.m_chain.Tip() + CCoinsViewMemPool viewMemPool(&active_chainstate.CoinsTip(), pool); std::vector<int> prevheights; prevheights.resize(tx.vin.size()); for (size_t txinIndex = 0; txinIndex < tx.vin.size(); txinIndex++) { @@ -327,7 +341,7 @@ bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flag // Returns the script flags which should be checked for a given block static unsigned int GetBlockScriptFlags(const CBlockIndex* pindex, const Consensus::Params& chainparams); -static void LimitMempoolSize(CTxMemPool& pool, size_t limit, std::chrono::seconds age) +static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache, size_t limit, std::chrono::seconds age) EXCLUSIVE_LOCKS_REQUIRED(pool.cs, ::cs_main) { int expired = pool.Expire(GetTime<std::chrono::seconds>() - age); @@ -337,18 +351,20 @@ static void LimitMempoolSize(CTxMemPool& pool, size_t limit, std::chrono::second std::vector<COutPoint> vNoSpendsRemaining; pool.TrimToSize(limit, &vNoSpendsRemaining); + assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(coins_cache)); for (const COutPoint& removed : vNoSpendsRemaining) - ::ChainstateActive().CoinsTip().Uncache(removed); + coins_cache.Uncache(removed); } -static bool IsCurrentForFeeEstimation() EXCLUSIVE_LOCKS_REQUIRED(cs_main) +static bool IsCurrentForFeeEstimation(CChainState& active_chainstate) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { AssertLockHeld(cs_main); - if (::ChainstateActive().IsInitialBlockDownload()) + assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); + if (active_chainstate.IsInitialBlockDownload()) return false; - if (::ChainActive().Tip()->GetBlockTime() < 
count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE)) + if (active_chainstate.m_chain.Tip()->GetBlockTime() < count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE)) return false; - if (::ChainActive().Height() < pindexBestHeader->nHeight - 1) + if (active_chainstate.m_chain.Height() < pindexBestHeader->nHeight - 1) return false; return true; } @@ -366,10 +382,11 @@ static bool IsCurrentForFeeEstimation() EXCLUSIVE_LOCKS_REQUIRED(cs_main) * and instead just erase from the mempool as needed. */ -static void UpdateMempoolForReorg(CTxMemPool& mempool, DisconnectedBlockTransactions& disconnectpool, bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, mempool.cs) +static void UpdateMempoolForReorg(CChainState& active_chainstate, CTxMemPool& mempool, DisconnectedBlockTransactions& disconnectpool, bool fAddToMempool) EXCLUSIVE_LOCKS_REQUIRED(cs_main, mempool.cs) { AssertLockHeld(cs_main); AssertLockHeld(mempool.cs); + assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); std::vector<uint256> vHashUpdate; // disconnectpool's insertion_order index sorts the entries from // oldest to newest, but the oldest entry will be the last tx from the @@ -380,10 +397,8 @@ static void UpdateMempoolForReorg(CTxMemPool& mempool, DisconnectedBlockTransact auto it = disconnectpool.queuedTx.get<insertion_order>().rbegin(); while (it != disconnectpool.queuedTx.get<insertion_order>().rend()) { // ignore validation errors in resurrected transactions - TxValidationState stateDummy; if (!fAddToMempool || (*it)->IsCoinBase() || - !AcceptToMemoryPool(mempool, stateDummy, *it, - nullptr /* plTxnReplaced */, true /* bypass_limits */)) { + AcceptToMemoryPool(active_chainstate, mempool, *it, true /* bypass_limits */).m_result_type != MempoolAcceptResult::ResultType::VALID) { // If the transaction doesn't make it in to the mempool, remove any // transactions that depend on it (which would now be orphans). 
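            // The call above shows the new result-object API: AcceptToMemoryPool() now
            // returns a MempoolAcceptResult rather than a bool plus a TxValidationState
            // out-parameter, and passing test_accept=true runs the same validation without
            // adding anything to the pool. A caller that wants the rejection reason would
            // look roughly like this sketch (local names are illustrative; cs_main held):
            //
            //   const MempoolAcceptResult res = AcceptToMemoryPool(chainstate, pool, ptx,
            //                                                      /*bypass_limits=*/false,
            //                                                      /*test_accept=*/false);
            //   if (res.m_result_type != MempoolAcceptResult::ResultType::VALID) {
            //       // res.m_state is assumed here to carry the TxValidationState with the reason
            //       LogPrintf("tx rejected: %s\n", res.m_state.ToString());
            //   }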
mempool.removeRecursive(**it, MemPoolRemovalReason::REORG); @@ -401,9 +416,9 @@ static void UpdateMempoolForReorg(CTxMemPool& mempool, DisconnectedBlockTransact mempool.UpdateTransactionsFromBlock(vHashUpdate); // We also need to remove any now-immature transactions - mempool.removeForReorg(&::ChainstateActive().CoinsTip(), ::ChainActive().Tip()->nHeight + 1, STANDARD_LOCKTIME_VERIFY_FLAGS); + mempool.removeForReorg(active_chainstate, STANDARD_LOCKTIME_VERIFY_FLAGS); // Re-limit mempool size, in case we added any transactions - LimitMempoolSize(mempool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)}); + LimitMempoolSize(mempool, active_chainstate.CoinsTip(), gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)}); } /** @@ -413,7 +428,7 @@ static void UpdateMempoolForReorg(CTxMemPool& mempool, DisconnectedBlockTransact * */ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationState& state, const CCoinsViewCache& view, const CTxMemPool& pool, - unsigned int flags, PrecomputedTransactionData& txdata) + unsigned int flags, PrecomputedTransactionData& txdata, CCoinsViewCache& coins_tip) EXCLUSIVE_LOCKS_REQUIRED(cs_main, pool.cs) { AssertLockHeld(cs_main); @@ -438,7 +453,8 @@ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationS assert(txFrom->vout.size() > txin.prevout.n); assert(txFrom->vout[txin.prevout.n] == coin.out); } else { - const Coin& coinFromUTXOSet = ::ChainstateActive().CoinsTip().AccessCoin(txin.prevout); + assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(coins_tip)); + const Coin& coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout); assert(!coinFromUTXOSet.IsSpent()); assert(coinFromUTXOSet.out == coin.out); } @@ -453,19 +469,19 @@ namespace { class MemPoolAccept { public: - explicit MemPoolAccept(CTxMemPool& mempool) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&::ChainstateActive().CoinsTip(), m_pool), + explicit MemPoolAccept(CTxMemPool& mempool, CChainState& active_chainstate) : m_pool(mempool), m_view(&m_dummy), m_viewmempool(&active_chainstate.CoinsTip(), m_pool), m_active_chainstate(active_chainstate), m_limit_ancestors(gArgs.GetArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT)), m_limit_ancestor_size(gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000), m_limit_descendants(gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)), - m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) {} + m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) { + assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate)); + } // We put the arguments we're handed into a struct, so we can pass them // around easier. 
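    // For reference, the static AcceptToMemoryPoolWithTime() wrapper further down
    // fills this struct positionally, roughly (a sketch):
    //
    //   std::vector<COutPoint> coins_to_uncache;
    //   MemPoolAccept::ATMPArgs args{chainparams, GetTime(), /*bypass_limits=*/false,
    //                                coins_to_uncache, /*test_accept=*/false};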
struct ATMPArgs { const CChainParams& m_chainparams; - TxValidationState &m_state; const int64_t m_accept_time; - std::list<CTransactionRef>* m_replaced_transactions; const bool m_bypass_limits; /* * Return any outpoints which were not previously present in the coins @@ -476,11 +492,10 @@ public: */ std::vector<COutPoint>& m_coins_to_uncache; const bool m_test_accept; - CAmount* m_fee_out; }; // Single transaction acceptance - bool AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + MempoolAcceptResult AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main); private: // All the intermediate state that gets passed between the various levels @@ -491,14 +506,17 @@ private: CTxMemPool::setEntries m_all_conflicting; CTxMemPool::setEntries m_ancestors; std::unique_ptr<CTxMemPoolEntry> m_entry; + std::list<CTransactionRef> m_replaced_transactions; bool m_replacement_transaction; + CAmount m_base_fees; CAmount m_modified_fees; CAmount m_conflicting_fees; size_t m_conflicting_size; const CTransactionRef& m_ptx; const uint256& m_hash; + TxValidationState m_state; }; // Run the policy checks on a given transaction, excluding any script checks. @@ -509,18 +527,18 @@ private: // Run the script checks using our policy flags. As this can be slow, we should // only invoke this on transactions that have otherwise passed policy checks. - bool PolicyScriptChecks(ATMPArgs& args, const Workspace& ws, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); + bool PolicyScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); // Re-run the script checks, using consensus flags, and try to cache the // result in the scriptcache. This should be done after // PolicyScriptChecks(). This requires that all inputs either be in our // utxo set or in the mempool. - bool ConsensusScriptChecks(ATMPArgs& args, const Workspace& ws, PrecomputedTransactionData &txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); + bool ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData &txdata) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); // Try to add the transaction to the mempool, removing any conflicts first. // Returns true if the transaction is in the mempool after any size // limiting is performed, false otherwise. - bool Finalize(ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); + bool Finalize(const ATMPArgs& args, Workspace& ws) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); // Compare a package's feerate against minimum allowed. bool CheckFeeRate(size_t package_size, CAmount package_fee, TxValidationState& state) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs) @@ -542,6 +560,8 @@ private: CCoinsViewMemPool m_viewmempool; CCoinsView m_dummy; + CChainState& m_active_chainstate; + // The package limits in effect at the time of invocation. 
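    // (Read from the -limitancestorcount, -limitancestorsize, -limitdescendantcount
    // and -limitdescendantsize options via the gArgs lookups in the constructor above.)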
const size_t m_limit_ancestors; const size_t m_limit_ancestor_size; @@ -558,12 +578,12 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) const uint256& hash = ws.m_hash; // Copy/alias what we need out of args - TxValidationState &state = args.m_state; const int64_t nAcceptTime = args.m_accept_time; const bool bypass_limits = args.m_bypass_limits; std::vector<COutPoint>& coins_to_uncache = args.m_coins_to_uncache; // Alias what we need out of ws + TxValidationState& state = ws.m_state; std::set<uint256>& setConflicts = ws.m_conflicts; CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting; CTxMemPool::setEntries& setAncestors = ws.m_ancestors; @@ -596,7 +616,8 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // Only accept nLockTime-using transactions that can be mined in the next // block; we don't want our mempool filled up with transactions that can't // be mined yet. - if (!CheckFinalTx(tx, STANDARD_LOCKTIME_VERIFY_FLAGS)) + assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); + if (!CheckFinalTx(m_active_chainstate.m_chain.Tip(), tx, STANDARD_LOCKTIME_VERIFY_FLAGS)) return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final"); // is it already in the memory pool? @@ -644,7 +665,8 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) LockPoints lp; m_view.SetBackend(m_viewmempool); - const CCoinsViewCache& coins_cache = ::ChainstateActive().CoinsTip(); + assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip())); + const CCoinsViewCache& coins_cache = m_active_chainstate.CoinsTip(); // do all inputs exist? for (const CTxIn& txin : tx.vin) { if (!coins_cache.HaveCoinInCache(txin.prevout)) { @@ -680,22 +702,19 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // be mined yet. 
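    // (CheckSequenceLocks() now takes the chainstate explicitly; a standalone caller
    // would look roughly like this sketch:
    //   LOCK2(cs_main, pool.cs);
    //   LockPoints lp;
    //   CheckSequenceLocks(::ChainstateActive(), pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp);
    // here the member m_active_chainstate is passed instead.)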
// Must keep pool.cs for this unless we change CheckSequenceLocks to take a // CoinsViewCache instead of create its own - if (!CheckSequenceLocks(m_pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) + assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate)); + if (!CheckSequenceLocks(m_active_chainstate, m_pool, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final"); - CAmount nFees = 0; - if (!Consensus::CheckTxInputs(tx, state, m_view, g_chainman.m_blockman.GetSpendHeight(m_view), nFees)) { + assert(std::addressof(g_chainman.m_blockman) == std::addressof(m_active_chainstate.m_blockman)); + if (!Consensus::CheckTxInputs(tx, state, m_view, m_active_chainstate.m_blockman.GetSpendHeight(m_view), ws.m_base_fees)) { return false; // state filled in by CheckTxInputs } - // If fee_out is passed, return the fee to the caller - if (args.m_fee_out) { - *args.m_fee_out = nFees; - } - // Check for non-standard pay-to-script-hash in inputs const auto& params = args.m_chainparams.GetConsensus(); - auto taproot_state = VersionBitsState(::ChainActive().Tip(), params, Consensus::DEPLOYMENT_TAPROOT, versionbitscache); + assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); + auto taproot_state = VersionBitsState(m_active_chainstate.m_chain.Tip(), params, Consensus::DEPLOYMENT_TAPROOT, versionbitscache); if (fRequireStandard && !AreInputsStandard(tx, m_view, taproot_state == ThresholdState::ACTIVE)) { return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs"); } @@ -707,7 +726,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) int64_t nSigOpsCost = GetTransactionSigOpCost(tx, m_view, STANDARD_SCRIPT_VERIFY_FLAGS); // nModifiedFees includes any fee deltas from PrioritiseTransaction - nModifiedFees = nFees; + nModifiedFees = ws.m_base_fees; m_pool.ApplyDelta(hash, nModifiedFees); // Keep track of transactions that spend a coinbase, which we re-scan @@ -721,7 +740,8 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) } } - entry.reset(new CTxMemPoolEntry(ptx, nFees, nAcceptTime, ::ChainActive().Height(), + assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); + entry.reset(new CTxMemPoolEntry(ptx, ws.m_base_fees, nAcceptTime, m_active_chainstate.m_chain.Height(), fSpendsCoinbase, nSigOpsCost, lp)); unsigned int nSize = entry->GetTxSize(); @@ -925,11 +945,10 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) return true; } -bool MemPoolAccept::PolicyScriptChecks(ATMPArgs& args, const Workspace& ws, PrecomputedTransactionData& txdata) +bool MemPoolAccept::PolicyScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) { const CTransaction& tx = *ws.m_ptx; - - TxValidationState &state = args.m_state; + TxValidationState& state = ws.m_state; constexpr unsigned int scriptVerifyFlags = STANDARD_SCRIPT_VERIFY_FLAGS; @@ -952,12 +971,11 @@ bool MemPoolAccept::PolicyScriptChecks(ATMPArgs& args, const Workspace& ws, Prec return true; } -bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs& args, const Workspace& ws, PrecomputedTransactionData& txdata) +bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws, PrecomputedTransactionData& txdata) { const CTransaction& tx = *ws.m_ptx; const uint256& hash = ws.m_hash; - - TxValidationState &state = args.m_state; + TxValidationState& state = ws.m_state; const CChainParams& chainparams = 
args.m_chainparams; // Check again against the current block tip's script verification @@ -975,8 +993,10 @@ bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs& args, const Workspace& ws, P // There is a similar check in CreateNewBlock() to prevent creating // invalid blocks (using TestBlockValidity), however allowing such // transactions into the mempool can be exploited as a DoS attack. - unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(::ChainActive().Tip(), chainparams.GetConsensus()); - if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, txdata)) { + assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); + unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(m_active_chainstate.m_chain.Tip(), chainparams.GetConsensus()); + assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip())); + if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, txdata, m_active_chainstate.CoinsTip())) { return error("%s: BUG! PLEASE REPORT THIS! CheckInputScripts failed against latest-block but not STANDARD flags %s, %s", __func__, hash.ToString(), state.ToString()); } @@ -984,11 +1004,11 @@ bool MemPoolAccept::ConsensusScriptChecks(ATMPArgs& args, const Workspace& ws, P return true; } -bool MemPoolAccept::Finalize(ATMPArgs& args, Workspace& ws) +bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws) { const CTransaction& tx = *ws.m_ptx; const uint256& hash = ws.m_hash; - TxValidationState &state = args.m_state; + TxValidationState& state = ws.m_state; const bool bypass_limits = args.m_bypass_limits; CTxMemPool::setEntries& allConflicting = ws.m_all_conflicting; @@ -1007,8 +1027,7 @@ bool MemPoolAccept::Finalize(ATMPArgs& args, Workspace& ws) hash.ToString(), FormatMoney(nModifiedFees - nConflictingFees), (int)entry->GetTxSize() - (int)nConflictingSize); - if (args.m_replaced_transactions) - args.m_replaced_transactions->push_back(it->GetSharedTx()); + ws.m_replaced_transactions.push_back(it->GetSharedTx()); } m_pool.RemoveStaged(allConflicting, false, MemPoolRemovalReason::REPLACED); @@ -1017,28 +1036,30 @@ bool MemPoolAccept::Finalize(ATMPArgs& args, Workspace& ws) // - it's not being re-added during a reorg which bypasses typical mempool fee limits // - the node is not behind // - the transaction is not dependent on any other transactions in the mempool - bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation() && m_pool.HasNoInputsOf(tx); + assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate)); + bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation(m_active_chainstate) && m_pool.HasNoInputsOf(tx); // Store transaction in memory m_pool.addUnchecked(*entry, setAncestors, validForFeeEstimation); // trim mempool and check if tx was trimmed if (!bypass_limits) { - LimitMempoolSize(m_pool, gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)}); + assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip())); + LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip(), gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)}); if (!m_pool.exists(hash)) return 
state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full"); } return true; } -bool MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) +MempoolAcceptResult MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs& args) { AssertLockHeld(cs_main); LOCK(m_pool.cs); // mempool "read lock" (held through GetMainSignals().TransactionAddedToMempool()) - Workspace workspace(ptx); + Workspace ws(ptx); - if (!PreChecks(args, workspace)) return false; + if (!PreChecks(args, ws)) return MempoolAcceptResult(ws.m_state); // Only compute the precomputed transaction data if we need to verify // scripts (ie, other policy checks pass). We perform the inexpensive @@ -1046,51 +1067,56 @@ bool MemPoolAccept::AcceptSingleTransaction(const CTransactionRef& ptx, ATMPArgs // checks pass, to mitigate CPU exhaustion denial-of-service attacks. PrecomputedTransactionData txdata; - if (!PolicyScriptChecks(args, workspace, txdata)) return false; + if (!PolicyScriptChecks(args, ws, txdata)) return MempoolAcceptResult(ws.m_state); - if (!ConsensusScriptChecks(args, workspace, txdata)) return false; + if (!ConsensusScriptChecks(args, ws, txdata)) return MempoolAcceptResult(ws.m_state); // Tx was accepted, but not added - if (args.m_test_accept) return true; + if (args.m_test_accept) { + return MempoolAcceptResult(std::move(ws.m_replaced_transactions), ws.m_base_fees); + } - if (!Finalize(args, workspace)) return false; + if (!Finalize(args, ws)) return MempoolAcceptResult(ws.m_state); GetMainSignals().TransactionAddedToMempool(ptx, m_pool.GetAndIncrementSequence()); - return true; + return MempoolAcceptResult(std::move(ws.m_replaced_transactions), ws.m_base_fees); } } // anon namespace /** (try to) add transaction to memory pool with a specified acceptance time **/ -static bool AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, TxValidationState &state, const CTransactionRef &tx, - int64_t nAcceptTime, std::list<CTransactionRef>* plTxnReplaced, - bool bypass_limits, bool test_accept, CAmount* fee_out=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +static MempoolAcceptResult AcceptToMemoryPoolWithTime(const CChainParams& chainparams, CTxMemPool& pool, + CChainState& active_chainstate, + const CTransactionRef &tx, int64_t nAcceptTime, + bool bypass_limits, bool test_accept) + EXCLUSIVE_LOCKS_REQUIRED(cs_main) { std::vector<COutPoint> coins_to_uncache; - MemPoolAccept::ATMPArgs args { chainparams, state, nAcceptTime, plTxnReplaced, bypass_limits, coins_to_uncache, test_accept, fee_out }; - bool res = MemPoolAccept(pool).AcceptSingleTransaction(tx, args); - if (!res) { + MemPoolAccept::ATMPArgs args { chainparams, nAcceptTime, bypass_limits, coins_to_uncache, test_accept }; + + assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); + const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptSingleTransaction(tx, args); + if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) { // Remove coins that were not present in the coins cache before calling ATMPW; // this is to prevent memory DoS in case we receive a large number of // invalid transactions that attempt to overrun the in-memory coins cache // (`CCoinsViewCache::cacheCoins`). 
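        // (PreChecks() pushed into coins_to_uncache every prevout that was not already
        // resident in the coins cache before validation; on failure they are evicted
        // again right here so a flood of invalid transactions cannot grow the cache.)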
for (const COutPoint& hashTx : coins_to_uncache) - ::ChainstateActive().CoinsTip().Uncache(hashTx); + active_chainstate.CoinsTip().Uncache(hashTx); } // After we've (potentially) uncached entries, ensure our coins cache is still within its size limits BlockValidationState state_dummy; - ::ChainstateActive().FlushStateToDisk(chainparams, state_dummy, FlushStateMode::PERIODIC); - return res; + active_chainstate.FlushStateToDisk(chainparams, state_dummy, FlushStateMode::PERIODIC); + return result; } -bool AcceptToMemoryPool(CTxMemPool& pool, TxValidationState &state, const CTransactionRef &tx, - std::list<CTransactionRef>* plTxnReplaced, - bool bypass_limits, bool test_accept, CAmount* fee_out) +MempoolAcceptResult AcceptToMemoryPool(CChainState& active_chainstate, CTxMemPool& pool, const CTransactionRef& tx, + bool bypass_limits, bool test_accept) { - const CChainParams& chainparams = Params(); - return AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, GetTime(), plTxnReplaced, bypass_limits, test_accept, fee_out); + assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); + return AcceptToMemoryPoolWithTime(Params(), pool, active_chainstate, tx, GetTime(), bypass_limits, test_accept); } CTransactionRef GetTransaction(const CBlockIndex* const block_index, const CTxMemPool* const mempool, const uint256& hash, const Consensus::Params& consensusParams, uint256& hashBlock) @@ -1459,7 +1485,10 @@ void InitScriptExecutionCache() { * * Non-static (and re-declared) in src/test/txvalidationcache_tests.cpp */ -bool CheckInputScripts(const CTransaction& tx, TxValidationState &state, const CCoinsViewCache &inputs, unsigned int flags, bool cacheSigStore, bool cacheFullScriptStore, PrecomputedTransactionData& txdata, std::vector<CScriptCheck> *pvChecks) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +bool CheckInputScripts(const CTransaction& tx, TxValidationState& state, + const CCoinsViewCache& inputs, unsigned int flags, bool cacheSigStore, + bool cacheFullScriptStore, PrecomputedTransactionData& txdata, + std::vector<CScriptCheck>* pvChecks) { if (tx.IsCoinBase()) return true; @@ -2249,17 +2278,25 @@ bool CChainState::FlushStateToDisk( { bool fFlushForPrune = false; bool fDoFullFlush = false; + CoinsCacheSizeState cache_state = GetCoinsCacheSizeState(&m_mempool); LOCK(cs_LastBlockFile); if (fPruneMode && (fCheckForPruning || nManualPruneHeight > 0) && !fReindex) { + // make sure we don't prune above the blockfilterindexes bestblocks + // pruning is height-based + int last_prune = m_chain.Height(); // last height we can prune + ForEachBlockFilterIndex([&](BlockFilterIndex& index) { + last_prune = std::max(1, std::min(last_prune, index.GetSummary().best_block_height)); + }); + if (nManualPruneHeight > 0) { LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune (manual)", BCLog::BENCH); - m_blockman.FindFilesToPruneManual(setFilesToPrune, nManualPruneHeight, m_chain.Height()); + m_blockman.FindFilesToPruneManual(setFilesToPrune, std::min(last_prune, nManualPruneHeight), m_chain.Height()); } else { LOG_TIME_MILLIS_WITH_CATEGORY("find files to prune", BCLog::BENCH); - m_blockman.FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight(), m_chain.Height(), IsInitialBlockDownload()); + m_blockman.FindFilesToPrune(setFilesToPrune, chainparams.PruneAfterHeight(), m_chain.Height(), last_prune, IsInitialBlockDownload()); fCheckForPruning = false; } if (!setFilesToPrune.empty()) { @@ -2693,7 +2730,7 @@ bool CChainState::ActivateBestChainStep(BlockValidationState& state, const CChai if 
(!DisconnectTip(state, chainparams, &disconnectpool)) { // This is likely a fatal error, but keep the mempool consistent, // just in case. Only remove from the mempool in this case. - UpdateMempoolForReorg(m_mempool, disconnectpool, false); + UpdateMempoolForReorg(::ChainstateActive(), m_mempool, disconnectpool, false); // If we're unable to disconnect a block during normal operation, // then that is a failure of our local system -- we should abort @@ -2737,7 +2774,7 @@ bool CChainState::ActivateBestChainStep(BlockValidationState& state, const CChai // A system error occurred (disk space, database error, ...). // Make the mempool consistent with the current tip, just in case // any observers try to use it before shutdown. - UpdateMempoolForReorg(m_mempool, disconnectpool, false); + UpdateMempoolForReorg(::ChainstateActive(), m_mempool, disconnectpool, false); return false; } } else { @@ -2754,7 +2791,7 @@ bool CChainState::ActivateBestChainStep(BlockValidationState& state, const CChai if (fBlocksDisconnected) { // If any blocks were disconnected, disconnectpool may be non empty. Add // any disconnected transactions back to the mempool. - UpdateMempoolForReorg(m_mempool, disconnectpool, true); + UpdateMempoolForReorg(::ChainstateActive(), m_mempool, disconnectpool, true); } m_mempool.check(&CoinsTip()); @@ -2991,7 +3028,7 @@ bool CChainState::InvalidateBlock(BlockValidationState& state, const CChainParam // transactions back to the mempool if disconnecting was successful, // and we're not doing a very deep invalidation (in which case // keeping the mempool up to date is probably futile anyway). - UpdateMempoolForReorg(m_mempool, disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret); + UpdateMempoolForReorg(::ChainstateActive(), m_mempool, disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret); if (!ret) return false; assert(invalid_walk_tip->pprev == m_chain.Tip()); @@ -3203,7 +3240,7 @@ static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize, unsigned int n bool finalize_undo = false; if (!fKnown) { - while (vinfoBlockFile[nFile].nSize + nAddSize >= MAX_BLOCKFILE_SIZE) { + while (vinfoBlockFile[nFile].nSize + nAddSize >= (gArgs.GetBoolArg("-fastprune", false) ? 
0x10000 /* 64kb */ : MAX_BLOCKFILE_SIZE)) { // when the undo file is keeping up with the block file, we want to flush it explicitly // when it is lagging behind (more blocks arrive than are being connected), we let the // undo block write case handle it @@ -3219,7 +3256,7 @@ static bool FindBlockPos(FlatFilePos &pos, unsigned int nAddSize, unsigned int n if ((int)nFile != nLastBlockFile) { if (!fKnown) { - LogPrintf("Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString()); + LogPrint(BCLog::VALIDATION, "Leaving block file %i: %s\n", nLastBlockFile, vinfoBlockFile[nLastBlockFile].ToString()); } FlushBlockFile(!fKnown, finalize_undo); nLastBlockFile = nFile; @@ -3934,7 +3971,7 @@ void PruneBlockFilesManual(int nManualPruneHeight) } } -void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, bool is_ibd) +void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd) { LOCK2(cs_main, cs_LastBlockFile); if (chain_tip_height < 0 || nPruneTarget == 0) { @@ -3944,7 +3981,7 @@ void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPr return; } - unsigned int nLastBlockWeCanPrune = chain_tip_height - MIN_BLOCKS_TO_KEEP; + unsigned int nLastBlockWeCanPrune = std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP)); uint64_t nCurrentUsage = CalculateCurrentUsage(); // We don't check to prune until after we've allocated new space for files // So we should leave a buffer under our target to account for another allocation @@ -3995,7 +4032,7 @@ void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPr static FlatFileSeq BlockFileSeq() { - return FlatFileSeq(GetBlocksDir(), "blk", BLOCKFILE_CHUNK_SIZE); + return FlatFileSeq(GetBlocksDir(), "blk", gArgs.GetBoolArg("-fastprune", false) ? 
0x4000 /* 16kb */ : BLOCKFILE_CHUNK_SIZE); } static FlatFileSeq UndoFileSeq() @@ -4161,7 +4198,8 @@ bool static LoadBlockIndexDB(ChainstateManager& chainman, const CChainParams& ch void CChainState::LoadMempool(const ArgsManager& args) { if (args.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { - ::LoadMempool(m_mempool); + assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); + ::LoadMempool(m_mempool, *this); } m_mempool.SetIsLoaded(!ShutdownRequested()); } @@ -4991,7 +5029,7 @@ int VersionBitsTipStateSinceHeight(const Consensus::Params& params, Consensus::D static const uint64_t MEMPOOL_DUMP_VERSION = 1; -bool LoadMempool(CTxMemPool& pool) +bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate) { const CChainParams& chainparams = Params(); int64_t nExpiryTimeout = gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY) * 60 * 60; @@ -5029,13 +5067,11 @@ bool LoadMempool(CTxMemPool& pool) if (amountdelta) { pool.PrioritiseTransaction(tx->GetHash(), amountdelta); } - TxValidationState state; if (nTime > nNow - nExpiryTimeout) { LOCK(cs_main); - AcceptToMemoryPoolWithTime(chainparams, pool, state, tx, nTime, - nullptr /* plTxnReplaced */, false /* bypass_limits */, - false /* test_accept */); - if (state.IsValid()) { + assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); + if (AcceptToMemoryPoolWithTime(chainparams, pool, active_chainstate, tx, nTime, false /* bypass_limits */, + false /* test_accept */).m_result_type == MempoolAcceptResult::ResultType::VALID) { ++count; } else { // mempool may contain the transaction already, e.g. from @@ -5159,8 +5195,9 @@ double GuessVerificationProgress(const ChainTxData& data, const CBlockIndex *pin } Optional<uint256> ChainstateManager::SnapshotBlockhash() const { - LOCK(::cs_main); // for m_active_chainstate access - if (m_active_chainstate != nullptr) { + LOCK(::cs_main); + if (m_active_chainstate != nullptr && + !m_active_chainstate->m_from_snapshot_blockhash.IsNull()) { // If a snapshot chainstate exists, it will always be our active. return m_active_chainstate->m_from_snapshot_blockhash; } @@ -5169,6 +5206,7 @@ Optional<uint256> ChainstateManager::SnapshotBlockhash() const { std::vector<CChainState*> ChainstateManager::GetAll() { + LOCK(::cs_main); std::vector<CChainState*> out; if (!IsSnapshotValidated() && m_ibd_chainstate) { @@ -5204,6 +5242,295 @@ CChainState& ChainstateManager::InitializeChainstate(CTxMemPool& mempool, const return *to_modify; } +const AssumeutxoData* ExpectedAssumeutxo( + const int height, const CChainParams& chainparams) +{ + const MapAssumeutxo& valid_assumeutxos_map = chainparams.Assumeutxo(); + const auto assumeutxo_found = valid_assumeutxos_map.find(height); + + if (assumeutxo_found != valid_assumeutxos_map.end()) { + return &assumeutxo_found->second; + } + return nullptr; +} + +bool ChainstateManager::ActivateSnapshot( + CAutoFile& coins_file, + const SnapshotMetadata& metadata, + bool in_memory) +{ + uint256 base_blockhash = metadata.m_base_blockhash; + + if (this->SnapshotBlockhash()) { + LogPrintf("[snapshot] can't activate a snapshot-based chainstate more than once\n"); + return false; + } + + int64_t current_coinsdb_cache_size{0}; + int64_t current_coinstip_cache_size{0}; + + // Cache percentages to allocate to each chainstate. + // + // These particular percentages don't matter so much since they will only be + // relevant during snapshot activation; caches are rebalanced at the conclusion of + // this function. 
We want to give (essentially) all available cache capacity to the + // snapshot to aid the bulk load later in this function. + static constexpr double IBD_CACHE_PERC = 0.01; + static constexpr double SNAPSHOT_CACHE_PERC = 0.99; + + { + LOCK(::cs_main); + // Resize the coins caches to ensure we're not exceeding memory limits. + // + // Allocate the majority of the cache to the incoming snapshot chainstate, since + // (optimistically) getting to its tip will be the top priority. We'll need to call + // `MaybeRebalanceCaches()` once we're done with this function to ensure + // the right allocation (including the possibility that no snapshot was activated + // and that we should restore the active chainstate caches to their original size). + // + current_coinsdb_cache_size = this->ActiveChainstate().m_coinsdb_cache_size_bytes; + current_coinstip_cache_size = this->ActiveChainstate().m_coinstip_cache_size_bytes; + + // Temporarily resize the active coins cache to make room for the newly-created + // snapshot chain. + this->ActiveChainstate().ResizeCoinsCaches( + static_cast<size_t>(current_coinstip_cache_size * IBD_CACHE_PERC), + static_cast<size_t>(current_coinsdb_cache_size * IBD_CACHE_PERC)); + } + + auto snapshot_chainstate = WITH_LOCK(::cs_main, return MakeUnique<CChainState>( + this->ActiveChainstate().m_mempool, m_blockman, base_blockhash)); + + { + LOCK(::cs_main); + snapshot_chainstate->InitCoinsDB( + static_cast<size_t>(current_coinsdb_cache_size * SNAPSHOT_CACHE_PERC), + in_memory, false, "chainstate"); + snapshot_chainstate->InitCoinsCache( + static_cast<size_t>(current_coinstip_cache_size * SNAPSHOT_CACHE_PERC)); + } + + const bool snapshot_ok = this->PopulateAndValidateSnapshot( + *snapshot_chainstate, coins_file, metadata); + + if (!snapshot_ok) { + WITH_LOCK(::cs_main, this->MaybeRebalanceCaches()); + return false; + } + + { + LOCK(::cs_main); + assert(!m_snapshot_chainstate); + m_snapshot_chainstate.swap(snapshot_chainstate); + const bool chaintip_loaded = m_snapshot_chainstate->LoadChainTip(::Params()); + assert(chaintip_loaded); + + m_active_chainstate = m_snapshot_chainstate.get(); + + LogPrintf("[snapshot] successfully activated snapshot %s\n", base_blockhash.ToString()); + LogPrintf("[snapshot] (%.2f MB)\n", + m_snapshot_chainstate->CoinsTip().DynamicMemoryUsage() / (1000 * 1000)); + + this->MaybeRebalanceCaches(); + } + return true; +} + +bool ChainstateManager::PopulateAndValidateSnapshot( + CChainState& snapshot_chainstate, + CAutoFile& coins_file, + const SnapshotMetadata& metadata) +{ + // It's okay to release cs_main before we're done using `coins_cache` because we know + // that nothing else will be referencing the newly created snapshot_chainstate yet. 
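    // How a caller would drive ActivateSnapshot() above, which in turn calls this
    // function (a sketch; an RPC or init-time hook of this shape is assumed):
    //
    //   CAutoFile coins_file(fsbridge::fopen(snapshot_path, "rb"), SER_DISK, CLIENT_VERSION);
    //   SnapshotMetadata metadata;
    //   coins_file >> metadata;                       // base blockhash, coin count, nChainTx
    //   if (!chainman.ActivateSnapshot(coins_file, metadata, /*in_memory=*/false)) {
    //       LogPrintf("[snapshot] activation failed\n");
    //   }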
+ CCoinsViewCache& coins_cache = *WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsTip()); + + uint256 base_blockhash = metadata.m_base_blockhash; + + COutPoint outpoint; + Coin coin; + const uint64_t coins_count = metadata.m_coins_count; + uint64_t coins_left = metadata.m_coins_count; + + LogPrintf("[snapshot] loading coins from snapshot %s\n", base_blockhash.ToString()); + int64_t flush_now{0}; + int64_t coins_processed{0}; + + while (coins_left > 0) { + try { + coins_file >> outpoint; + } catch (const std::ios_base::failure&) { + LogPrintf("[snapshot] bad snapshot - no coins left after deserializing %d coins\n", + coins_count - coins_left); + return false; + } + coins_file >> coin; + coins_cache.EmplaceCoinInternalDANGER(std::move(outpoint), std::move(coin)); + + --coins_left; + ++coins_processed; + + if (coins_processed % 1000000 == 0) { + LogPrintf("[snapshot] %d coins loaded (%.2f%%, %.2f MB)\n", + coins_processed, + static_cast<float>(coins_processed) * 100 / static_cast<float>(coins_count), + coins_cache.DynamicMemoryUsage() / (1000 * 1000)); + } + + // Batch write and flush (if we need to) every so often. + // + // If our average Coin size is roughly 41 bytes, checking every 120,000 coins + // means <5MB of memory imprecision. + if (coins_processed % 120000 == 0) { + if (ShutdownRequested()) { + return false; + } + + const auto snapshot_cache_state = WITH_LOCK(::cs_main, + return snapshot_chainstate.GetCoinsCacheSizeState(&snapshot_chainstate.m_mempool)); + + if (snapshot_cache_state >= + CoinsCacheSizeState::CRITICAL) { + LogPrintf("[snapshot] flushing coins cache (%.2f MB)... ", /* Continued */ + coins_cache.DynamicMemoryUsage() / (1000 * 1000)); + flush_now = GetTimeMillis(); + + // This is a hack - we don't know what the actual best block is, but that + // doesn't matter for the purposes of flushing the cache here. We'll set this + // to its correct value (`base_blockhash`) below after the coins are loaded. + coins_cache.SetBestBlock(GetRandHash()); + + coins_cache.Flush(); + LogPrintf("done (%.2fms)\n", GetTimeMillis() - flush_now); + } + } + } + + // Important that we set this. This and the coins_cache accesses above are + // sort of a layer violation, but either we reach into the innards of + // CCoinsViewCache here or we have to invert some of the CChainState to + // embed them in a snapshot-activation-specific CCoinsViewCache bulk load + // method. + coins_cache.SetBestBlock(base_blockhash); + + bool out_of_coins{false}; + try { + coins_file >> outpoint; + } catch (const std::ios_base::failure&) { + // We expect an exception since we should be out of coins. + out_of_coins = true; + } + if (!out_of_coins) { + LogPrintf("[snapshot] bad snapshot - coins left over after deserializing %d coins\n", + coins_count); + return false; + } + + LogPrintf("[snapshot] loaded %d (%.2f MB) coins from snapshot %s\n", + coins_count, + coins_cache.DynamicMemoryUsage() / (1000 * 1000), + base_blockhash.ToString()); + + LogPrintf("[snapshot] flushing snapshot chainstate to disk\n"); + // No need to acquire cs_main since this chainstate isn't being used yet. + coins_cache.Flush(); // TODO: if #17487 is merged, add erase=false here for better performance. + + assert(coins_cache.GetBestBlock() == base_blockhash); + + CCoinsStats stats; + auto breakpoint_fnc = [] { /* TODO insert breakpoint here? */ }; + + // As above, okay to immediately release cs_main here since no other context knows + // about the snapshot_chainstate. 
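    // In outline, the remainder of this function:
    //   1) recomputes stats.hashSerialized over the freshly loaded coins (GetUTXOStats below),
    //   2) waits for the base block header to be known, then
    //   3) checks that hash against ExpectedAssumeutxo(base_height, ::Params())->hash_serialized,
    //      the value hard-coded in chainparams for that height.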
+ CCoinsViewDB* snapshot_coinsdb = WITH_LOCK(::cs_main, return &snapshot_chainstate.CoinsDB()); + + if (!GetUTXOStats(snapshot_coinsdb, stats, CoinStatsHashType::HASH_SERIALIZED, breakpoint_fnc)) { + LogPrintf("[snapshot] failed to generate coins stats\n"); + return false; + } + + // Ensure that the base blockhash appears in the known chain of valid headers. We're willing to + // wait a bit here because the snapshot may have been loaded on startup, before we've + // received headers from the network. + + int max_secs_to_wait_for_headers = 60 * 10; + CBlockIndex* snapshot_start_block = nullptr; + + while (max_secs_to_wait_for_headers > 0) { + snapshot_start_block = WITH_LOCK(::cs_main, + return m_blockman.LookupBlockIndex(base_blockhash)); + --max_secs_to_wait_for_headers; + + if (!snapshot_start_block) { + std::this_thread::sleep_for(std::chrono::seconds(1)); + } else { + break; + } + } + + if (snapshot_start_block == nullptr) { + LogPrintf("[snapshot] timed out waiting for snapshot start blockheader %s\n", + base_blockhash.ToString()); + return false; + } + + // Assert that the deserialized chainstate contents match the expected assumeutxo value. + + int base_height = snapshot_start_block->nHeight; + auto maybe_au_data = ExpectedAssumeutxo(base_height, ::Params()); + + if (!maybe_au_data) { + LogPrintf("[snapshot] assumeutxo height in snapshot metadata not recognized " /* Continued */ + "(%d) - refusing to load snapshot\n", base_height); + return false; + } + + const AssumeutxoData& au_data = *maybe_au_data; + + if (stats.hashSerialized != au_data.hash_serialized) { + LogPrintf("[snapshot] bad snapshot content hash: expected %s, got %s\n", + au_data.hash_serialized.ToString(), stats.hashSerialized.ToString()); + return false; + } + + snapshot_chainstate.m_chain.SetTip(snapshot_start_block); + + // The remainder of this function requires modifying data protected by cs_main. + LOCK(::cs_main); + + // Fake various pieces of CBlockIndex state: + // + // - nChainTx: so that we accurately report IBD-to-tip progress + // - nTx: so that LoadBlockIndex() loads assumed-valid CBlockIndex entries + // (among other things) + // - nStatus & BLOCK_OPT_WITNESS: so that RewindBlockIndex() doesn't zealously + // unwind the assumed-valid chain. + // + CBlockIndex* index = nullptr; + for (int i = 0; i <= snapshot_chainstate.m_chain.Height(); ++i) { + index = snapshot_chainstate.m_chain[i]; + + if (!index->nTx) { + index->nTx = 1; + } + index->nChainTx = index->pprev ? index->pprev->nChainTx + index->nTx : 1; + + // We need to fake this flag so that CChainState::RewindBlockIndex() + // won't try to rewind the entire assumed-valid chain on startup. 
+ if (index->pprev && ::IsWitnessEnabled(index->pprev, ::Params().GetConsensus())) { + index->nStatus |= BLOCK_OPT_WITNESS; + } + } + + assert(index); + index->nChainTx = metadata.m_nchaintx; + snapshot_chainstate.setBlockIndexCandidates.insert(snapshot_start_block); + + LogPrintf("[snapshot] validated snapshot (%.2f MB)\n", + coins_cache.DynamicMemoryUsage() / (1000 * 1000)); + return true; +} + CChainState& ChainstateManager::ActiveChainstate() const { LOCK(::cs_main); @@ -5213,11 +5540,13 @@ CChainState& ChainstateManager::ActiveChainstate() const bool ChainstateManager::IsSnapshotActive() const { - return m_snapshot_chainstate && WITH_LOCK(::cs_main, return m_active_chainstate) == m_snapshot_chainstate.get(); + LOCK(::cs_main); + return m_snapshot_chainstate && m_active_chainstate == m_snapshot_chainstate.get(); } CChainState& ChainstateManager::ValidatedChainstate() const { + LOCK(::cs_main); if (m_snapshot_chainstate && IsSnapshotValidated()) { return *m_snapshot_chainstate.get(); } @@ -5227,6 +5556,7 @@ CChainState& ChainstateManager::ValidatedChainstate() const bool ChainstateManager::IsBackgroundIBD(CChainState* chainstate) const { + LOCK(::cs_main); return (m_snapshot_chainstate && chainstate == m_ibd_chainstate.get()); } @@ -5242,12 +5572,10 @@ void ChainstateManager::Unload() void ChainstateManager::Reset() { + LOCK(::cs_main); m_ibd_chainstate.reset(); m_snapshot_chainstate.reset(); - { - LOCK(::cs_main); - m_active_chainstate = nullptr; - } + m_active_chainstate = nullptr; m_snapshot_validated = false; } diff --git a/src/validation.h b/src/validation.h index fc7add85b7..4e4bdbea54 100644 --- a/src/validation.h +++ b/src/validation.h @@ -11,9 +11,12 @@ #endif #include <amount.h> +#include <attributes.h> #include <coins.h> +#include <consensus/validation.h> #include <crypto/common.h> // for ReadLE64 #include <fs.h> +#include <node/utxo_snapshot.h> #include <optional.h> #include <policy/feerate.h> #include <protocol.h> // For CMessageHeader::MessageStartChars @@ -23,6 +26,7 @@ #include <txdb.h> #include <versionbits.h> #include <serialize.h> +#include <util/check.h> #include <util/hasher.h> #include <atomic> @@ -46,12 +50,12 @@ class CConnman; class CScriptCheck; class CTxMemPool; class ChainstateManager; -class TxValidationState; struct ChainTxData; struct DisconnectedBlockTransactions; struct PrecomputedTransactionData; struct LockPoints; +struct AssumeutxoData; /** Default for -minrelaytxfee, minimum relay fee for transactions */ static const unsigned int DEFAULT_MIN_RELAY_TX_FEE = 1000; @@ -181,12 +185,46 @@ void UnlinkPrunedFiles(const std::set<int>& setFilesToPrune); /** Prune block files up to a given height */ void PruneBlockFilesManual(int nManualPruneHeight); -/** (try to) add transaction to memory pool - * plTxnReplaced will be appended to with all transactions replaced from mempool - * @param[out] fee_out optional argument to return tx fee to the caller **/ -bool AcceptToMemoryPool(CTxMemPool& pool, TxValidationState &state, const CTransactionRef &tx, - std::list<CTransactionRef>* plTxnReplaced, - bool bypass_limits, bool test_accept=false, CAmount* fee_out=nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); +/** +* Validation result for a single transaction mempool acceptance. +*/ +struct MempoolAcceptResult { + /** Used to indicate the results of mempool validation, + * including the possibility of unfinished validation. + */ + enum class ResultType { + VALID, //!> Fully validated, valid. + INVALID, //!> Invalid. 
+ }; + ResultType m_result_type; + TxValidationState m_state; + + // The following fields are only present when m_result_type = ResultType::VALID + /** Mempool transactions replaced by the tx per BIP 125 rules. */ + std::optional<std::list<CTransactionRef>> m_replaced_transactions; + /** Raw base fees. */ + std::optional<CAmount> m_base_fees; + + /** Constructor for failure case */ + explicit MempoolAcceptResult(TxValidationState state) + : m_result_type(ResultType::INVALID), + m_state(state), m_replaced_transactions(nullopt), m_base_fees(nullopt) { + Assume(!state.IsValid()); // Can be invalid or error + } + + /** Constructor for success case */ + explicit MempoolAcceptResult(std::list<CTransactionRef>&& replaced_txns, CAmount fees) + : m_result_type(ResultType::VALID), m_state(TxValidationState{}), + m_replaced_transactions(std::move(replaced_txns)), m_base_fees(fees) {} +}; + +/** + * (Try to) add a transaction to the memory pool. + * @param[in] bypass_limits When true, don't enforce mempool fee limits. + * @param[in] test_accept When true, run validation checks but don't submit to mempool. + */ +MempoolAcceptResult AcceptToMemoryPool(CChainState& active_chainstate, CTxMemPool& pool, const CTransactionRef& tx, + bool bypass_limits, bool test_accept=false) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** Get the BIP9 state for a given deployment at the current tip. */ ThresholdState VersionBitsTipState(const Consensus::Params& params, Consensus::DeploymentPos pos); @@ -210,12 +248,12 @@ void UpdateCoins(const CTransaction& tx, CCoinsViewCache& inputs, int nHeight); * * See consensus/consensus.h for flag definitions. */ -bool CheckFinalTx(const CTransaction &tx, int flags = -1) EXCLUSIVE_LOCKS_REQUIRED(cs_main); +bool CheckFinalTx(const CBlockIndex* active_chain_tip, const CTransaction &tx, int flags = -1) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** * Test whether the LockPoints height and time are still valid on the current chain */ -bool TestLockPointValidity(const LockPoints* lp) EXCLUSIVE_LOCKS_REQUIRED(cs_main); +bool TestLockPointValidity(CChain& active_chain, const LockPoints* lp) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** * Check if transaction will be BIP 68 final in the next block to be created. @@ -228,7 +266,12 @@ bool TestLockPointValidity(const LockPoints* lp) EXCLUSIVE_LOCKS_REQUIRED(cs_mai * * See consensus/consensus.h for flag definitions. */ -bool CheckSequenceLocks(const CTxMemPool& pool, const CTransaction& tx, int flags, LockPoints* lp = nullptr, bool useExistingLockPoints = false) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, pool.cs); +bool CheckSequenceLocks(CChainState& active_chainstate, + const CTxMemPool& pool, + const CTransaction& tx, + int flags, + LockPoints* lp = nullptr, + bool useExistingLockPoints = false) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, pool.cs); /** * Closure representing one script verification @@ -361,7 +404,7 @@ private: * * @param[out] setFilesToPrune The set of file indices that can be unlinked will be returned */ - void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, bool is_ibd); + void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd); public: BlockMap m_block_index GUARDED_BY(cs_main); @@ -795,35 +838,29 @@ private: //! using this pointer (e.g. net_processing). //! //! Once this pointer is set to a corresponding chainstate, it will not - //! be reset until init.cpp:Shutdown(). This means it is safe to acquire - //! 
the contents of this pointer with ::cs_main held, release the lock, - //! and then use the reference without concern of it being deconstructed. + //! be reset until init.cpp:Shutdown(). //! //! This is especially important when, e.g., calling ActivateBestChain() //! on all chainstates because we are not able to hold ::cs_main going into //! that call. - std::unique_ptr<CChainState> m_ibd_chainstate; + std::unique_ptr<CChainState> m_ibd_chainstate GUARDED_BY(::cs_main); //! A chainstate initialized on the basis of a UTXO snapshot. If this is //! non-null, it is always our active chainstate. //! //! Once this pointer is set to a corresponding chainstate, it will not - //! be reset until init.cpp:Shutdown(). This means it is safe to acquire - //! the contents of this pointer with ::cs_main held, release the lock, - //! and then use the reference without concern of it being deconstructed. + //! be reset until init.cpp:Shutdown(). //! //! This is especially important when, e.g., calling ActivateBestChain() //! on all chainstates because we are not able to hold ::cs_main going into //! that call. - std::unique_ptr<CChainState> m_snapshot_chainstate; + std::unique_ptr<CChainState> m_snapshot_chainstate GUARDED_BY(::cs_main); //! Points to either the ibd or snapshot chainstate; indicates our //! most-work chain. //! //! Once this pointer is set to a corresponding chainstate, it will not - //! be reset until init.cpp:Shutdown(). This means it is safe to acquire - //! the contents of this pointer with ::cs_main held, release the lock, - //! and then use the reference without concern of it being deconstructed. + //! be reset until init.cpp:Shutdown(). //! //! This is especially important when, e.g., calling ActivateBestChain() //! on all chainstates because we are not able to hold ::cs_main going into @@ -834,6 +871,12 @@ private: //! by the background validation chainstate. bool m_snapshot_validated{false}; + //! Internal helper for ActivateSnapshot(). + [[nodiscard]] bool PopulateAndValidateSnapshot( + CChainState& snapshot_chainstate, + CAutoFile& coins_file, + const SnapshotMetadata& metadata); + // For access to m_active_chainstate. friend CChainState& ChainstateActive(); friend CChain& ChainActive(); @@ -864,6 +907,22 @@ public: //! Get all chainstates currently being used. std::vector<CChainState*> GetAll(); + //! Construct and activate a Chainstate on the basis of UTXO snapshot data. + //! + //! Steps: + //! + //! - Initialize an unused CChainState. + //! - Load its `CoinsViews` contents from `coins_file`. + //! - Verify that the hash of the resulting coinsdb matches the expected hash + //! per assumeutxo chain parameters. + //! - Wait for our headers chain to include the base block of the snapshot. + //! - "Fast forward" the tip of the new chainstate to the base of the snapshot, + //! faking nTx* block index data along the way. + //! - Move the new chainstate to `m_snapshot_chainstate` and make it our + //! ChainstateActive(). + [[nodiscard]] bool ActivateSnapshot( + CAutoFile& coins_file, const SnapshotMetadata& metadata, bool in_memory); + //! The most-work chain. CChainState& ActiveChainstate() const; CChain& ActiveChain() const { return ActiveChainstate().m_chain; } @@ -970,7 +1029,7 @@ CBlockFileInfo* GetBlockFileInfo(size_t n); bool DumpMempool(const CTxMemPool& pool); /** Load the mempool from disk. */ -bool LoadMempool(CTxMemPool& pool); +bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate); //! Check whether the block associated with this index entry is pruned or not. 
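Stepping briefly outside the diff: the reworked AcceptToMemoryPool declared in the validation.h hunk above reports everything through the returned MempoolAcceptResult instead of out-parameters. A minimal caller sketch (a hypothetical call site, not taken from the patch; `active_chainstate`, `pool` and `tx` are assumed to be in scope, and cs_main must be held as the annotation requires):

    LOCK(cs_main);
    const MempoolAcceptResult result = AcceptToMemoryPool(active_chainstate, pool, tx,
                                                          /* bypass_limits */ false,
                                                          /* test_accept */ true);
    if (result.m_result_type == MempoolAcceptResult::ResultType::VALID) {
        // m_base_fees and m_replaced_transactions are only populated for VALID results.
        const CAmount fee = result.m_base_fees.value();
        const size_t num_replaced = result.m_replaced_transactions.value().size();
        LogPrintf("tx passes mempool checks: fee=%d, would replace %d mempool txs\n", fee, num_replaced);
    } else {
        LogPrintf("tx rejected: %s\n", result.m_state.GetRejectReason());
    }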
inline bool IsBlockPruned(const CBlockIndex* pblockindex) @@ -978,4 +1037,13 @@ inline bool IsBlockPruned(const CBlockIndex* pblockindex) return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0); } +/** + * Return the expected assumeutxo value for a given height, if one exists. + * + * @param height[in] Get the assumeutxo value for this height. + * + * @returns empty if no assumeutxo configuration exists for the given height. + */ +const AssumeutxoData* ExpectedAssumeutxo(const int height, const CChainParams& params); + #endif // BITCOIN_VALIDATION_H diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp index 8505ddc309..99803a91d2 100644 --- a/src/wallet/rpcdump.cpp +++ b/src/wallet/rpcdump.cpp @@ -934,9 +934,9 @@ static std::string RecurseImportData(const CScript& script, ImportData& import_d case TxoutType::NONSTANDARD: case TxoutType::WITNESS_UNKNOWN: case TxoutType::WITNESS_V1_TAPROOT: - default: return "unrecognized script"; - } + } // no default case, so the compiler can warn about missing cases + CHECK_NONFATAL(false); } static UniValue ProcessImportLegacy(ImportData& import_data, std::map<CKeyID, CPubKey>& pubkey_map, std::map<CKeyID, CKey>& privkey_map, std::set<CScript>& script_pub_keys, bool& have_solving_data, const UniValue& data, std::vector<CKeyID>& ordered_pubkeys) diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index 46de273d63..53232db6bc 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -400,6 +400,12 @@ UniValue SendMoney(CWallet* const pwallet, const CCoinControl &coin_control, std { EnsureWalletIsUnlocked(pwallet); + // This function is only used by sendtoaddress and sendmany. + // This should always try to sign, if we don't have private keys, don't try to do anything here. 
+ if (pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) { + throw JSONRPCError(RPC_WALLET_ERROR, "Error: Private keys are disabled for this wallet"); + } + // Shuffle recipient list std::shuffle(recipients.begin(), recipients.end(), FastRandomContext()); @@ -409,7 +415,7 @@ UniValue SendMoney(CWallet* const pwallet, const CCoinControl &coin_control, std bilingual_str error; CTransactionRef tx; FeeCalculation fee_calc_out; - bool fCreated = pwallet->CreateTransaction(recipients, tx, nFeeRequired, nChangePosRet, error, coin_control, fee_calc_out, !pwallet->IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)); + const bool fCreated = pwallet->CreateTransaction(recipients, tx, nFeeRequired, nChangePosRet, error, coin_control, fee_calc_out, true); if (!fCreated) { throw JSONRPCError(RPC_WALLET_INSUFFICIENT_FUNDS, error.original); } @@ -3787,6 +3793,7 @@ RPCHelpMan getaddressinfo() {RPCResult::Type::BOOL, "iswatchonly", "If the address is watchonly."}, {RPCResult::Type::BOOL, "solvable", "If we know how to spend coins sent to this address, ignoring the possible lack of private keys."}, {RPCResult::Type::STR, "desc", /* optional */ true, "A descriptor for spending coins sent to this address (only when solvable)."}, + {RPCResult::Type::STR, "parent_desc", /* optional */ true, "The descriptor used to derive this address if this is a descriptor wallet"}, {RPCResult::Type::BOOL, "isscript", "If the key is a script."}, {RPCResult::Type::BOOL, "ischange", "If the address was used for change output."}, {RPCResult::Type::BOOL, "iswitness", "If the address is a witness address."}, @@ -3862,6 +3869,14 @@ RPCHelpMan getaddressinfo() ret.pushKV("desc", InferDescriptor(scriptPubKey, *provider)->ToString()); } + DescriptorScriptPubKeyMan* desc_spk_man = dynamic_cast<DescriptorScriptPubKeyMan*>(pwallet->GetScriptPubKeyMan(scriptPubKey)); + if (desc_spk_man) { + std::string desc_str; + if (desc_spk_man->GetDescriptorString(desc_str, false)) { + ret.pushKV("parent_desc", desc_str); + } + } + ret.pushKV("iswatchonly", bool(mine & ISMINE_WATCH_ONLY)); UniValue detail = DescribeWalletAddress(pwallet, dest); diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index 15972fe7bb..4630603f8e 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -94,8 +94,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s TxoutType whichType = Solver(scriptPubKey, vSolutions); CKeyID keyID; - switch (whichType) - { + switch (whichType) { case TxoutType::NONSTANDARD: case TxoutType::NULL_DATA: case TxoutType::WITNESS_UNKNOWN: @@ -194,7 +193,7 @@ IsMineResult IsMineInner(const LegacyScriptPubKeyMan& keystore, const CScript& s } break; } - } + } // no default case, so the compiler can warn about missing cases if (ret == IsMineResult::NO && keystore.HaveWatchOnly(scriptPubKey)) { ret = std::max(ret, IsMineResult::WATCH_ONLY); @@ -2265,3 +2264,16 @@ const std::vector<CScript> DescriptorScriptPubKeyMan::GetScriptPubKeys() const } return script_pub_keys; } + +bool DescriptorScriptPubKeyMan::GetDescriptorString(std::string& out, bool priv) const +{ + LOCK(cs_desc_man); + if (m_storage.IsLocked()) { + return false; + } + + FlatSigningProvider provider; + provider.keys = GetKeys(); + + return m_wallet_descriptor.descriptor->ToNormalizedString(provider, out, priv); +} diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h index 8f6b69bc78..51283e791d 100644 --- a/src/wallet/scriptpubkeyman.h +++ b/src/wallet/scriptpubkeyman.h @@ -616,6 
+616,8 @@ public: const WalletDescriptor GetWalletDescriptor() const EXCLUSIVE_LOCKS_REQUIRED(cs_desc_man); const std::vector<CScript> GetScriptPubKeys() const; + + bool GetDescriptorString(std::string& out, bool priv) const; }; #endif // BITCOIN_WALLET_SCRIPTPUBKEYMAN_H diff --git a/src/wallet/test/db_tests.cpp b/src/wallet/test/db_tests.cpp index 27179839b7..b2eb8e4bca 100644 --- a/src/wallet/test/db_tests.cpp +++ b/src/wallet/test/db_tests.cpp @@ -30,8 +30,8 @@ BOOST_AUTO_TEST_CASE(getwalletenv_file) std::string filename; std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(file_path, filename); - BOOST_CHECK(filename == test_name); - BOOST_CHECK(env->Directory() == datadir); + BOOST_CHECK_EQUAL(filename, test_name); + BOOST_CHECK_EQUAL(env->Directory(), datadir); } BOOST_AUTO_TEST_CASE(getwalletenv_directory) @@ -41,8 +41,8 @@ BOOST_AUTO_TEST_CASE(getwalletenv_directory) std::string filename; std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(datadir, filename); - BOOST_CHECK(filename == expected_name); - BOOST_CHECK(env->Directory() == datadir); + BOOST_CHECK_EQUAL(filename, expected_name); + BOOST_CHECK_EQUAL(env->Directory(), datadir); } BOOST_AUTO_TEST_CASE(getwalletenv_g_dbenvs_multiple) diff --git a/src/wallet/test/init_tests.cpp b/src/wallet/test/init_tests.cpp index e70b56c529..45e1b8c4b8 100644 --- a/src/wallet/test/init_tests.cpp +++ b/src/wallet/test/init_tests.cpp @@ -19,7 +19,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_default) BOOST_CHECK(result == true); fs::path walletdir = gArgs.GetArg("-walletdir", ""); fs::path expected_path = fs::canonical(m_walletdir_path_cases["default"]); - BOOST_CHECK(walletdir == expected_path); + BOOST_CHECK_EQUAL(walletdir, expected_path); } BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_custom) @@ -29,7 +29,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_custom) BOOST_CHECK(result == true); fs::path walletdir = gArgs.GetArg("-walletdir", ""); fs::path expected_path = fs::canonical(m_walletdir_path_cases["custom"]); - BOOST_CHECK(walletdir == expected_path); + BOOST_CHECK_EQUAL(walletdir, expected_path); } BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_does_not_exist) @@ -69,7 +69,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_no_trailing) BOOST_CHECK(result == true); fs::path walletdir = gArgs.GetArg("-walletdir", ""); fs::path expected_path = fs::canonical(m_walletdir_path_cases["default"]); - BOOST_CHECK(walletdir == expected_path); + BOOST_CHECK_EQUAL(walletdir, expected_path); } BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_no_trailing2) @@ -79,7 +79,7 @@ BOOST_AUTO_TEST_CASE(walletinit_verify_walletdir_no_trailing2) BOOST_CHECK(result == true); fs::path walletdir = gArgs.GetArg("-walletdir", ""); fs::path expected_path = fs::canonical(m_walletdir_path_cases["default"]); - BOOST_CHECK(walletdir == expected_path); + BOOST_CHECK_EQUAL(walletdir, expected_path); } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp index 4e6270220e..69854cae05 100644 --- a/src/wallet/walletdb.cpp +++ b/src/wallet/walletdb.cpp @@ -551,13 +551,6 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, CHDChain chain; ssValue >> chain; pwallet->GetOrCreateLegacyScriptPubKeyMan()->LoadHDChain(chain); - } else if (strType == DBKeys::FLAGS) { - uint64_t flags; - ssValue >> flags; - if (!pwallet->LoadWalletFlags(flags)) { - strErr = "Error reading wallet database: Unknown non-tolerable wallet flags found"; - return false; - } } else if (strType == DBKeys::OLD_KEY) { 
strErr = "Found unsupported 'wkey' record, try loading with version 0.18"; return false; @@ -662,7 +655,8 @@ ReadKeyValue(CWallet* pwallet, CDataStream& ssKey, CDataStream& ssValue, wss.fIsEncrypted = true; } else if (strType != DBKeys::BESTBLOCK && strType != DBKeys::BESTBLOCK_NOMERKLE && strType != DBKeys::MINVERSION && strType != DBKeys::ACENTRY && - strType != DBKeys::VERSION && strType != DBKeys::SETTINGS) { + strType != DBKeys::VERSION && strType != DBKeys::SETTINGS && + strType != DBKeys::FLAGS) { wss.m_unknown_records++; } } catch (const std::exception& e) { @@ -707,6 +701,16 @@ DBErrors WalletBatch::LoadWallet(CWallet* pwallet) pwallet->LoadMinVersion(nMinVersion); } + // Load wallet flags, so they are known when processing other records. + // The FLAGS key is absent during wallet creation. + uint64_t flags; + if (m_batch->Read(DBKeys::FLAGS, flags)) { + if (!pwallet->LoadWalletFlags(flags)) { + pwallet->WalletLogPrintf("Error reading wallet database: Unknown non-tolerable wallet flags found\n"); + return DBErrors::CORRUPT; + } + } + // Get cursor if (!m_batch->StartCursor()) { diff --git a/src/wallet/wallettool.cpp b/src/wallet/wallettool.cpp index bc90491a2c..b2cb0bf479 100644 --- a/src/wallet/wallettool.cpp +++ b/src/wallet/wallettool.cpp @@ -103,10 +103,8 @@ static void WalletShowInfo(CWallet* wallet_instance) tfm::format(std::cout, "Address Book: %zu\n", wallet_instance->m_address_book.size()); } -bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command, const std::string& name) +bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command) { - const fs::path path = fsbridge::AbsPathJoin(GetWalletDir(), name); - if (args.IsArgSet("-format") && command != "createfromdump") { tfm::format(std::cerr, "The -format option can only be used with the \"createfromdump\" command.\n"); return false; @@ -119,6 +117,12 @@ bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command, tfm::format(std::cerr, "The -descriptors option can only be used with the 'create' command.\n"); return false; } + if (command == "create" && !args.IsArgSet("-wallet")) { + tfm::format(std::cerr, "Wallet name must be provided when creating a new wallet.\n"); + return false; + } + const std::string name = args.GetArg("-wallet", ""); + const fs::path path = fsbridge::AbsPathJoin(GetWalletDir(), name); if (command == "create") { DatabaseOptions options; diff --git a/src/wallet/wallettool.h b/src/wallet/wallettool.h index f544a6f727..f4516bb5bc 100644 --- a/src/wallet/wallettool.h +++ b/src/wallet/wallettool.h @@ -10,7 +10,7 @@ namespace WalletTool { void WalletShowInfo(CWallet* wallet_instance); -bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command, const std::string& file); +bool ExecuteWalletToolFunc(const ArgsManager& args, const std::string& command); } // namespace WalletTool diff --git a/test/functional/README.md b/test/functional/README.md index 2d04413eb2..d830ba0334 100644 --- a/test/functional/README.md +++ b/test/functional/README.md @@ -63,10 +63,13 @@ don't have test cases for. - Avoid stop-starting the nodes multiple times during the test if possible. A stop-start takes several seconds, so doing it several times blows up the runtime of the test. -- Set the `self.setup_clean_chain` variable in `set_test_params()` to control whether - or not to use the cached data directories. The cached data directories - contain a 200-block pre-mined blockchain and wallets for four nodes. 
Each node - has 25 mature blocks (25x50=1250 BTC) in its wallet. +- Set the `self.setup_clean_chain` variable in `set_test_params()` to `True` to + initialize an empty blockchain and start from the Genesis block, rather than + load a premined blockchain from cache with the default value of `False`. The + cached data directories contain a 200-block pre-mined blockchain with the + spendable mining rewards being split between four nodes. Each node has 25 + mature block subsidies (25x50=1250 BTC) in its wallet. Using them is much more + efficient than mining blocks in your test. - When calling RPCs with lots of arguments, consider using named keyword arguments instead of positional arguments to make the intent of the call clear to readers. diff --git a/test/functional/data/invalid_txs.py b/test/functional/data/invalid_txs.py index 6e72db1d96..fab921ef19 100644 --- a/test/functional/data/invalid_txs.py +++ b/test/functional/data/invalid_txs.py @@ -57,7 +57,7 @@ class BadTxTemplate: __metaclass__ = abc.ABCMeta # The expected error code given by bitcoind upon submission of the tx. - reject_reason = "" # type: Optional[str] + reject_reason: Optional[str] = "" # Only specified if it differs from mempool acceptance error. block_reject_reason = "" diff --git a/test/functional/example_test.py b/test/functional/example_test.py index 97f24e1b6e..a0eb213a78 100755 --- a/test/functional/example_test.py +++ b/test/functional/example_test.py @@ -76,6 +76,9 @@ class ExampleTest(BitcoinTestFramework): """Override test parameters for your individual test. This method must be overridden and num_nodes must be explicitly set.""" + # By default every test loads a pre-mined chain of 200 blocks from cache. + # Set setup_clean_chain to True to skip this and start from the Genesis + # block. self.setup_clean_chain = True self.num_nodes = 3 # Use self.extra_args to change command-line arguments for the nodes diff --git a/test/functional/feature_asmap.py b/test/functional/feature_asmap.py index 2c6553fbe2..5fcecb4882 100755 --- a/test/functional/feature_asmap.py +++ b/test/functional/feature_asmap.py @@ -36,7 +36,6 @@ def expected_messages(filename): class AsmapTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 def test_without_asmap_arg(self): diff --git a/test/functional/feature_assumevalid.py b/test/functional/feature_assumevalid.py index 603d7f5d3b..1a148f04f4 100755 --- a/test/functional/feature_assumevalid.py +++ b/test/functional/feature_assumevalid.py @@ -29,9 +29,11 @@ Start three nodes: block 200. node2 will reject block 102 since it's assumed valid, but it isn't buried by at least two weeks' work. 
""" -import time -from test_framework.blocktools import (create_block, create_coinbase) +from test_framework.blocktools import ( + create_block, + create_coinbase, +) from test_framework.key import ECKey from test_framework.messages import ( CBlockHeader, @@ -79,24 +81,6 @@ class AssumeValidTest(BitcoinTestFramework): assert not p2p_conn.is_connected break - def assert_blockchain_height(self, node, height): - """Wait until the blockchain is no longer advancing and verify it's reached the expected height.""" - last_height = node.getblock(node.getbestblockhash())['height'] - timeout = 10 - while True: - time.sleep(0.25) - current_height = node.getblock(node.getbestblockhash())['height'] - if current_height != last_height: - last_height = current_height - if timeout < 0: - assert False, "blockchain too short after timeout: %d" % current_height - timeout - 0.25 - continue - elif current_height > height: - assert False, "blockchain too long: %d" % current_height - elif current_height == height: - break - def run_test(self): p2p0 = self.nodes[0].add_p2p_connection(BaseNode()) @@ -177,7 +161,8 @@ class AssumeValidTest(BitcoinTestFramework): # Send blocks to node0. Block 102 will be rejected. self.send_blocks_until_disconnected(p2p0) - self.assert_blockchain_height(self.nodes[0], 101) + self.wait_until(lambda: self.nodes[0].getblockcount() >= 101) + assert_equal(self.nodes[0].getblockcount(), 101) # Send all blocks to node1. All blocks will be accepted. for i in range(2202): @@ -188,7 +173,8 @@ class AssumeValidTest(BitcoinTestFramework): # Send blocks to node2. Block 102 will be rejected. self.send_blocks_until_disconnected(p2p2) - self.assert_blockchain_height(self.nodes[2], 101) + self.wait_until(lambda: self.nodes[2].getblockcount() >= 101) + assert_equal(self.nodes[2].getblockcount(), 101) if __name__ == '__main__': diff --git a/test/functional/feature_backwards_compatibility.py b/test/functional/feature_backwards_compatibility.py index b161c71a85..e6a53b52db 100755 --- a/test/functional/feature_backwards_compatibility.py +++ b/test/functional/feature_backwards_compatibility.py @@ -354,73 +354,75 @@ class BackwardsCompatibilityTest(BitcoinTestFramework): hdkeypath = v17_info["hdkeypath"] pubkey = v17_info["pubkey"] - # Copy the 0.16 wallet to the last Bitcoin Core version and open it: - shutil.copyfile( - os.path.join(node_v16_wallets_dir, "wallets/u1_v16"), - os.path.join(node_master_wallets_dir, "u1_v16") - ) - load_res = node_master.loadwallet("u1_v16") - # Make sure this wallet opens without warnings. 
See https://github.com/bitcoin/bitcoin/pull/19054 - assert_equal(load_res['warning'], '') - wallet = node_master.get_wallet_rpc("u1_v16") - info = wallet.getaddressinfo(v16_addr) - descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + v16_pubkey + ")" - assert_equal(info["desc"], descsum_create(descriptor)) - - # Now copy that same wallet back to 0.16 to make sure no automatic upgrade breaks it - os.remove(os.path.join(node_v16_wallets_dir, "wallets/u1_v16")) - shutil.copyfile( - os.path.join(node_master_wallets_dir, "u1_v16"), - os.path.join(node_v16_wallets_dir, "wallets/u1_v16") - ) - self.start_node(-1, extra_args=["-wallet=u1_v16"]) - wallet = node_v16.get_wallet_rpc("u1_v16") - info = wallet.validateaddress(v16_addr) - assert_equal(info, v16_info) - - # Copy the 0.17 wallet to the last Bitcoin Core version and open it: - node_v17.unloadwallet("u1_v17") - shutil.copytree( - os.path.join(node_v17_wallets_dir, "u1_v17"), - os.path.join(node_master_wallets_dir, "u1_v17") - ) - node_master.loadwallet("u1_v17") - wallet = node_master.get_wallet_rpc("u1_v17") - info = wallet.getaddressinfo(address) - descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + pubkey + ")" - assert_equal(info["desc"], descsum_create(descriptor)) - - # Now copy that same wallet back to 0.17 to make sure no automatic upgrade breaks it - node_master.unloadwallet("u1_v17") - shutil.rmtree(os.path.join(node_v17_wallets_dir, "u1_v17")) - shutil.copytree( - os.path.join(node_master_wallets_dir, "u1_v17"), - os.path.join(node_v17_wallets_dir, "u1_v17") - ) - node_v17.loadwallet("u1_v17") - wallet = node_v17.get_wallet_rpc("u1_v17") - info = wallet.getaddressinfo(address) - assert_equal(info, v17_info) - - # Copy the 0.19 wallet to the last Bitcoin Core version and open it: - shutil.copytree( - os.path.join(node_v19_wallets_dir, "w1_v19"), - os.path.join(node_master_wallets_dir, "w1_v19") - ) - node_master.loadwallet("w1_v19") - wallet = node_master.get_wallet_rpc("w1_v19") - assert wallet.getaddressinfo(address_18075)["solvable"] + if self.is_bdb_compiled(): + # Old wallets are BDB and will only work if BDB is compiled + # Copy the 0.16 wallet to the last Bitcoin Core version and open it: + shutil.copyfile( + os.path.join(node_v16_wallets_dir, "wallets/u1_v16"), + os.path.join(node_master_wallets_dir, "u1_v16") + ) + load_res = node_master.loadwallet("u1_v16") + # Make sure this wallet opens without warnings. 
See https://github.com/bitcoin/bitcoin/pull/19054 + assert_equal(load_res['warning'], '') + wallet = node_master.get_wallet_rpc("u1_v16") + info = wallet.getaddressinfo(v16_addr) + descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + v16_pubkey + ")" + assert_equal(info["desc"], descsum_create(descriptor)) + + # Now copy that same wallet back to 0.16 to make sure no automatic upgrade breaks it + os.remove(os.path.join(node_v16_wallets_dir, "wallets/u1_v16")) + shutil.copyfile( + os.path.join(node_master_wallets_dir, "u1_v16"), + os.path.join(node_v16_wallets_dir, "wallets/u1_v16") + ) + self.start_node(-1, extra_args=["-wallet=u1_v16"]) + wallet = node_v16.get_wallet_rpc("u1_v16") + info = wallet.validateaddress(v16_addr) + assert_equal(info, v16_info) - # Now copy that same wallet back to 0.19 to make sure no automatic upgrade breaks it - node_master.unloadwallet("w1_v19") - shutil.rmtree(os.path.join(node_v19_wallets_dir, "w1_v19")) - shutil.copytree( - os.path.join(node_master_wallets_dir, "w1_v19"), - os.path.join(node_v19_wallets_dir, "w1_v19") - ) - node_v19.loadwallet("w1_v19") - wallet = node_v19.get_wallet_rpc("w1_v19") - assert wallet.getaddressinfo(address_18075)["solvable"] + # Copy the 0.17 wallet to the last Bitcoin Core version and open it: + node_v17.unloadwallet("u1_v17") + shutil.copytree( + os.path.join(node_v17_wallets_dir, "u1_v17"), + os.path.join(node_master_wallets_dir, "u1_v17") + ) + node_master.loadwallet("u1_v17") + wallet = node_master.get_wallet_rpc("u1_v17") + info = wallet.getaddressinfo(address) + descriptor = "wpkh([" + info["hdmasterfingerprint"] + hdkeypath[1:] + "]" + pubkey + ")" + assert_equal(info["desc"], descsum_create(descriptor)) + + # Now copy that same wallet back to 0.17 to make sure no automatic upgrade breaks it + node_master.unloadwallet("u1_v17") + shutil.rmtree(os.path.join(node_v17_wallets_dir, "u1_v17")) + shutil.copytree( + os.path.join(node_master_wallets_dir, "u1_v17"), + os.path.join(node_v17_wallets_dir, "u1_v17") + ) + node_v17.loadwallet("u1_v17") + wallet = node_v17.get_wallet_rpc("u1_v17") + info = wallet.getaddressinfo(address) + assert_equal(info, v17_info) + + # Copy the 0.19 wallet to the last Bitcoin Core version and open it: + shutil.copytree( + os.path.join(node_v19_wallets_dir, "w1_v19"), + os.path.join(node_master_wallets_dir, "w1_v19") + ) + node_master.loadwallet("w1_v19") + wallet = node_master.get_wallet_rpc("w1_v19") + assert wallet.getaddressinfo(address_18075)["solvable"] + + # Now copy that same wallet back to 0.19 to make sure no automatic upgrade breaks it + node_master.unloadwallet("w1_v19") + shutil.rmtree(os.path.join(node_v19_wallets_dir, "w1_v19")) + shutil.copytree( + os.path.join(node_master_wallets_dir, "w1_v19"), + os.path.join(node_v19_wallets_dir, "w1_v19") + ) + node_v19.loadwallet("w1_v19") + wallet = node_v19.get_wallet_rpc("w1_v19") + assert wallet.getaddressinfo(address_18075)["solvable"] if __name__ == '__main__': BackwardsCompatibilityTest().main() diff --git a/test/functional/feature_blockfilterindex_prune.py b/test/functional/feature_blockfilterindex_prune.py new file mode 100755 index 0000000000..455073ef9c --- /dev/null +++ b/test/functional/feature_blockfilterindex_prune.py @@ -0,0 +1,54 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test blockfilterindex in conjunction with prune.""" +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import ( + assert_raises_rpc_error, + assert_greater_than, +) + + +class FeatureBlockfilterindexPruneTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 2 + self.extra_args = [["-fastprune", "-prune=1"], ["-fastprune", "-prune=1", "-blockfilterindex=1"]] + + def run_test(self): + # test basic pruning compatibility & filter access of pruned blocks + self.log.info("check if we can access a blockfilter when pruning is enabled but no blocks are actually pruned") + assert len(self.nodes[1].getblockfilter(self.nodes[1].getbestblockhash())['filter']) > 0 + # Mine two batches of blocks to avoid hitting NODE_NETWORK_LIMITED_MIN_BLOCKS disconnection + self.nodes[1].generate(250) + self.sync_all() + self.nodes[1].generate(250) + self.sync_all() + self.log.info("prune some blocks") + pruneheight = self.nodes[1].pruneblockchain(400) + assert pruneheight != 0 + self.log.info("check if we can access the tips blockfilter when we have pruned some blocks") + assert len(self.nodes[1].getblockfilter(self.nodes[1].getbestblockhash())['filter']) > 0 + self.log.info("check if we can access the blockfilter of a pruned block") + assert len(self.nodes[1].getblockfilter(self.nodes[1].getblockhash(2))['filter']) > 0 + self.log.info("start node without blockfilterindex") + self.stop_node(1) + self.start_node(1, extra_args=self.extra_args[0]) + self.log.info("make sure accessing the blockfilters throws an error") + assert_raises_rpc_error(-1, "Index is not enabled for filtertype basic", self.nodes[1].getblockfilter, self.nodes[1].getblockhash(2)) + self.nodes[1].generate(1000) + self.log.info("prune below the blockfilterindexes best block while blockfilters are disabled") + pruneheight_new = self.nodes[1].pruneblockchain(1000) + assert_greater_than(pruneheight_new, pruneheight) + self.stop_node(1) + self.log.info("make sure we get an init error when starting the node again with block filters") + with self.nodes[1].assert_debug_log(["basic block filter index best block of the index goes beyond pruned data. 
Please disable the index or reindex (which will download the whole blockchain again)"]): + self.nodes[1].assert_start_raises_init_error(extra_args=self.extra_args[1]) + self.log.info("make sure the node starts again with the -reindex arg") + reindex_args = self.extra_args[1] + reindex_args.append("-reindex") + self.start_node(1, extra_args=reindex_args) + + +if __name__ == '__main__': + FeatureBlockfilterindexPruneTest().main() diff --git a/test/functional/feature_config_args.py b/test/functional/feature_config_args.py index 2445b6d977..573760a8cb 100755 --- a/test/functional/feature_config_args.py +++ b/test/functional/feature_config_args.py @@ -5,6 +5,7 @@ """Test various command line arguments and configuration file parameters.""" import os +import time from test_framework.test_framework import BitcoinTestFramework from test_framework import util @@ -147,11 +148,68 @@ class ConfArgsTest(BitcoinTestFramework): self.start_node(0, extra_args=['-nonetworkactive=1']) self.stop_node(0) + def test_seed_peers(self): + self.log.info('Test seed peers') + default_data_dir = self.nodes[0].datadir + + # No peers.dat exists and -dnsseed=1 + # We expect the node will use DNS Seeds, but Regtest mode has 0 DNS seeds + # So after 60 seconds, the node should fallback to fixed seeds (this is a slow test) + assert not os.path.exists(os.path.join(default_data_dir, "peers.dat")) + start = int(time.time()) + with self.nodes[0].assert_debug_log(expected_msgs=[ + "Loaded 0 addresses from peers.dat", + "0 addresses found from DNS seeds"]): + self.start_node(0, extra_args=['-dnsseed=1 -mocktime={}'.format(start)]) + with self.nodes[0].assert_debug_log(expected_msgs=[ + "Adding fixed seeds as 60 seconds have passed and addrman is empty"]): + self.nodes[0].setmocktime(start + 65) + self.stop_node(0) + + # No peers.dat exists and -dnsseed=0 + # We expect the node will fallback immediately to fixed seeds + assert not os.path.exists(os.path.join(default_data_dir, "peers.dat")) + start = time.time() + with self.nodes[0].assert_debug_log(expected_msgs=[ + "Loaded 0 addresses from peers.dat", + "DNS seeding disabled", + "Adding fixed seeds as -dnsseed=0, -addnode is not provided and all -seednode(s) attempted\n"]): + self.start_node(0, extra_args=['-dnsseed=0']) + assert time.time() - start < 60 + self.stop_node(0) + + # No peers.dat exists and dns seeds are disabled. + # We expect the node will not add fixed seeds when explicitly disabled. 
+ assert not os.path.exists(os.path.join(default_data_dir, "peers.dat")) + start = time.time() + with self.nodes[0].assert_debug_log(expected_msgs=[ + "Loaded 0 addresses from peers.dat", + "DNS seeding disabled", + "Fixed seeds are disabled"]): + self.start_node(0, extra_args=['-dnsseed=0', '-fixedseeds=0']) + assert time.time() - start < 60 + self.stop_node(0) + + # No peers.dat exists and -dnsseed=0, but a -addnode is provided + # We expect the node will allow 60 seconds prior to using fixed seeds + assert not os.path.exists(os.path.join(default_data_dir, "peers.dat")) + start = int(time.time()) + with self.nodes[0].assert_debug_log(expected_msgs=[ + "Loaded 0 addresses from peers.dat", + "DNS seeding disabled"]): + self.start_node(0, extra_args=['-dnsseed=0', '-addnode=fakenodeaddr -mocktime={}'.format(start)]) + with self.nodes[0].assert_debug_log(expected_msgs=[ + "Adding fixed seeds as 60 seconds have passed and addrman is empty"]): + self.nodes[0].setmocktime(start + 65) + self.stop_node(0) + + def run_test(self): self.stop_node(0) self.test_log_buffer() self.test_args_log() + self.test_seed_peers() self.test_networkactive() self.test_config_file_parser() diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py index f9ece244fb..2b56bc78f5 100755 --- a/test/functional/feature_dbcrash.py +++ b/test/functional/feature_dbcrash.py @@ -49,7 +49,6 @@ from test_framework.util import ( class ChainstateWriteCrashTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 4 - self.setup_clean_chain = False self.rpc_timeout = 480 self.supports_cli = False diff --git a/test/functional/feature_filelock.py b/test/functional/feature_filelock.py index 7de9a589be..2798d11b0a 100755 --- a/test/functional/feature_filelock.py +++ b/test/functional/feature_filelock.py @@ -4,6 +4,8 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Check that it's not possible to start a second bitcoind instance using the same datadir or wallet.""" import os +import random +import string from test_framework.test_framework import BitcoinTestFramework from test_framework.test_node import ErrorMatch @@ -27,11 +29,21 @@ class FilelockTest(BitcoinTestFramework): self.nodes[1].assert_start_raises_init_error(extra_args=['-datadir={}'.format(self.nodes[0].datadir), '-noserver'], expected_msg=expected_msg) if self.is_wallet_compiled(): - self.nodes[0].createwallet(self.default_wallet_name) - wallet_dir = os.path.join(datadir, 'wallets') - self.log.info("Check that we can't start a second bitcoind instance using the same wallet") - expected_msg = "Error: Error initializing wallet database environment" - self.nodes[1].assert_start_raises_init_error(extra_args=['-walletdir={}'.format(wallet_dir), '-wallet=' + self.default_wallet_name, '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX) + def check_wallet_filelock(descriptors): + wallet_name = ''.join([random.choice(string.ascii_lowercase) for _ in range(6)]) + self.nodes[0].createwallet(wallet_name=wallet_name, descriptors=descriptors) + wallet_dir = os.path.join(datadir, 'wallets') + self.log.info("Check that we can't start a second bitcoind instance using the same wallet") + if descriptors: + expected_msg = "Error: SQLiteDatabase: Unable to obtain an exclusive lock on the database, is it being used by another bitcoind?" 
+ else: + expected_msg = "Error: Error initializing wallet database environment" + self.nodes[1].assert_start_raises_init_error(extra_args=['-walletdir={}'.format(wallet_dir), '-wallet=' + wallet_name, '-noserver'], expected_msg=expected_msg, match=ErrorMatch.PARTIAL_REGEX) + + if self.is_bdb_compiled(): + check_wallet_filelock(False) + if self.is_sqlite_compiled(): + check_wallet_filelock(True) if __name__ == '__main__': FilelockTest().main() diff --git a/test/functional/feature_includeconf.py b/test/functional/feature_includeconf.py index 6f1a0cd348..f22b7f266a 100755 --- a/test/functional/feature_includeconf.py +++ b/test/functional/feature_includeconf.py @@ -20,7 +20,6 @@ from test_framework.test_framework import BitcoinTestFramework class IncludeConfTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 def setup_chain(self): diff --git a/test/functional/feature_notifications.py b/test/functional/feature_notifications.py index f2313bac13..b068ce612c 100755 --- a/test/functional/feature_notifications.py +++ b/test/functional/feature_notifications.py @@ -5,11 +5,11 @@ """Test the -alertnotify, -blocknotify and -walletnotify options.""" import os -from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE, keyhash_to_p2pkh +from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE +from test_framework.descriptors import descsum_create from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, - hex_str_to_bytes, ) # Linux allow all characters other than \x00 @@ -49,6 +49,31 @@ class NotificationsTest(BitcoinTestFramework): super().setup_network() def run_test(self): + if self.is_wallet_compiled(): + # Setup the descriptors to be imported to the wallet + seed = "cTdGmKFWpbvpKQ7ejrdzqYT2hhjyb3GPHnLAK7wdi5Em67YLwSm9" + xpriv = "tprv8ZgxMBicQKsPfHCsTwkiM1KT56RXbGGTqvc2hgqzycpwbHqqpcajQeMRZoBD35kW4RtyCemu6j34Ku5DEspmgjKdt2qe4SvRch5Kk8B8A2v" + desc_imports = [{ + "desc": descsum_create("wpkh(" + xpriv + "/0/*)"), + "timestamp": 0, + "active": True, + "keypool": True, + },{ + "desc": descsum_create("wpkh(" + xpriv + "/1/*)"), + "timestamp": 0, + "active": True, + "keypool": True, + "internal": True, + }] + # Make the wallets and import the descriptors + # Ensures that node 0 and node 1 share the same wallet for the conflicting transaction tests below. + for i, name in enumerate(self.wallet_names): + self.nodes[i].createwallet(wallet_name=name, descriptors=self.options.descriptors, blank=True, load_on_startup=True) + if self.options.descriptors: + self.nodes[i].importdescriptors(desc_imports) + else: + self.nodes[i].sethdseed(True, seed) + self.log.info("test -blocknotify") block_count = 10 blocks = self.nodes[1].generatetoaddress(block_count, self.nodes[1].getnewaddress() if self.is_wallet_compiled() else ADDRESS_BCRT1_UNSPENDABLE) @@ -84,11 +109,10 @@ class NotificationsTest(BitcoinTestFramework): for tx_file in os.listdir(self.walletnotify_dir): os.remove(os.path.join(self.walletnotify_dir, tx_file)) - # Conflicting transactions tests. Give node 0 same wallet seed as - # node 1, generate spends from node 0, and check notifications + # Conflicting transactions tests. 
+ # Generate spends from node 0, and check notifications # triggered by node 1 self.log.info("test -walletnotify with conflicting transactions") - self.nodes[0].sethdseed(seed=self.nodes[1].dumpprivkey(keyhash_to_p2pkh(hex_str_to_bytes(self.nodes[1].getwalletinfo()['hdseedid'])[::-1]))) self.nodes[0].rescanblockchain() self.nodes[0].generatetoaddress(100, ADDRESS_BCRT1_UNSPENDABLE) self.sync_blocks() diff --git a/test/functional/feature_proxy.py b/test/functional/feature_proxy.py index cd5eff9184..2983feaa0d 100755 --- a/test/functional/feature_proxy.py +++ b/test/functional/feature_proxy.py @@ -44,8 +44,8 @@ from test_framework.netutil import test_ipv6_local RANGE_BEGIN = PORT_MIN + 2 * PORT_RANGE # Start after p2p and rpc ports -# Networks returned by RPC getpeerinfo, defined in src/netbase.cpp::GetNetworkName() -NET_UNROUTABLE = "unroutable" +# Networks returned by RPC getpeerinfo. +NET_UNROUTABLE = "not_publicly_routable" NET_IPV4 = "ipv4" NET_IPV6 = "ipv6" NET_ONION = "onion" diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py index 6ee2b72c11..5027a9828f 100755 --- a/test/functional/feature_taproot.py +++ b/test/functional/feature_taproot.py @@ -517,7 +517,6 @@ def add_spender(spenders, *args, **kwargs): def random_checksig_style(pubkey): """Creates a random CHECKSIG* tapscript that would succeed with only the valid signature on witness stack.""" - return bytes(CScript([pubkey, OP_CHECKSIG])) opcode = random.choice([OP_CHECKSIG, OP_CHECKSIGVERIFY, OP_CHECKSIGADD]) if (opcode == OP_CHECKSIGVERIFY): ret = CScript([pubkey, opcode, OP_1]) diff --git a/test/functional/feature_utxo_set_hash.py b/test/functional/feature_utxo_set_hash.py new file mode 100755 index 0000000000..6e6046d84d --- /dev/null +++ b/test/functional/feature_utxo_set_hash.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020-2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. 
+"""Test UTXO set hash value calculation in gettxoutsetinfo.""" + +import struct + +from test_framework.blocktools import create_transaction +from test_framework.messages import ( + CBlock, + COutPoint, + FromHex, +) +from test_framework.muhash import MuHash3072 +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +class UTXOSetHashTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.setup_clean_chain = True + + def skip_test_if_missing_module(self): + self.skip_if_no_wallet() + + def test_deterministic_hash_results(self): + self.log.info("Test deterministic UTXO set hash results") + + # These depend on the setup_clean_chain option, the chain loaded from the cache + assert_equal(self.nodes[0].gettxoutsetinfo()['hash_serialized_2'], "b32ec1dda5a53cd025b95387aad344a801825fe46a60ff952ce26528f01d3be8") + assert_equal(self.nodes[0].gettxoutsetinfo("muhash")['muhash'], "dd5ad2a105c2d29495f577245c357409002329b9f4d6182c0af3dc2f462555c8") + + def test_muhash_implementation(self): + self.log.info("Test MuHash implementation consistency") + + node = self.nodes[0] + + # Generate 100 blocks and remove the first since we plan to spend its + # coinbase + block_hashes = node.generate(100) + blocks = list(map(lambda block: FromHex(CBlock(), node.getblock(block, False)), block_hashes)) + spending = blocks.pop(0) + + # Create a spending transaction and mine a block which includes it + tx = create_transaction(node, spending.vtx[0].rehash(), node.getnewaddress(), amount=49) + txid = node.sendrawtransaction(hexstring=tx.serialize_with_witness().hex(), maxfeerate=0) + + tx_block = node.generateblock(output=node.getnewaddress(), transactions=[txid]) + blocks.append(FromHex(CBlock(), node.getblock(tx_block['hash'], False))) + + # Serialize the outputs that should be in the UTXO set and add them to + # a MuHash object + muhash = MuHash3072() + + for height, block in enumerate(blocks): + # The Genesis block coinbase is not part of the UTXO set and we + # spent the first mined block + height += 2 + + for tx in block.vtx: + for n, tx_out in enumerate(tx.vout): + coinbase = 1 if not tx.vin[0].prevout.hash else 0 + + # Skip witness commitment + if (coinbase and n > 0): + continue + + data = COutPoint(int(tx.rehash(), 16), n).serialize() + data += struct.pack("<i", height * 2 + coinbase) + data += tx_out.serialize() + + muhash.insert(data) + + finalized = muhash.digest() + node_muhash = node.gettxoutsetinfo("muhash")['muhash'] + + assert_equal(finalized[::-1].hex(), node_muhash) + + def run_test(self): + self.test_deterministic_hash_results() + self.test_muhash_implementation() + + +if __name__ == '__main__': + UTXOSetHashTest().main() diff --git a/test/functional/interface_bitcoin_cli.py b/test/functional/interface_bitcoin_cli.py index 1257dff1ae..2cf0ef2251 100755 --- a/test/functional/interface_bitcoin_cli.py +++ b/test/functional/interface_bitcoin_cli.py @@ -29,6 +29,8 @@ class TestBitcoinCli(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True self.num_nodes = 1 + if self.is_wallet_compiled(): + self.requires_wallet = True def skip_test_if_missing_module(self): self.skip_if_no_cli() diff --git a/test/functional/interface_zmq.py b/test/functional/interface_zmq.py index 946bfa51d4..d0967a9340 100755 --- a/test/functional/interface_zmq.py +++ b/test/functional/interface_zmq.py @@ -27,28 +27,31 @@ def hash256_reversed(byte_str): class ZMQSubscriber: def __init__(self, socket, topic): - self.sequence = 0 
+ self.sequence = None # no sequence number received yet self.socket = socket self.topic = topic self.socket.setsockopt(zmq.SUBSCRIBE, self.topic) - def receive(self): + # Receive message from publisher and verify that topic and sequence match + def _receive_from_publisher_and_check(self): topic, body, seq = self.socket.recv_multipart() # Topic should match the subscriber topic. assert_equal(topic, self.topic) # Sequence should be incremental. - assert_equal(struct.unpack('<I', seq)[-1], self.sequence) + received_seq = struct.unpack('<I', seq)[-1] + if self.sequence is None: + self.sequence = received_seq + else: + assert_equal(received_seq, self.sequence) self.sequence += 1 return body + def receive(self): + return self._receive_from_publisher_and_check() + def receive_sequence(self): - topic, body, seq = self.socket.recv_multipart() - # Topic should match the subscriber topic. - assert_equal(topic, self.topic) - # Sequence should be incremental. - assert_equal(struct.unpack('<I', seq)[-1], self.sequence) - self.sequence += 1 + body = self._receive_from_publisher_and_check() hash = body[:32].hex() label = chr(body[32]) mempool_sequence = None if len(body) != 32+1+8 else struct.unpack("<Q", body[32+1:])[0] @@ -62,6 +65,11 @@ class ZMQSubscriber: class ZMQTest (BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 + if self.is_wallet_compiled(): + self.requires_wallet = True + # This test isn't testing txn relay/timing, so set whitelist on the + # peers for instant txn relay. This speeds up the test run time 2-3x. + self.extra_args = [["-whitelist=noban@127.0.0.1"]] * self.num_nodes def skip_test_if_missing_module(self): self.skip_if_no_py3_zmq() @@ -82,23 +90,46 @@ class ZMQTest (BitcoinTestFramework): # Restart node with the specified zmq notifications enabled, subscribe to # all of them and return the corresponding ZMQSubscriber objects. - def setup_zmq_test(self, services, recv_timeout=60, connect_nodes=False): + def setup_zmq_test(self, services, *, recv_timeout=60, sync_blocks=True): subscribers = [] for topic, address in services: socket = self.ctx.socket(zmq.SUB) - socket.set(zmq.RCVTIMEO, recv_timeout*1000) subscribers.append(ZMQSubscriber(socket, topic.encode())) - self.restart_node(0, ["-zmqpub%s=%s" % (topic, address) for topic, address in services]) - - if connect_nodes: - self.connect_nodes(0, 1) + self.restart_node(0, ["-zmqpub%s=%s" % (topic, address) for topic, address in services] + + self.extra_args[0]) for i, sub in enumerate(subscribers): sub.socket.connect(services[i][1]) - # Relax so that the subscribers are ready before publishing zmq messages - sleep(0.2) + # Ensure that all zmq publisher notification interfaces are ready by + # running the following "sync up" procedure: + # 1. Generate a block on the node + # 2. Try to receive a notification on all subscribers + # 3. 
If all subscribers get a message within the timeout (1 second), + # we are done, otherwise repeat starting from step 1 + for sub in subscribers: + sub.socket.set(zmq.RCVTIMEO, 1000) + while True: + self.nodes[0].generate(1) + recv_failed = False + for sub in subscribers: + try: + sub.receive() + except zmq.error.Again: + self.log.debug("Didn't receive sync-up notification, trying again.") + recv_failed = True + if not recv_failed: + self.log.debug("ZMQ sync-up completed, all subscribers are ready.") + break + + # set subscriber's desired timeout for the test + for sub in subscribers: + sub.socket.set(zmq.RCVTIMEO, recv_timeout*1000) + + self.connect_nodes(0, 1) + if sync_blocks: + self.sync_blocks() return subscribers @@ -108,9 +139,7 @@ class ZMQTest (BitcoinTestFramework): self.restart_node(0, ["-zmqpubrawtx=foo", "-zmqpubhashtx=bar"]) address = 'tcp://127.0.0.1:28332' - subs = self.setup_zmq_test( - [(topic, address) for topic in ["hashblock", "hashtx", "rawblock", "rawtx"]], - connect_nodes=True) + subs = self.setup_zmq_test([(topic, address) for topic in ["hashblock", "hashtx", "rawblock", "rawtx"]]) hashblock = subs[0] hashtx = subs[1] @@ -187,6 +216,7 @@ class ZMQTest (BitcoinTestFramework): hashblock, hashtx = self.setup_zmq_test( [(topic, address) for topic in ["hashblock", "hashtx"]], recv_timeout=2) # 2 second timeout to check end of notifications + self.disconnect_nodes(0, 1) # Generate 1 block in nodes[0] with 1 mempool tx and receive all notifications payment_txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 1.0) @@ -235,6 +265,7 @@ class ZMQTest (BitcoinTestFramework): """ self.log.info("Testing 'sequence' publisher") [seq] = self.setup_zmq_test([("sequence", "tcp://127.0.0.1:28333")]) + self.disconnect_nodes(0, 1) # Mempool sequence number starts at 1 seq_num = 1 @@ -385,7 +416,7 @@ class ZMQTest (BitcoinTestFramework): return self.log.info("Testing 'mempool sync' usage of sequence notifier") - [seq] = self.setup_zmq_test([("sequence", "tcp://127.0.0.1:28333")], connect_nodes=True) + [seq] = self.setup_zmq_test([("sequence", "tcp://127.0.0.1:28333")]) # In-memory counter, should always start at 1 next_mempool_seq = self.nodes[0].getrawmempool(mempool_sequence=True)["mempool_sequence"] @@ -485,10 +516,13 @@ class ZMQTest (BitcoinTestFramework): def test_multiple_interfaces(self): # Set up two subscribers with different addresses + # (note that after the reorg test, syncing would fail due to different + # chain lengths on node0 and node1; for this test we only need node0, so + # we can disable syncing blocks on the setup) subscribers = self.setup_zmq_test([ ("hashblock", "tcp://127.0.0.1:28334"), ("hashblock", "tcp://127.0.0.1:28335"), - ]) + ], sync_blocks=False) # Generate 1 block in nodes[0] and receive all notifications self.nodes[0].generatetoaddress(1, ADDRESS_BCRT1_UNSPENDABLE) diff --git a/test/functional/mempool_persist.py b/test/functional/mempool_persist.py index 70cd4ebb3b..752b925b92 100755 --- a/test/functional/mempool_persist.py +++ b/test/functional/mempool_persist.py @@ -69,6 +69,8 @@ class MempoolPersistTest(BitcoinTestFramework): assert_equal(len(self.nodes[0].getrawmempool()), 5) assert_equal(len(self.nodes[1].getrawmempool()), 5) + total_fee_old = self.nodes[0].getmempoolinfo()['total_fee'] + self.log.debug("Prioritize a transaction on node0") fees = self.nodes[0].getmempoolentry(txid=last_txid)['fees'] assert_equal(fees['base'], fees['modified']) @@ -76,6 +78,10 @@ class MempoolPersistTest(BitcoinTestFramework): fees = 
self.nodes[0].getmempoolentry(txid=last_txid)['fees'] assert_equal(fees['base'] + Decimal('0.00001000'), fees['modified']) + self.log.info('Check the total base fee is unchanged after prioritisetransaction') + assert_equal(total_fee_old, self.nodes[0].getmempoolinfo()['total_fee']) + assert_equal(total_fee_old, sum(v['fees']['base'] for k, v in self.nodes[0].getrawmempool(verbose=True).items())) + tx_creation_time = self.nodes[0].getmempoolentry(txid=last_txid)['time'] assert_greater_than_or_equal(tx_creation_time, tx_creation_time_lower) assert_greater_than_or_equal(tx_creation_time_higher, tx_creation_time) diff --git a/test/functional/p2p_add_connections.py b/test/functional/p2p_add_connections.py index a63c3a3287..a04ba5db2d 100755 --- a/test/functional/p2p_add_connections.py +++ b/test/functional/p2p_add_connections.py @@ -17,7 +17,6 @@ def check_node_connections(*, node, num_in, num_out): class P2PAddConnections(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 2 def setup_network(self): diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py index 91fbd722cf..69821763bd 100755 --- a/test/functional/p2p_addr_relay.py +++ b/test/functional/p2p_addr_relay.py @@ -46,7 +46,6 @@ class AddrReceiver(P2PInterface): class AddrTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 def run_test(self): diff --git a/test/functional/p2p_blocksonly.py b/test/functional/p2p_blocksonly.py index c592ab52b1..6584efae79 100755 --- a/test/functional/p2p_blocksonly.py +++ b/test/functional/p2p_blocksonly.py @@ -15,7 +15,6 @@ from test_framework.util import assert_equal class P2PBlocksOnly(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 self.extra_args = [["-blocksonly"]] diff --git a/test/functional/p2p_filter.py b/test/functional/p2p_filter.py index 642a217047..8f64419138 100755 --- a/test/functional/p2p_filter.py +++ b/test/functional/p2p_filter.py @@ -19,7 +19,13 @@ from test_framework.messages import ( msg_mempool, msg_version, ) -from test_framework.p2p import P2PInterface, p2p_lock +from test_framework.p2p import ( + P2PInterface, + P2P_SERVICES, + P2P_SUBVERSION, + P2P_VERSION, + p2p_lock, +) from test_framework.script import MAX_SCRIPT_ELEMENT_SIZE from test_framework.test_framework import BitcoinTestFramework @@ -81,7 +87,6 @@ class P2PBloomFilter(P2PInterface): class FilterTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 self.extra_args = [[ '-peerbloomfilters', @@ -217,9 +222,12 @@ class FilterTest(BitcoinTestFramework): self.log.info('Test BIP 37 for a node with fRelay = False') # Add peer but do not send version yet filter_peer_without_nrelay = self.nodes[0].add_p2p_connection(P2PBloomFilter(), send_version=False, wait_for_verack=False) - # Send version with fRelay=False + # Send version with relay=False version_without_fRelay = msg_version() - version_without_fRelay.nRelay = 0 + version_without_fRelay.nVersion = P2P_VERSION + version_without_fRelay.strSubVer = P2P_SUBVERSION + version_without_fRelay.nServices = P2P_SERVICES + version_without_fRelay.relay = 0 filter_peer_without_nrelay.send_message(version_without_fRelay) filter_peer_without_nrelay.wait_for_verack() assert not self.nodes[0].getpeerinfo()[0]['relaytxes'] diff --git a/test/functional/p2p_getaddr_caching.py b/test/functional/p2p_getaddr_caching.py index 2b75ad5175..d375af6fe1 100755 --- 
a/test/functional/p2p_getaddr_caching.py +++ b/test/functional/p2p_getaddr_caching.py @@ -41,7 +41,6 @@ class AddrReceiver(P2PInterface): class AddrTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 def run_test(self): diff --git a/test/functional/p2p_invalid_locator.py b/test/functional/p2p_invalid_locator.py index e4fc9fd178..f884cf90ff 100755 --- a/test/functional/p2p_invalid_locator.py +++ b/test/functional/p2p_invalid_locator.py @@ -13,7 +13,6 @@ from test_framework.test_framework import BitcoinTestFramework class InvalidLocatorTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 - self.setup_clean_chain = False def run_test(self): node = self.nodes[0] # convenience reference to the node diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py index ca8bf908a9..12b8b7baff 100755 --- a/test/functional/p2p_leak.py +++ b/test/functional/p2p_leak.py @@ -17,7 +17,12 @@ from test_framework.messages import ( msg_ping, msg_version, ) -from test_framework.p2p import P2PInterface +from test_framework.p2p import ( + P2PInterface, + P2P_SUBVERSION, + P2P_SERVICES, + P2P_VERSION_RELAY, +) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, @@ -125,12 +130,15 @@ class P2PLeakTest(BitcoinTestFramework): assert_equal(ver.addrFrom.port, 0) assert_equal(ver.addrFrom.ip, '0.0.0.0') assert_equal(ver.nStartingHeight, 201) - assert_equal(ver.nRelay, 1) + assert_equal(ver.relay, 1) self.log.info('Check that old peers are disconnected') p2p_old_peer = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False) old_version_msg = msg_version() old_version_msg.nVersion = 31799 + old_version_msg.strSubVer = P2P_SUBVERSION + old_version_msg.nServices = P2P_SERVICES + old_version_msg.relay = P2P_VERSION_RELAY with self.nodes[0].assert_debug_log(['peer=3 using obsolete version 31799; disconnecting']): p2p_old_peer.send_message(old_version_msg) p2p_old_peer.wait_for_disconnect() diff --git a/test/functional/p2p_message_capture.py b/test/functional/p2p_message_capture.py new file mode 100755 index 0000000000..080b2d93ad --- /dev/null +++ b/test/functional/p2p_message_capture.py @@ -0,0 +1,76 @@ +#!/usr/bin/env python3 +# Copyright (c) 2020 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test per-peer message capture capability. + +Additionally, the output of contrib/message-capture/message-capture-parser.py should be verified manually. +""" + +import glob +from io import BytesIO +import os + +from test_framework.p2p import P2PDataStore, MESSAGEMAP +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + +TIME_SIZE = 8 +LENGTH_SIZE = 4 +MSGTYPE_SIZE = 12 + +def mini_parser(dat_file): + """Parse a data file created by CaptureMessage. + + From the data file we'll only check the structure. + + We won't care about things like: + - Deserializing the payload of the message + - This is managed by the deserialize methods in test_framework.messages + - The order of the messages + - There's no reason why we can't, say, change the order of the messages in the handshake + - Message Type + - We can add new message types + + We're ignoring these because they're simply too brittle to test here. 
+ """ + with open(dat_file, 'rb') as f_in: + # This should have at least one message in it + assert(os.fstat(f_in.fileno()).st_size >= TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE) + while True: + tmp_header_raw = f_in.read(TIME_SIZE + LENGTH_SIZE + MSGTYPE_SIZE) + if not tmp_header_raw: + break + tmp_header = BytesIO(tmp_header_raw) + tmp_header.read(TIME_SIZE) # skip the timestamp field + raw_msgtype = tmp_header.read(MSGTYPE_SIZE) + msgtype: bytes = raw_msgtype.split(b'\x00', 1)[0] + remainder = raw_msgtype.split(b'\x00', 1)[1] + assert(len(msgtype) > 0) + assert(msgtype in MESSAGEMAP) + assert(len(remainder) == 0 or not remainder.decode().isprintable()) + length: int = int.from_bytes(tmp_header.read(LENGTH_SIZE), "little") + data = f_in.read(length) + assert_equal(len(data), length) + + + +class MessageCaptureTest(BitcoinTestFramework): + def set_test_params(self): + self.num_nodes = 1 + self.extra_args = [["-capturemessages"]] + self.setup_clean_chain = True + + def run_test(self): + capturedir = os.path.join(self.nodes[0].datadir, "regtest/message_capture") + # Connect a node so that the handshake occurs + self.nodes[0].add_p2p_connection(P2PDataStore()) + self.nodes[0].disconnect_p2ps() + recv_file = glob.glob(os.path.join(capturedir, "*/msgs_recv.dat"))[0] + mini_parser(recv_file) + sent_file = glob.glob(os.path.join(capturedir, "*/msgs_sent.dat"))[0] + mini_parser(sent_file) + + +if __name__ == '__main__': + MessageCaptureTest().main() diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py index 8a751c6b54..4bf96cb0e6 100755 --- a/test/functional/p2p_tx_download.py +++ b/test/functional/p2p_tx_download.py @@ -56,7 +56,6 @@ MAX_GETDATA_INBOUND_WAIT = GETDATA_TX_INTERVAL + INBOUND_PEER_TX_DELAY + TXID_RE class TxDownloadTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 2 def test_tx_requests(self): diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index 99be6b7b8e..84ca1b99c2 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -268,6 +268,18 @@ class BlockchainTest(BitcoinTestFramework): res5 = node.gettxoutsetinfo(hash_type='none') assert 'hash_serialized_2' not in res5 + # hash_type muhash should return a different UTXO set hash. + res6 = node.gettxoutsetinfo(hash_type='muhash') + assert 'muhash' in res6 + assert(res['hash_serialized_2'] != res6['muhash']) + + # muhash should not be included in gettxoutset unless requested. 
+ for r in [res, res2, res3, res4, res5]: + assert 'muhash' not in r + + # Unknown hash_type raises an error + assert_raises_rpc_error(-8, "foohash is not a valid hash_type", node.gettxoutsetinfo, "foohash") + def _test_getblockheader(self): node = self.nodes[0] diff --git a/test/functional/rpc_estimatefee.py b/test/functional/rpc_estimatefee.py index 81862ac69e..51b7efb4c3 100755 --- a/test/functional/rpc_estimatefee.py +++ b/test/functional/rpc_estimatefee.py @@ -14,7 +14,6 @@ from test_framework.util import assert_raises_rpc_error class EstimateFeeTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 def run_test(self): diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py index de0b7f303f..2d41963beb 100755 --- a/test/functional/rpc_net.py +++ b/test/functional/rpc_net.py @@ -25,6 +25,7 @@ from test_framework.util import ( assert_raises_rpc_error, p2p_port, ) +from test_framework.wallet import MiniWallet def assert_net_servicesnames(servicesflag, servicenames): @@ -48,6 +49,9 @@ class NetTest(BitcoinTestFramework): self.supports_cli = False def run_test(self): + # We need miniwallet to make a transaction + self.wallet = MiniWallet(self.nodes[0]) + self.wallet.generate(1) # Get out of IBD for the minfeefilter and getpeerinfo tests. self.nodes[0].generate(101) @@ -74,8 +78,7 @@ class NetTest(BitcoinTestFramework): def test_getpeerinfo(self): self.log.info("Test getpeerinfo") # Create a few getpeerinfo last_block/last_transaction values. - if self.is_wallet_compiled(): - self.nodes[0].sendtoaddress(self.nodes[1].getnewaddress(), 1) + self.wallet.send_self_transfer(from_node=self.nodes[0]) # Make a transaction so we can see it in the getpeerinfo results self.nodes[1].generate(1) self.sync_all() time_now = int(time.time()) @@ -101,6 +104,9 @@ class NetTest(BitcoinTestFramework): assert_equal(peer_info[1][0]['connection_type'], 'manual') assert_equal(peer_info[1][1]['connection_type'], 'inbound') + # Check dynamically generated networks list in getpeerinfo help output. + assert "(ipv4, ipv6, onion, not_publicly_routable)" in self.nodes[0].help("getpeerinfo") + def test_getnettotals(self): self.log.info("Test getnettotals") # Test getnettotals and getpeerinfo by doing a ping. The bytes @@ -149,6 +155,9 @@ class NetTest(BitcoinTestFramework): for info in network_info: assert_net_servicesnames(int(info["localservices"], 0x10), info["localservicesnames"]) + # Check dynamically generated networks list in getnetworkinfo help output. + assert "(ipv4, ipv6, onion)" in self.nodes[0].help("getnetworkinfo") + def test_getaddednodeinfo(self): self.log.info("Test getaddednodeinfo") assert_equal(self.nodes[0].getaddednodeinfo(), []) diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py index b364077a9a..ed6abaed78 100755 --- a/test/functional/rpc_psbt.py +++ b/test/functional/rpc_psbt.py @@ -24,7 +24,6 @@ MAX_BIP125_RBF_SEQUENCE = 0xfffffffd class PSBTTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 3 self.extra_args = [ ["-walletrbf=1"], diff --git a/test/functional/rpc_uptime.py b/test/functional/rpc_uptime.py index e86f91b1d0..6177970872 100755 --- a/test/functional/rpc_uptime.py +++ b/test/functional/rpc_uptime.py @@ -10,6 +10,7 @@ Test corresponds to code in rpc/server.cpp. 
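The MiniWallet change above is what lets rpc_net.py create a mempool transaction even when the build has no wallet. The pattern generalizes to other tests; a minimal sketch, assuming it lives in test/functional/ so that the test_framework package resolves (class and file names here are invented):

    #!/usr/bin/env python3
    from test_framework.test_framework import BitcoinTestFramework
    from test_framework.wallet import MiniWallet

    class MiniWalletSketch(BitcoinTestFramework):
        def set_test_params(self):
            self.num_nodes = 1

        def run_test(self):
            # MiniWallet keeps track of its own spendable outputs, so the test
            # needs neither wallet.dat nor skip_if_no_wallet().
            self.wallet = MiniWallet(self.nodes[0])
            self.wallet.generate(1)       # one block paying the MiniWallet
            self.nodes[0].generate(101)   # mature that coinbase and leave IBD
            self.wallet.send_self_transfer(from_node=self.nodes[0])
            assert len(self.nodes[0].getrawmempool()) == 1

    if __name__ == '__main__':
        MiniWalletSketch().main()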
import time from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_raises_rpc_error class UptimeTest(BitcoinTestFramework): @@ -18,8 +19,12 @@ class UptimeTest(BitcoinTestFramework): self.setup_clean_chain = True def run_test(self): + self._test_negative_time() self._test_uptime() + def _test_negative_time(self): + assert_raises_rpc_error(-8, "Mocktime can not be negative: -1.", self.nodes[0].setmocktime, -1) + def _test_uptime(self): wait_time = 10 self.nodes[0].setmocktime(int(time.time() + wait_time)) diff --git a/test/functional/test-shell.md b/test/functional/test-shell.md index f6ea9ef682..b8e899d675 100644 --- a/test/functional/test-shell.md +++ b/test/functional/test-shell.md @@ -178,7 +178,7 @@ can be called after the TestShell is shut down. | `num_nodes` | `1` | Sets the number of initialized bitcoind processes. | | `perf` | False | Profiles running nodes with `perf` for the duration of the test if set to `True`. | | `rpc_timeout` | `60` | Sets the RPC server timeout for the underlying bitcoind processes. | -| `setup_clean_chain` | `False` | Initializes an empty blockchain by default. A 199-block-long chain is initialized if set to `True`. | +| `setup_clean_chain` | `False` | A 200-block-long chain is initialized from cache by default. Instead, `setup_clean_chain` initializes an empty blockchain if set to `True`. | | `randomseed` | Random Integer | `TestShell.options.randomseed` is a member of `TestShell` which can be accessed during a test to seed a random generator. User can override default with a constant value for reproducible test runs. | | `supports_cli` | `False` | Whether the bitcoin-cli utility is compiled and available for the test. | | `tmpdir` | `"/var/folders/.../"` | Sets directory for test logs. 
Will be deleted upon a successful test run unless `nocleanup` is set to `True` | diff --git a/test/functional/test_framework/bdb.py b/test/functional/test_framework/bdb.py index 9de358aa0a..97b9c1d6d0 100644 --- a/test/functional/test_framework/bdb.py +++ b/test/functional/test_framework/bdb.py @@ -51,7 +51,6 @@ def dump_leaf_page(data): page_info['pgno'] = pgno page_info['prev_pgno'] = prev_pgno page_info['next_pgno'] = next_pgno - page_info['entries'] = entries page_info['hf_offset'] = hf_offset page_info['level'] = level page_info['pg_type'] = pg_type diff --git a/test/functional/test_framework/key.py b/test/functional/test_framework/key.py index e0cbab45ce..26526e35fa 100644 --- a/test/functional/test_framework/key.py +++ b/test/functional/test_framework/key.py @@ -20,10 +20,6 @@ def TaggedHash(tag, data): ss += data return hashlib.sha256(ss).digest() -def xor_bytes(b0, b1): - assert len(b0) == len(b1) - return bytes(x ^ y for (x, y) in zip(b0, b1)) - def jacobi_symbol(n, k): """Compute the Jacobi symbol of n modulo k @@ -510,7 +506,7 @@ class TestFrameworkKey(unittest.TestCase): if pubkey is not None: keys[privkey] = pubkey for msg in byte_arrays: # test every combination of message, signing key, verification key - for sign_privkey, sign_pubkey in keys.items(): + for sign_privkey, _ in keys.items(): sig = sign_schnorr(sign_privkey, msg) for verify_privkey, verify_pubkey in keys.items(): if verify_privkey == sign_privkey: diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 6ad4e13db2..a18a9ec109 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -31,11 +31,6 @@ import time from test_framework.siphash import siphash256 from test_framework.util import hex_str_to_bytes, assert_equal -MIN_VERSION_SUPPORTED = 60001 -MY_VERSION = 70016 # past wtxid relay -MY_SUBVERSION = b"/python-p2p-tester:0.0.3/" -MY_RELAY = 1 # from version 70001 onwards, fRelay should be appended to version messages (BIP37) - MAX_LOCATOR_SZ = 101 MAX_BLOCK_BASE_SIZE = 1000000 MAX_BLOOM_FILTER_SIZE = 36000 @@ -326,22 +321,20 @@ class CBlockLocator: __slots__ = ("nVersion", "vHave") def __init__(self): - self.nVersion = MY_VERSION self.vHave = [] def deserialize(self, f): - self.nVersion = struct.unpack("<i", f.read(4))[0] + struct.unpack("<i", f.read(4))[0] # Ignore version field. self.vHave = deser_uint256_vector(f) def serialize(self): r = b"" - r += struct.pack("<i", self.nVersion) + r += struct.pack("<i", 0) # Bitcoin Core ignores version field. Set it to 0. 
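Circling back to the `setup_clean_chain` row corrected in test-shell.md above, the difference is easiest to see interactively. A short sketch, assuming a compiled source tree and a Python session started from test/functional/ so that test_framework imports resolve (the block counts are the documented defaults, not measured here):

    from test_framework.test_shell import TestShell

    test = TestShell().setup(num_nodes=1, setup_clean_chain=True)
    node = test.nodes[0]
    print(node.getblockcount())   # 0 -- empty chain; without setup_clean_chain it starts at 200
    node.generate(10)
    print(node.getblockcount())   # 10
    test.shutdown()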
r += ser_uint256_vector(self.vHave) return r def __repr__(self): - return "CBlockLocator(nVersion=%i vHave=%s)" \ - % (self.nVersion, repr(self.vHave)) + return "CBlockLocator(vHave=%s)" % (repr(self.vHave)) class COutPoint: @@ -1023,20 +1016,20 @@ class CMerkleBlock: # Objects that correspond to messages on the wire class msg_version: - __slots__ = ("addrFrom", "addrTo", "nNonce", "nRelay", "nServices", + __slots__ = ("addrFrom", "addrTo", "nNonce", "relay", "nServices", "nStartingHeight", "nTime", "nVersion", "strSubVer") msgtype = b"version" def __init__(self): - self.nVersion = MY_VERSION - self.nServices = NODE_NETWORK | NODE_WITNESS + self.nVersion = 0 + self.nServices = 0 self.nTime = int(time.time()) self.addrTo = CAddress() self.addrFrom = CAddress() self.nNonce = random.getrandbits(64) - self.strSubVer = MY_SUBVERSION + self.strSubVer = '' self.nStartingHeight = -1 - self.nRelay = MY_RELAY + self.relay = 0 def deserialize(self, f): self.nVersion = struct.unpack("<i", f.read(4))[0] @@ -1048,18 +1041,18 @@ class msg_version: self.addrFrom = CAddress() self.addrFrom.deserialize(f, with_time=False) self.nNonce = struct.unpack("<Q", f.read(8))[0] - self.strSubVer = deser_string(f) + self.strSubVer = deser_string(f).decode('utf-8') self.nStartingHeight = struct.unpack("<i", f.read(4))[0] if self.nVersion >= 70001: # Relay field is optional for version 70001 onwards try: - self.nRelay = struct.unpack("<b", f.read(1))[0] + self.relay = struct.unpack("<b", f.read(1))[0] except: - self.nRelay = 0 + self.relay = 0 else: - self.nRelay = 0 + self.relay = 0 def serialize(self): r = b"" @@ -1069,16 +1062,16 @@ class msg_version: r += self.addrTo.serialize(with_time=False) r += self.addrFrom.serialize(with_time=False) r += struct.pack("<Q", self.nNonce) - r += ser_string(self.strSubVer) + r += ser_string(self.strSubVer.encode('utf-8')) r += struct.pack("<i", self.nStartingHeight) - r += struct.pack("<b", self.nRelay) + r += struct.pack("<b", self.relay) return r def __repr__(self): - return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i nRelay=%i)' \ + return 'msg_version(nVersion=%i nServices=%i nTime=%s addrTo=%s addrFrom=%s nNonce=0x%016X strSubVer=%s nStartingHeight=%i relay=%i)' \ % (self.nVersion, self.nServices, time.ctime(self.nTime), repr(self.addrTo), repr(self.addrFrom), self.nNonce, - self.strSubVer, self.nStartingHeight, self.nRelay) + self.strSubVer, self.nStartingHeight, self.relay) class msg_verack: @@ -1273,7 +1266,7 @@ class msg_block: # for cases where a user needs tighter control over what is sent over the wire # note that the user must supply the name of the msgtype, and the data class msg_generic: - __slots__ = ("msgtype", "data") + __slots__ = ("data") def __init__(self, msgtype, data=None): self.msgtype = msgtype diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py index fa4a567aac..05099f3339 100755 --- a/test/functional/test_framework/p2p.py +++ b/test/functional/test_framework/p2p.py @@ -31,7 +31,6 @@ import threading from test_framework.messages import ( CBlockHeader, MAX_HEADERS_RESULTS, - MIN_VERSION_SUPPORTED, msg_addr, msg_addrv2, msg_block, @@ -79,6 +78,18 @@ from test_framework.util import ( logger = logging.getLogger("TestFramework.p2p") +# The minimum P2P version that this test framework supports +MIN_P2P_VERSION_SUPPORTED = 60001 +# The P2P version that this test framework implements and sends in its `version` message +# Version 70016 supports wtxid 
relay +P2P_VERSION = 70016 +# The services that this test framework offers in its `version` message +P2P_SERVICES = NODE_NETWORK | NODE_WITNESS +# The P2P user agent string that this test framework sends in its `version` message +P2P_SUBVERSION = "/python-p2p-tester:0.0.3/" +# Value for relay that this test framework sends in its `version` message +P2P_VERSION_RELAY = 1 + MESSAGEMAP = { b"addr": msg_addr, b"addrv2": msg_addrv2, @@ -327,6 +338,9 @@ class P2PInterface(P2PConnection): def peer_connect_send_version(self, services): # Send a version msg vt = msg_version() + vt.nVersion = P2P_VERSION + vt.strSubVer = P2P_SUBVERSION + vt.relay = P2P_VERSION_RELAY vt.nServices = services vt.addrTo.ip = self.dstaddr vt.addrTo.port = self.dstport @@ -334,7 +348,7 @@ class P2PInterface(P2PConnection): vt.addrFrom.port = 0 self.on_connection_send_msg = vt # Will be sent in connection_made callback - def peer_connect(self, *args, services=NODE_NETWORK | NODE_WITNESS, send_version=True, **kwargs): + def peer_connect(self, *args, services=P2P_SERVICES, send_version=True, **kwargs): create_conn = super().peer_connect(*args, **kwargs) if send_version: @@ -417,7 +431,7 @@ class P2PInterface(P2PConnection): pass def on_version(self, message): - assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED) + assert message.nVersion >= MIN_P2P_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_P2P_VERSION_SUPPORTED) if message.nVersion >= 70016 and self.wtxidrelay: self.send_message(msg_wtxidrelay()) if self.support_addrv2: diff --git a/test/functional/test_framework/script.py b/test/functional/test_framework/script.py index be0e9f24e2..c35533698c 100644 --- a/test/functional/test_framework/script.py +++ b/test/functional/test_framework/script.py @@ -29,8 +29,6 @@ MAX_SCRIPT_ELEMENT_SIZE = 520 LOCKTIME_THRESHOLD = 500000000 ANNEX_TAG = 0x50 -OPCODE_NAMES = {} # type: Dict[CScriptOp, str] - LEAF_VERSION_TAPSCRIPT = 0xc0 def hash160(s): @@ -47,7 +45,6 @@ def bn2vch(v): # Serialize to bytes return encoded_v.to_bytes(n_bytes, 'little') -_opcode_instances = [] # type: List[CScriptOp] class CScriptOp(int): """A single script opcode""" __slots__ = () @@ -111,6 +108,9 @@ class CScriptOp(int): _opcode_instances.append(super().__new__(cls, n)) return _opcode_instances[n] +OPCODE_NAMES: Dict[CScriptOp, str] = {} +_opcode_instances: List[CScriptOp] = [] + # Populate opcode instance table for n in range(0xff + 1): CScriptOp(n) diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index 4bda73599d..70a9798449 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -108,6 +108,9 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): # skipped. If list is truncated, wallet creation is skipped and keys # are not imported. self.wallet_names = None + # By default the wallet is not required. Set to true by skip_if_no_wallet(). + # When False, we ignore wallet_names regardless of what it is. 
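Since msg_version now defaults its fields to zero/empty, any test that builds a version message by hand has to populate them from the new constants above, exactly as p2p_filter.py and p2p_leak.py do. A small sketch of that pattern, assuming the test_framework package is importable (the helper name is invented; `peer` stands for whatever add_p2p_connection() returned):

    from test_framework.messages import msg_version
    from test_framework.p2p import (
        P2P_SERVICES,
        P2P_SUBVERSION,
        P2P_VERSION,
        P2P_VERSION_RELAY,
    )

    def make_version_msg(version=P2P_VERSION, relay=P2P_VERSION_RELAY):
        # The framework no longer fills these in for a bare msg_version(),
        # so every field the node inspects is set explicitly.
        msg = msg_version()
        msg.nVersion = version
        msg.nServices = P2P_SERVICES
        msg.strSubVer = P2P_SUBVERSION
        msg.relay = relay
        return msg

    no_relay_version = make_version_msg(relay=0)   # BIP37-style "don't relay txs to me"
    # peer.send_message(no_relay_version)          # peer from node.add_p2p_connection(...)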
+ self.requires_wallet = False self.set_test_params() assert self.wallet_names is None or len(self.wallet_names) <= self.num_nodes if self.options.timeout_factor == 0 : @@ -184,15 +187,30 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): parser.add_argument('--timeout-factor', dest="timeout_factor", type=float, default=1.0, help='adjust test timeouts by a factor. Setting it to 0 disables all timeouts') group = parser.add_mutually_exclusive_group() - group.add_argument("--descriptors", default=False, action="store_true", + group.add_argument("--descriptors", action='store_const', const=True, help="Run test using a descriptor wallet", dest='descriptors') - group.add_argument("--legacy-wallet", default=False, action="store_false", + group.add_argument("--legacy-wallet", action='store_const', const=False, help="Run test using legacy wallets", dest='descriptors') self.add_options(parser) self.options = parser.parse_args() self.options.previous_releases_path = previous_releases_path + config = configparser.ConfigParser() + config.read_file(open(self.options.configfile)) + self.config = config + + if self.options.descriptors is None: + # Prefer BDB unless it isn't available + if self.is_bdb_compiled(): + self.options.descriptors = False + elif self.is_sqlite_compiled(): + self.options.descriptors = True + else: + # If neither are compiled, tests requiring a wallet will be skipped and the value of self.options.descriptors won't matter + # It still needs to exist and be None in order for tests to work however. + self.options.descriptors = None + def setup(self): """Call this method to start up the test framework object with options set.""" @@ -202,9 +220,8 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): self.options.cachedir = os.path.abspath(self.options.cachedir) - config = configparser.ConfigParser() - config.read_file(open(self.options.configfile)) - self.config = config + config = self.config + fname_bitcoind = os.path.join( config["environment"]["BUILDDIR"], "src", @@ -377,7 +394,7 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): extra_args = self.extra_args self.add_nodes(self.num_nodes, extra_args) self.start_nodes() - if self.is_wallet_compiled(): + if self.requires_wallet: self.import_deterministic_coinbase_privkeys() if not self.setup_clean_chain: for n in self.nodes: @@ -769,10 +786,13 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): def skip_if_no_wallet(self): """Skip the running test if wallet has not been compiled.""" + self.requires_wallet = True if not self.is_wallet_compiled(): raise SkipTest("wallet has not been compiled.") if self.options.descriptors: self.skip_if_no_sqlite() + else: + self.skip_if_no_bdb() def skip_if_no_sqlite(self): """Skip the running test if sqlite has not been compiled.""" diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py index b61d433652..b820c36e6e 100755 --- a/test/functional/test_framework/test_node.py +++ b/test/functional/test_framework/test_node.py @@ -23,9 +23,10 @@ import sys from .authproxy import JSONRPCException from .descriptors import descsum_create -from .messages import MY_SUBVERSION +from .p2p import P2P_SUBVERSION from .util import ( MAX_NODES, + assert_equal, append_config, delete_cookie_file, get_auth_cookie, @@ -114,6 +115,8 @@ class TestNode(): if self.version_is_at_least(190000): self.args.append("-logthreadnames") + if self.version_is_at_least(219900): + self.args.append("-logsourcelocations") self.cli = 
TestNodeCLI(bitcoin_cli, self.datadir) self.use_cli = use_cli @@ -545,6 +548,11 @@ class TestNode(): # in comparison to the upside of making tests less fragile and unexpected intermittent errors less likely. p2p_conn.sync_with_ping() + # Consistency check that the Bitcoin Core has received our user agent string. This checks the + # node's newest peer. It could be racy if another Bitcoin Core node has connected since we opened + # our connection, but we don't expect that to happen. + assert_equal(self.getpeerinfo()[-1]['subver'], P2P_SUBVERSION) + return p2p_conn def add_outbound_p2p_connection(self, p2p_conn, *, p2p_idx, connection_type="outbound-full-relay", **kwargs): @@ -572,7 +580,7 @@ class TestNode(): def num_test_p2p_connections(self): """Return number of test framework p2p connections to the node.""" - return len([peer for peer in self.getpeerinfo() if peer['subver'] == MY_SUBVERSION.decode("utf-8")]) + return len([peer for peer in self.getpeerinfo() if peer['subver'] == P2P_SUBVERSION]) def disconnect_p2ps(self): """Close all p2p connections to the node.""" diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index c652ac0a06..d742ef4eee 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -86,29 +86,29 @@ EXTENDED_SCRIPTS = [ BASE_SCRIPTS = [ # Scripts that are run by default. # Longest test should go first, to favor running tests in parallel - 'wallet_hd.py', + 'wallet_hd.py --legacy-wallet', 'wallet_hd.py --descriptors', - 'wallet_backup.py', + 'wallet_backup.py --legacy-wallet', 'wallet_backup.py --descriptors', # vv Tests less than 5m vv 'mining_getblocktemplate_longpoll.py', 'feature_maxuploadtarget.py', 'feature_block.py', - 'rpc_fundrawtransaction.py', + 'rpc_fundrawtransaction.py --legacy-wallet', 'rpc_fundrawtransaction.py --descriptors', 'p2p_compactblocks.py', 'feature_segwit.py --legacy-wallet', # vv Tests less than 2m vv - 'wallet_basic.py', + 'wallet_basic.py --legacy-wallet', 'wallet_basic.py --descriptors', - 'wallet_labels.py', + 'wallet_labels.py --legacy-wallet', 'wallet_labels.py --descriptors', 'p2p_segwit.py', 'p2p_timeouts.py', 'p2p_tx_download.py', 'mempool_updatefromblock.py', 'wallet_dump.py --legacy-wallet', - 'wallet_listtransactions.py', + 'wallet_listtransactions.py --legacy-wallet', 'wallet_listtransactions.py --descriptors', 'feature_taproot.py', # vv Tests less than 60s vv @@ -116,21 +116,21 @@ BASE_SCRIPTS = [ 'wallet_importmulti.py --legacy-wallet', 'mempool_limit.py', 'rpc_txoutproof.py', - 'wallet_listreceivedby.py', + 'wallet_listreceivedby.py --legacy-wallet', 'wallet_listreceivedby.py --descriptors', - 'wallet_abandonconflict.py', + 'wallet_abandonconflict.py --legacy-wallet', 'wallet_abandonconflict.py --descriptors', 'feature_csv_activation.py', - 'rpc_rawtransaction.py', + 'rpc_rawtransaction.py --legacy-wallet', 'rpc_rawtransaction.py --descriptors', - 'wallet_address_types.py', + 'wallet_address_types.py --legacy-wallet', 'wallet_address_types.py --descriptors', 'feature_bip68_sequence.py', 'p2p_feefilter.py', 'feature_reindex.py', 'feature_abortnode.py', # vv Tests less than 30s vv - 'wallet_keypool_topup.py', + 'wallet_keypool_topup.py --legacy-wallet', 'wallet_keypool_topup.py --descriptors', 'feature_fee_estimation.py', 'interface_zmq.py', @@ -138,7 +138,7 @@ BASE_SCRIPTS = [ 'interface_bitcoin_cli.py', 'mempool_resurrect.py', 'wallet_txn_doublespend.py --mineblock', - 'tool_wallet.py', + 'tool_wallet.py --legacy-wallet', 'tool_wallet.py --descriptors', 'wallet_txn_clone.py', 
'wallet_txn_clone.py --segwit', @@ -146,14 +146,14 @@ BASE_SCRIPTS = [ 'rpc_misc.py', 'interface_rest.py', 'mempool_spend_coinbase.py', - 'wallet_avoidreuse.py', + 'wallet_avoidreuse.py --legacy-wallet', 'wallet_avoidreuse.py --descriptors', 'mempool_reorg.py', 'mempool_persist.py', - 'wallet_multiwallet.py', + 'wallet_multiwallet.py --legacy-wallet', 'wallet_multiwallet.py --descriptors', 'wallet_multiwallet.py --usecli', - 'wallet_createwallet.py', + 'wallet_createwallet.py --legacy-wallet', 'wallet_createwallet.py --usecli', 'wallet_createwallet.py --descriptors', 'wallet_watchonly.py --legacy-wallet', @@ -161,27 +161,27 @@ BASE_SCRIPTS = [ 'wallet_reorgsrestore.py', 'interface_http.py', 'interface_rpc.py', - 'rpc_psbt.py', + 'rpc_psbt.py --legacy-wallet', 'rpc_psbt.py --descriptors', 'rpc_users.py', 'rpc_whitelist.py', 'feature_proxy.py', - 'rpc_signrawtransaction.py', + 'rpc_signrawtransaction.py --legacy-wallet', 'rpc_signrawtransaction.py --descriptors', - 'wallet_groups.py', + 'wallet_groups.py --legacy-wallet', 'p2p_addrv2_relay.py', 'wallet_groups.py --descriptors', 'p2p_disconnect_ban.py', 'rpc_decodescript.py', 'rpc_blockchain.py', 'rpc_deprecated.py', - 'wallet_disable.py', + 'wallet_disable.py --legacy-wallet', 'wallet_disable.py --descriptors', 'p2p_addr_relay.py', 'p2p_getaddr_caching.py', 'p2p_getdata.py', 'rpc_net.py', - 'wallet_keypool.py', + 'wallet_keypool.py --legacy-wallet', 'wallet_keypool.py --descriptors', 'wallet_descriptor.py --descriptors', 'p2p_nobloomfilter_messages.py', @@ -195,70 +195,72 @@ BASE_SCRIPTS = [ 'p2p_invalid_tx.py', 'feature_assumevalid.py', 'example_test.py', - 'wallet_txn_doublespend.py', + 'wallet_txn_doublespend.py --legacy-wallet', 'wallet_txn_doublespend.py --descriptors', - 'feature_backwards_compatibility.py', + 'feature_backwards_compatibility.py --legacy-wallet', 'feature_backwards_compatibility.py --descriptors', 'wallet_txn_clone.py --mineblock', 'feature_notifications.py', 'rpc_getblockfilter.py', 'rpc_invalidateblock.py', + 'feature_utxo_set_hash.py', 'feature_rbf.py', 'mempool_packages.py', 'mempool_package_onemore.py', - 'rpc_createmultisig.py', + 'rpc_createmultisig.py --legacy-wallet', 'rpc_createmultisig.py --descriptors', 'feature_versionbits_warning.py', 'rpc_preciousblock.py', - 'wallet_importprunedfunds.py', + 'wallet_importprunedfunds.py --legacy-wallet', 'wallet_importprunedfunds.py --descriptors', 'p2p_leak_tx.py', 'p2p_eviction.py', 'rpc_signmessage.py', 'rpc_generateblock.py', 'rpc_generate.py', - 'wallet_balance.py', + 'wallet_balance.py --legacy-wallet', 'wallet_balance.py --descriptors', - 'feature_nulldummy.py', + 'feature_nulldummy.py --legacy-wallet', 'feature_nulldummy.py --descriptors', 'mempool_accept.py', 'mempool_expiry.py', 'wallet_import_rescan.py --legacy-wallet', 'wallet_import_with_label.py --legacy-wallet', 'wallet_importdescriptors.py --descriptors', - 'wallet_upgradewallet.py', + 'wallet_upgradewallet.py --legacy-wallet', 'rpc_bind.py --ipv4', 'rpc_bind.py --ipv6', 'rpc_bind.py --nonloopback', 'mining_basic.py', 'feature_signet.py', - 'wallet_bumpfee.py', + 'wallet_bumpfee.py --legacy-wallet', 'wallet_bumpfee.py --descriptors', 'wallet_implicitsegwit.py --legacy-wallet', 'rpc_named_arguments.py', - 'wallet_listsinceblock.py', + 'wallet_listsinceblock.py --legacy-wallet', 'wallet_listsinceblock.py --descriptors', 'wallet_listdescriptors.py --descriptors', 'p2p_leak.py', - 'wallet_encryption.py', + 'wallet_encryption.py --legacy-wallet', 'wallet_encryption.py --descriptors', 'feature_dersig.py', 
'feature_cltv.py', 'rpc_uptime.py', - 'wallet_resendwallettransactions.py', + 'wallet_resendwallettransactions.py --legacy-wallet', 'wallet_resendwallettransactions.py --descriptors', - 'wallet_fallbackfee.py', + 'wallet_fallbackfee.py --legacy-wallet', 'wallet_fallbackfee.py --descriptors', 'rpc_dumptxoutset.py', 'feature_minchainwork.py', 'rpc_estimatefee.py', 'rpc_getblockstats.py', - 'wallet_create_tx.py', - 'wallet_send.py', + 'wallet_create_tx.py --legacy-wallet', + 'wallet_send.py --legacy-wallet', + 'wallet_send.py --descriptors', 'wallet_create_tx.py --descriptors', 'p2p_fingerprint.py', 'feature_uacomment.py', - 'wallet_coinbase_category.py', + 'wallet_coinbase_category.py --legacy-wallet', 'wallet_coinbase_category.py --descriptors', 'feature_filelock.py', 'feature_loadblock.py', @@ -266,6 +268,7 @@ BASE_SCRIPTS = [ 'p2p_add_connections.py', 'p2p_unrequested_blocks.py', 'p2p_blockfilters.py', + 'p2p_message_capture.py', 'feature_includeconf.py', 'feature_asmap.py', 'mempool_unbroadcast.py', @@ -286,6 +289,7 @@ BASE_SCRIPTS = [ 'feature_help.py', 'feature_shutdown.py', 'p2p_ibd_txrelay.py', + 'feature_blockfilterindex_prune.py' # Don't append tests at the end to avoid merge conflicts # Put them in a random line within the section that fits their approximate run-time ] diff --git a/test/functional/tool_wallet.py b/test/functional/tool_wallet.py index 8a1af24dcf..28103793df 100755 --- a/test/functional/tool_wallet.py +++ b/test/functional/tool_wallet.py @@ -183,11 +183,13 @@ class ToolWalletTest(BitcoinTestFramework): def test_invalid_tool_commands_and_args(self): self.log.info('Testing that various invalid commands raise with specific error messages') - self.assert_raises_tool_error('Invalid command: foo', 'foo') + self.assert_raises_tool_error("Error parsing command line arguments: Invalid command 'foo'", 'foo') # `bitcoin-wallet help` raises an error. Use `bitcoin-wallet -help`. - self.assert_raises_tool_error('Invalid command: help', 'help') - self.assert_raises_tool_error('Error: two methods provided (info and create). Only one method should be provided.', 'info', 'create') + self.assert_raises_tool_error("Error parsing command line arguments: Invalid command 'help'", 'help') + self.assert_raises_tool_error('Error: Additional arguments provided (create). Methods do not take arguments. Please refer to `-help`.', 'info', 'create') self.assert_raises_tool_error('Error parsing command line arguments: Invalid parameter -foo', '-foo') + self.assert_raises_tool_error('No method provided. 
Run `bitcoin-wallet -help` for valid methods.') + self.assert_raises_tool_error('Wallet name must be provided when creating a new wallet.', 'create') locked_dir = os.path.join(self.options.tmpdir, "node0", "regtest", "wallets") error = 'Error initializing wallet database environment "{}"!'.format(locked_dir) if self.options.descriptors: @@ -348,7 +350,8 @@ class ToolWalletTest(BitcoinTestFramework): self.log.info('Checking createfromdump') self.do_tool_createfromdump("load", "wallet.dump") - self.do_tool_createfromdump("load-bdb", "wallet.dump", "bdb") + if self.is_bdb_compiled(): + self.do_tool_createfromdump("load-bdb", "wallet.dump", "bdb") if self.is_sqlite_compiled(): self.do_tool_createfromdump("load-sqlite", "wallet.dump", "sqlite") diff --git a/test/functional/wallet_avoidreuse.py b/test/functional/wallet_avoidreuse.py index 229c134a4b..bc4fa90e83 100755 --- a/test/functional/wallet_avoidreuse.py +++ b/test/functional/wallet_avoidreuse.py @@ -65,7 +65,6 @@ def assert_balances(node, mine): class AvoidReuseTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 2 # This test isn't testing txn relay/timing, so set whitelist on the # peers for instant txn relay. This speeds up the test run time 2-3x. diff --git a/test/functional/wallet_createwallet.py b/test/functional/wallet_createwallet.py index cf3317121f..16a0a50b07 100755 --- a/test/functional/wallet_createwallet.py +++ b/test/functional/wallet_createwallet.py @@ -17,7 +17,6 @@ from test_framework.wallet_util import bytes_to_wif, generate_wif_key class CreateWalletTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 def skip_test_if_missing_module(self): diff --git a/test/functional/wallet_descriptor.py b/test/functional/wallet_descriptor.py index 1de41a5f96..1e032bdd6c 100755 --- a/test/functional/wallet_descriptor.py +++ b/test/functional/wallet_descriptor.py @@ -23,11 +23,14 @@ class WalletDescriptorTest(BitcoinTestFramework): self.skip_if_no_sqlite() def run_test(self): - # Make a legacy wallet and check it is BDB - self.nodes[0].createwallet(wallet_name="legacy1", descriptors=False) - wallet_info = self.nodes[0].getwalletinfo() - assert_equal(wallet_info['format'], 'bdb') - self.nodes[0].unloadwallet("legacy1") + if self.is_bdb_compiled(): + # Make a legacy wallet and check it is BDB + self.nodes[0].createwallet(wallet_name="legacy1", descriptors=False) + wallet_info = self.nodes[0].getwalletinfo() + assert_equal(wallet_info['format'], 'bdb') + self.nodes[0].unloadwallet("legacy1") + else: + self.log.warning("Skipping BDB test") # Make a descriptor wallet self.log.info("Making a descriptor wallet") @@ -148,5 +151,62 @@ class WalletDescriptorTest(BitcoinTestFramework): nopriv_rpc = self.nodes[0].get_wallet_rpc('desc_no_priv') assert_raises_rpc_error(-4, 'This wallet has no available keys', nopriv_rpc.getnewaddress) + self.log.info("Test descriptor exports") + self.nodes[0].createwallet(wallet_name='desc_export', descriptors=True) + exp_rpc = self.nodes[0].get_wallet_rpc('desc_export') + self.nodes[0].createwallet(wallet_name='desc_import', disable_private_keys=True, descriptors=True) + imp_rpc = self.nodes[0].get_wallet_rpc('desc_import') + + addr_types = [('legacy', False, 'pkh(', '44\'/1\'/0\'', -13), + ('p2sh-segwit', False, 'sh(wpkh(', '49\'/1\'/0\'', -14), + ('bech32', False, 'wpkh(', '84\'/1\'/0\'', -13), + ('legacy', True, 'pkh(', '44\'/1\'/0\'', -13), + ('p2sh-segwit', True, 'sh(wpkh(', '49\'/1\'/0\'', -14), + 
('bech32', True, 'wpkh(', '84\'/1\'/0\'', -13)] + + for addr_type, internal, desc_prefix, deriv_path, int_idx in addr_types: + int_str = 'internal' if internal else 'external' + + self.log.info("Testing descriptor address type for {} {}".format(addr_type, int_str)) + if internal: + addr = exp_rpc.getrawchangeaddress(address_type=addr_type) + else: + addr = exp_rpc.getnewaddress(address_type=addr_type) + desc = exp_rpc.getaddressinfo(addr)['parent_desc'] + assert_equal(desc_prefix, desc[0:len(desc_prefix)]) + idx = desc.index('/') + 1 + assert_equal(deriv_path, desc[idx:idx + 9]) + if internal: + assert_equal('1', desc[int_idx]) + else: + assert_equal('0', desc[int_idx]) + + self.log.info("Testing the same descriptor is returned for address type {} {}".format(addr_type, int_str)) + for i in range(0, 10): + if internal: + addr = exp_rpc.getrawchangeaddress(address_type=addr_type) + else: + addr = exp_rpc.getnewaddress(address_type=addr_type) + test_desc = exp_rpc.getaddressinfo(addr)['parent_desc'] + assert_equal(desc, test_desc) + + self.log.info("Testing import of exported {} descriptor".format(addr_type)) + imp_rpc.importdescriptors([{ + 'desc': desc, + 'active': True, + 'next_index': 11, + 'timestamp': 'now', + 'internal': internal + }]) + + for i in range(0, 10): + if internal: + exp_addr = exp_rpc.getrawchangeaddress(address_type=addr_type) + imp_addr = imp_rpc.getrawchangeaddress(address_type=addr_type) + else: + exp_addr = exp_rpc.getnewaddress(address_type=addr_type) + imp_addr = imp_rpc.getnewaddress(address_type=addr_type) + assert_equal(exp_addr, imp_addr) + if __name__ == '__main__': WalletDescriptorTest().main () diff --git a/test/functional/wallet_send.py b/test/functional/wallet_send.py index 9835c5a2af..880341fdd9 100755 --- a/test/functional/wallet_send.py +++ b/test/functional/wallet_send.py @@ -8,6 +8,7 @@ from decimal import Decimal, getcontext from itertools import product from test_framework.authproxy import JSONRPCException +from test_framework.descriptors import descsum_create from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, @@ -168,49 +169,91 @@ class WalletSendTest(BitcoinTestFramework): self.nodes[1].createwallet(wallet_name="w1") w1 = self.nodes[1].get_wallet_rpc("w1") # w2 contains the private keys for w3 - self.nodes[1].createwallet(wallet_name="w2") + self.nodes[1].createwallet(wallet_name="w2", blank=True) w2 = self.nodes[1].get_wallet_rpc("w2") + xpriv = "tprv8ZgxMBicQKsPfHCsTwkiM1KT56RXbGGTqvc2hgqzycpwbHqqpcajQeMRZoBD35kW4RtyCemu6j34Ku5DEspmgjKdt2qe4SvRch5Kk8B8A2v" + xpub = "tpubD6NzVbkrYhZ4YkEfMbRJkQyZe7wTkbTNRECozCtJPtdLRn6cT1QKb8yHjwAPcAr26eHBFYs5iLiFFnCbwPRsncCKUKCfubHDMGKzMVcN1Jg" + if self.options.descriptors: + w2.importdescriptors([{ + "desc": descsum_create("wpkh(" + xpriv + "/0/0/*)"), + "timestamp": "now", + "range": [0, 100], + "active": True + },{ + "desc": descsum_create("wpkh(" + xpriv + "/0/1/*)"), + "timestamp": "now", + "range": [0, 100], + "active": True, + "internal": True + }]) + else: + w2.sethdseed(True) + # w3 is a watch-only wallet, based on w2 self.nodes[1].createwallet(wallet_name="w3", disable_private_keys=True) w3 = self.nodes[1].get_wallet_rpc("w3") - for _ in range(3): - a2_receive = w2.getnewaddress() - a2_change = w2.getrawchangeaddress() # doesn't actually use change derivation - res = w3.importmulti([{ - "desc": w2.getaddressinfo(a2_receive)["desc"], + if self.options.descriptors: + # Match the privkeys in w2 for descriptors + res = w3.importdescriptors([{ + 
"desc": descsum_create("wpkh(" + xpub + "/0/0/*)"), "timestamp": "now", + "range": [0, 100], "keypool": True, + "active": True, "watchonly": True },{ - "desc": w2.getaddressinfo(a2_change)["desc"], + "desc": descsum_create("wpkh(" + xpub + "/0/1/*)"), "timestamp": "now", + "range": [0, 100], "keypool": True, + "active": True, "internal": True, "watchonly": True }]) assert_equal(res, [{"success": True}, {"success": True}]) - w0.sendtoaddress(a2_receive, 10) # fund w3 - self.nodes[0].generate(1) - self.sync_blocks() - - # w4 has private keys enabled, but only contains watch-only keys (from w2) - self.nodes[1].createwallet(wallet_name="w4", disable_private_keys=False) - w4 = self.nodes[1].get_wallet_rpc("w4") for _ in range(3): a2_receive = w2.getnewaddress() - res = w4.importmulti([{ - "desc": w2.getaddressinfo(a2_receive)["desc"], - "timestamp": "now", - "keypool": False, - "watchonly": True - }]) - assert_equal(res, [{"success": True}]) + if not self.options.descriptors: + # Because legacy wallets use exclusively hardened derivation, we can't do a ranged import like we do for descriptors + a2_change = w2.getrawchangeaddress() # doesn't actually use change derivation + res = w3.importmulti([{ + "desc": w2.getaddressinfo(a2_receive)["desc"], + "timestamp": "now", + "keypool": True, + "watchonly": True + },{ + "desc": w2.getaddressinfo(a2_change)["desc"], + "timestamp": "now", + "keypool": True, + "internal": True, + "watchonly": True + }]) + assert_equal(res, [{"success": True}, {"success": True}]) - w0.sendtoaddress(a2_receive, 10) # fund w4 + w0.sendtoaddress(a2_receive, 10) # fund w3 self.nodes[0].generate(1) self.sync_blocks() + if not self.options.descriptors: + # w4 has private keys enabled, but only contains watch-only keys (from w2) + # This is legacy wallet behavior only as descriptor wallets don't allow watchonly and non-watchonly things in the same wallet. + self.nodes[1].createwallet(wallet_name="w4", disable_private_keys=False) + w4 = self.nodes[1].get_wallet_rpc("w4") + for _ in range(3): + a2_receive = w2.getnewaddress() + res = w4.importmulti([{ + "desc": w2.getaddressinfo(a2_receive)["desc"], + "timestamp": "now", + "keypool": False, + "watchonly": True + }]) + assert_equal(res, [{"success": True}]) + + w0.sendtoaddress(a2_receive, 10) # fund w4 + self.nodes[0].generate(1) + self.sync_blocks() + self.log.info("Send to address...") self.test_send(from_wallet=w0, to_wallet=w1, amount=1) self.test_send(from_wallet=w0, to_wallet=w1, amount=1, add_to_wallet=True) @@ -241,11 +284,15 @@ class WalletSendTest(BitcoinTestFramework): res = w2.walletprocesspsbt(res["psbt"]) assert res["complete"] - self.log.info("Create PSBT from wallet w4 with watch-only keys, sign with w2...") - self.test_send(from_wallet=w4, to_wallet=w1, amount=1, expect_error=(-4, "Insufficient funds")) - res = self.test_send(from_wallet=w4, to_wallet=w1, amount=1, include_watching=True, add_to_wallet=False) - res = w2.walletprocesspsbt(res["psbt"]) - assert res["complete"] + if not self.options.descriptors: + # Descriptor wallets do not allow mixed watch-only and non-watch-only things in the same wallet. + # This is specifically testing that w4 ignores its own private keys and creates a psbt with send + # which is not something that needs to be tested in descriptor wallets. 
+ self.log.info("Create PSBT from wallet w4 with watch-only keys, sign with w2...") + self.test_send(from_wallet=w4, to_wallet=w1, amount=1, expect_error=(-4, "Insufficient funds")) + res = self.test_send(from_wallet=w4, to_wallet=w1, amount=1, include_watching=True, add_to_wallet=False) + res = w2.walletprocesspsbt(res["psbt"]) + assert res["complete"] self.log.info("Create OP_RETURN...") self.test_send(from_wallet=w0, to_wallet=w1, amount=1) diff --git a/test/functional/wallet_upgradewallet.py b/test/functional/wallet_upgradewallet.py index d0bb6135a8..fbc0f995d2 100755 --- a/test/functional/wallet_upgradewallet.py +++ b/test/functional/wallet_upgradewallet.py @@ -57,6 +57,7 @@ class UpgradeWalletTest(BitcoinTestFramework): def skip_test_if_missing_module(self): self.skip_if_no_wallet() + self.skip_if_no_bdb() self.skip_if_no_previous_releases() def setup_network(self): diff --git a/test/functional/wallet_watchonly.py b/test/functional/wallet_watchonly.py index b0c41b2738..c345c382d0 100755 --- a/test/functional/wallet_watchonly.py +++ b/test/functional/wallet_watchonly.py @@ -2,7 +2,7 @@ # Copyright (c) 2018-2019 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test createwallet arguments. +"""Test createwallet watchonly arguments. """ from test_framework.test_framework import BitcoinTestFramework @@ -14,7 +14,6 @@ from test_framework.util import ( class CreateWalletWatchonlyTest(BitcoinTestFramework): def set_test_params(self): - self.setup_clean_chain = False self.num_nodes = 1 def skip_test_if_missing_module(self): @@ -50,6 +49,11 @@ class CreateWalletWatchonlyTest(BitcoinTestFramework): assert_equal(len(wo_wallet.listtransactions()), 1) assert_equal(wo_wallet.getbalance(include_watchonly=False), 0) + self.log.info('Test sending from a watch-only wallet raises RPC error') + msg = "Error: Private keys are disabled for this wallet" + assert_raises_rpc_error(-4, msg, wo_wallet.sendtoaddress, a1, 0.1) + assert_raises_rpc_error(-4, msg, wo_wallet.sendmany, amounts={a1: 0.1}) + self.log.info('Testing listreceivedbyaddress watch-only defaults') result = wo_wallet.listreceivedbyaddress() assert_equal(len(result), 1) diff --git a/test/fuzz/test_runner.py b/test/fuzz/test_runner.py index aa0aa11d15..611061072f 100755 --- a/test/fuzz/test_runner.py +++ b/test/fuzz/test_runner.py @@ -225,6 +225,8 @@ def merge_inputs(*, fuzz_pool, corpus, test_list, src_dir, build_dir, merge_dir) args = [ os.path.join(build_dir, 'src', 'test', 'fuzz', 'fuzz'), '-merge=1', + '-shuffle=0', + '-prefer_small=1', '-use_value_profile=1', # Also done by oss-fuzz https://github.com/google/oss-fuzz/issues/1406#issuecomment-387790487 os.path.join(corpus, t), os.path.join(merge_dir, t), diff --git a/test/lint/lint-circular-dependencies.sh b/test/lint/lint-circular-dependencies.sh index c4ad00e954..0b15f99448 100755 --- a/test/lint/lint-circular-dependencies.sh +++ b/test/lint/lint-circular-dependencies.sh @@ -11,6 +11,7 @@ export LC_ALL=C EXPECTED_CIRCULAR_DEPENDENCIES=( "chainparamsbase -> util/system -> chainparamsbase" "index/txindex -> validation -> index/txindex" + "index/blockfilterindex -> validation -> index/blockfilterindex" "policy/fees -> txmempool -> policy/fees" "qt/addresstablemodel -> qt/walletmodel -> qt/addresstablemodel" "qt/bitcoingui -> qt/walletframe -> qt/bitcoingui" @@ -20,6 +21,7 @@ EXPECTED_CIRCULAR_DEPENDENCIES=( "txmempool -> validation -> txmempool" "wallet/fees -> wallet/wallet 
-> wallet/fees" "wallet/wallet -> wallet/walletdb -> wallet/wallet" + "node/coinstats -> validation -> node/coinstats" ) EXIT_CODE=0 diff --git a/test/lint/lint-includes.sh b/test/lint/lint-includes.sh index 6623f9ce4c..bf7aeb5b4f 100755 --- a/test/lint/lint-includes.sh +++ b/test/lint/lint-includes.sh @@ -60,15 +60,11 @@ EXPECTED_BOOST_INCLUDES=( boost/multi_index/ordered_index.hpp boost/multi_index/sequenced_index.hpp boost/multi_index_container.hpp - boost/preprocessor/cat.hpp - boost/preprocessor/stringize.hpp boost/process.hpp boost/signals2/connection.hpp boost/signals2/optional_last_value.hpp boost/signals2/signal.hpp boost/test/unit_test.hpp - boost/thread/lock_types.hpp - boost/thread/shared_mutex.hpp ) for BOOST_INCLUDE in $(git grep '^#include <boost/' -- "*.cpp" "*.h" | cut -f2 -d: | cut -f2 -d'<' | cut -f1 -d'>' | sort -u); do diff --git a/test/lint/lint-python-dead-code.sh b/test/lint/lint-python-dead-code.sh new file mode 100755 index 0000000000..c3b6ff3c98 --- /dev/null +++ b/test/lint/lint-python-dead-code.sh @@ -0,0 +1,23 @@ +#!/usr/bin/env bash +# +# Copyright (c) 2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +# +# Find dead Python code. + +export LC_ALL=C + +if ! command -v vulture > /dev/null; then + echo "Skipping Python dead code linting since vulture is not installed. Install by running \"pip3 install vulture\"" + exit 0 +fi + +# --min-confidence 100 will only report code that is guaranteed to be unused within the analyzed files. +# Any value below 100 introduces the risk of false positives, which would create an unacceptable maintenance burden. +if ! vulture \ + --min-confidence 100 \ + $(git ls-files -- "*.py"); then + echo "Python dead code detection found some issues" + exit 1 +fi diff --git a/test/sanitizer_suppressions/tsan b/test/sanitizer_suppressions/tsan index 3a04418e8b..3fc9fac25c 100644 --- a/test/sanitizer_suppressions/tsan +++ b/test/sanitizer_suppressions/tsan @@ -28,6 +28,7 @@ race:BerkeleyBatch race:BerkeleyDatabase race:DatabaseBatch race:leveldb::DBImpl::DeleteObsoleteFiles +race:validation_chainstatemanager_tests race:zmq::* race:bitcoin-qt |