235 files changed, 4033 insertions, 2024 deletions
diff --git a/.appveyor.yml b/.appveyor.yml index fb95876c36..3ca7818eca 100644 --- a/.appveyor.yml +++ b/.appveyor.yml @@ -7,9 +7,9 @@ clone_depth: 5 environment: PATH: 'C:\Python37-x64;C:\Python37-x64\Scripts;%PATH%' PYTHONUTF8: 1 - QT_DOWNLOAD_URL: 'https://github.com/sipsorcery/qt_win_binary/releases/download/qt51210x64_vs2019_1694/Qt5.12.10_x64_static_vs2019_1694.zip' - QT_DOWNLOAD_HASH: '3035a1307e8302bb3a76eba9bb3102979f945ab4022cc3bc2e1583edd44bdc99' - QT_LOCAL_PATH: 'C:\Qt5.12.10_x64_static_vs2019_1694' + QT_DOWNLOAD_URL: 'https://github.com/sipsorcery/qt_win_binary/releases/download/qt51211x64_static_vs2019_16101/Qt5.12.11_x64_static_vs2019_16101.zip' + QT_DOWNLOAD_HASH: 'cf1b58107fadbf0d9a957d14dab16cde6b6eb6936a1908472da1f967dda34a3a' + QT_LOCAL_PATH: 'C:\Qt5.12.11_x64_static_vs2019_16101' VCPKG_TAG: '75522bb1f2e7d863078bcd06322348f053a9e33f' install: # Disable zmq test for now since python zmq library on Windows would cause Access violation sometimes. @@ -57,8 +57,6 @@ /src/util/settings.* @ryanofsky # Fuzzing -/src/test/fuzz/ @practicalswift -/doc/fuzzing.md @practicalswift # Tests /src/test/net_peer_eviction_tests.cpp @jonatack @@ -117,7 +115,6 @@ /src/dbwrapper.* @jamesob # Linter -/test/lint/ @practicalswift /test/lint/lint-shell.sh @hebasto # Bech32 diff --git a/build_msvc/bitcoin-qt/bitcoin-qt.vcxproj b/build_msvc/bitcoin-qt/bitcoin-qt.vcxproj index 65ce1ee9da..a697c1dfb6 100644 --- a/build_msvc/bitcoin-qt/bitcoin-qt.vcxproj +++ b/build_msvc/bitcoin-qt/bitcoin-qt.vcxproj @@ -55,6 +55,7 @@ <AdditionalIncludeDirectories>$(QtIncludes);%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories> </ClCompile> <Link> + <SubSystem>Windows</SubSystem> <AdditionalDependencies>$(QtReleaseLibraries);%(AdditionalDependencies)</AdditionalDependencies> <AdditionalOptions>/ignore:4206 /LTCG:OFF</AdditionalOptions> </Link> diff --git a/build_msvc/bitcoin-util/bitcoin-util.vcxproj b/build_msvc/bitcoin-util/bitcoin-util.vcxproj new file mode 100644 index 0000000000..3a6aa4a837 --- /dev/null +++ b/build_msvc/bitcoin-util/bitcoin-util.vcxproj @@ -0,0 +1,37 @@ +<?xml version="1.0" encoding="utf-8"?> +<Project DefaultTargets="Build" xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> + <Import Project="..\common.init.vcxproj" /> + <PropertyGroup Label="Globals"> + <ProjectGuid>{D3022AF6-AD33-4CE3-B358-87CB6A1B29CF}</ProjectGuid> + </PropertyGroup> + <PropertyGroup Label="Configuration"> + <ConfigurationType>Application</ConfigurationType> + <OutDir>$(SolutionDir)$(Platform)\$(Configuration)\</OutDir> + </PropertyGroup> + <ItemGroup> + <ClCompile Include="..\..\src\bitcoin-util.cpp" /> + </ItemGroup> + <ItemGroup> + <ProjectReference Include="..\libbitcoinconsensus\libbitcoinconsensus.vcxproj"> + <Project>{2b384fa8-9ee1-4544-93cb-0d733c25e8ce}</Project> + </ProjectReference> + <ProjectReference Include="..\libbitcoin_common\libbitcoin_common.vcxproj"> + <Project>{7c87e378-df58-482e-aa2f-1bc129bc19ce}</Project> + </ProjectReference> + <ProjectReference Include="..\libbitcoin_crypto\libbitcoin_crypto.vcxproj"> + <Project>{6190199c-6cf4-4dad-bfbd-93fa72a760c1}</Project> + </ProjectReference> + <ProjectReference Include="..\libbitcoin_util\libbitcoin_util.vcxproj"> + <Project>{b53a5535-ee9d-4c6f-9a26-f79ee3bc3754}</Project> + </ProjectReference> + <ProjectReference Include="..\libunivalue\libunivalue.vcxproj"> + <Project>{5724ba7d-a09a-4ba8-800b-c4c1561b3d69}</Project> + </ProjectReference> + <ProjectReference Include="..\libsecp256k1\libsecp256k1.vcxproj"> + 
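The .appveyor.yml hunk above bumps the prebuilt static Qt to 5.12.11 and pins its SHA256 in QT_DOWNLOAD_HASH. A minimal sketch of the integrity check that hash enables is below; the local archive path is an assumption, and the CI step that actually performs the verification is not part of the hunk shown here.

```python
# Sketch only: check a downloaded archive against the pinned QT_DOWNLOAD_HASH.
# The local zip path is an assumption; the CI's own verification step is not
# shown in the hunk above.
import hashlib

QT_DOWNLOAD_HASH = "cf1b58107fadbf0d9a957d14dab16cde6b6eb6936a1908472da1f967dda34a3a"

def archive_matches(path):
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest() == QT_DOWNLOAD_HASH

# archive_matches(r"C:\Qt5.12.11_x64_static_vs2019_16101.zip")
```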
<Project>{bb493552-3b8c-4a8c-bf69-a6e7a51d2ea6}</Project> + </ProjectReference> + </ItemGroup> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" /> + <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" /> + <Import Project="..\common.vcxproj" /> +</Project> diff --git a/build_msvc/bitcoin.sln b/build_msvc/bitcoin.sln index 5e9715451f..7d8591c10b 100644 --- a/build_msvc/bitcoin.sln +++ b/build_msvc/bitcoin.sln @@ -32,6 +32,8 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bench_bitcoin", "bench_bitc EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bitcoin-tx", "bitcoin-tx\bitcoin-tx.vcxproj", "{D3022AF6-AD33-4CE3-B358-87CB6A1B29CF}" EndProject +Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bitcoin-util", "bitcoin-util\bitcoin-util.vcxproj", "{D3022AF6-AD33-4CE3-B358-87CB6A1B29CF}" +EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "bitcoin-wallet", "bitcoin-wallet\bitcoin-wallet.vcxproj", "{84DE8790-EDE3-4483-81AC-C32F15E861F4}" EndProject Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbitcoin_wallet_tool", "libbitcoin_wallet_tool\libbitcoin_wallet_tool.vcxproj", "{F91AC55E-6F5E-4C58-9AC5-B40DB7DEEF93}" diff --git a/build_msvc/common.qt.init.vcxproj b/build_msvc/common.qt.init.vcxproj index 68ad06c4ac..ce66a7ab34 100644 --- a/build_msvc/common.qt.init.vcxproj +++ b/build_msvc/common.qt.init.vcxproj @@ -2,7 +2,7 @@ <Project xmlns="http://schemas.microsoft.com/developer/msbuild/2003"> <PropertyGroup Label="QtGlobals"> - <QtBaseDir>C:\Qt5.12.10_x64_static_vs2019_1694</QtBaseDir> + <QtBaseDir>C:\Qt5.12.11_x64_static_vs2019_16101</QtBaseDir> <QtPluginsLibraryDir>$(QtBaseDir)\plugins</QtPluginsLibraryDir> <QtLibraryDir>$(QtBaseDir)\lib</QtLibraryDir> <QtIncludeDir>$(QtBaseDir)\include</QtIncludeDir> diff --git a/ci/test/00_setup_env_arm.sh b/ci/test/00_setup_env_arm.sh index 07f099b85c..8d2b70e549 100755 --- a/ci/test/00_setup_env_arm.sh +++ b/ci/test/00_setup_env_arm.sh @@ -25,4 +25,4 @@ export RUN_FUNCTIONAL_TESTS=false export GOAL="install" # -Wno-psabi is to disable ABI warnings: "note: parameter passing for argument of type ... 
changed in GCC 7.1" # This could be removed once the ABI change warning does not show up by default -export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi --enable-external-signer" +export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi" diff --git a/ci/test/00_setup_env_i686_centos.sh b/ci/test/00_setup_env_i686_centos.sh index 05c724fc0b..2ddb932907 100755 --- a/ci/test/00_setup_env_i686_centos.sh +++ b/ci/test/00_setup_env_i686_centos.sh @@ -11,6 +11,6 @@ export CONTAINER_NAME=ci_i686_centos_8 export DOCKER_NAME_TAG=centos:8 export DOCKER_PACKAGES="gcc-c++ glibc-devel.x86_64 libstdc++-devel.x86_64 glibc-devel.i686 libstdc++-devel.i686 ccache libtool make git python3 python3-zmq which patch lbzip2 dash rsync coreutils bison" export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports --enable-external-signer" +export BITCOIN_CONFIG="--enable-zmq --with-gui=qt5 --enable-reduce-exports" export CONFIG_SHELL="/bin/dash" export TEST_RUNNER_ENV="LC_ALL=en_US.UTF-8" diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh index 196394e908..73ac09c1de 100755 --- a/ci/test/00_setup_env_mac.sh +++ b/ci/test/00_setup_env_mac.sh @@ -15,4 +15,4 @@ export XCODE_BUILD_ID=12A7403 export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false export GOAL="deploy" -export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-external-signer" +export BITCOIN_CONFIG="--with-gui --enable-reduce-exports" diff --git a/ci/test/00_setup_env_mac_host.sh b/ci/test/00_setup_env_mac_host.sh index 898c1530a1..c0d951a041 100755 --- a/ci/test/00_setup_env_mac_host.sh +++ b/ci/test/00_setup_env_mac_host.sh @@ -9,7 +9,7 @@ export LC_ALL=C.UTF-8 export HOST=x86_64-apple-darwin18 export PIP_PACKAGES="zmq lief" export GOAL="install" -export BITCOIN_CONFIG="--with-gui --enable-reduce-exports --enable-external-signer" +export BITCOIN_CONFIG="--with-gui --enable-reduce-exports" export CI_OS_NAME="macos" export NO_DEPENDS=1 export OSX_SDK="" diff --git a/ci/test/00_setup_env_native_asan.sh b/ci/test/00_setup_env_native_asan.sh index 92af98aa9b..ab185b6e71 100755 --- a/ci/test/00_setup_env_native_asan.sh +++ b/ci/test/00_setup_env_native_asan.sh @@ -11,4 +11,4 @@ export PACKAGES="clang llvm python3-zmq qtbase5-dev qttools5-dev-tools libevent- export DOCKER_NAME_TAG=ubuntu:hirsute export NO_DEPENDS=1 export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=address,integer,undefined CC=clang CXX=clang++ --enable-external-signer" +export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=qt5 CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' --with-sanitizers=address,integer,undefined CC=clang CXX=clang++" diff --git a/ci/test/00_setup_env_native_fuzz.sh b/ci/test/00_setup_env_native_fuzz.sh index bedd0cf9aa..58388fa928 100755 --- a/ci/test/00_setup_env_native_fuzz.sh +++ b/ci/test/00_setup_env_native_fuzz.sh @@ -14,5 +14,5 @@ export RUN_UNIT_TESTS=false export RUN_FUNCTIONAL_TESTS=false export RUN_FUZZ_TESTS=true export GOAL="install" -export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined,integer CC=clang CXX=clang++ --enable-external-signer" +export BITCOIN_CONFIG="--enable-fuzz --with-sanitizers=fuzzer,address,undefined,integer CC=clang CXX=clang++" export CCACHE_SIZE=200M diff --git a/ci/test/00_setup_env_native_multiprocess.sh 
b/ci/test/00_setup_env_native_multiprocess.sh index 37d714400b..1418dfbc51 100755 --- a/ci/test/00_setup_env_native_multiprocess.sh +++ b/ci/test/00_setup_env_native_multiprocess.sh @@ -11,7 +11,7 @@ export DOCKER_NAME_TAG=ubuntu:20.04 export PACKAGES="cmake python3 python3-pip llvm clang" export DEP_OPTS="DEBUG=1 MULTIPROCESS=1" export GOAL="install" -export BITCOIN_CONFIG="--enable-external-signer --enable-debug CC=clang CXX=clang++" # Use clang to avoid OOM +export BITCOIN_CONFIG="--enable-debug CC=clang CXX=clang++" # Use clang to avoid OOM export TEST_RUNNER_ENV="BITCOIND=bitcoin-node" export RUN_SECURITY_TESTS="true" export PIP_PACKAGES="lief" diff --git a/ci/test/00_setup_env_native_nowallet.sh b/ci/test/00_setup_env_native_nowallet.sh index a496b5af6e..d167c9198a 100755 --- a/ci/test/00_setup_env_native_nowallet.sh +++ b/ci/test/00_setup_env_native_nowallet.sh @@ -11,4 +11,4 @@ export DOCKER_NAME_TAG=ubuntu:18.04 # Use bionic to have one config run the tes export PACKAGES="python3-zmq clang-5.0 llvm-5.0" # Use clang-5 to test C++17 compatibility, see doc/dependencies.md export DEP_OPTS="NO_WALLET=1" export GOAL="install" -export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CC=clang-5.0 CXX=clang++-5.0 --enable-external-signer" +export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CC=clang-5.0 CXX=clang++-5.0" diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh index 61948ab221..9c57eba62a 100755 --- a/ci/test/00_setup_env_native_qt5.sh +++ b/ci/test/00_setup_env_native_qt5.sh @@ -16,4 +16,4 @@ export RUN_UNIT_TESTS="false" export GOAL="install" export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.2 v0.16.3 v0.17.2 v0.18.1 v0.19.1" export BITCOIN_CONFIG="--enable-zmq --with-libs=no --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports ---enable-debug --disable-fuzz-binary CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\" --enable-external-signer" +--enable-debug --disable-fuzz-binary CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\"" diff --git a/ci/test/00_setup_env_native_tsan.sh b/ci/test/00_setup_env_native_tsan.sh index 33f63fa9ba..a5082bdaab 100755 --- a/ci/test/00_setup_env_native_tsan.sh +++ b/ci/test/00_setup_env_native_tsan.sh @@ -11,4 +11,4 @@ export DOCKER_NAME_TAG=ubuntu:hirsute export PACKAGES="clang llvm libc++abi-dev libc++-dev python3-zmq" export DEP_OPTS="CC=clang CXX='clang++ -stdlib=libc++'" export GOAL="install" -export BITCOIN_CONFIG="--enable-zmq --with-gui=no CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' CXXFLAGS='-g' --with-sanitizers=thread CC=clang CXX='clang++ -stdlib=libc++' --enable-external-signer" +export BITCOIN_CONFIG="--enable-zmq --with-gui=no CPPFLAGS='-DARENA_DEBUG -DDEBUG_LOCKORDER' CXXFLAGS='-g' --with-sanitizers=thread CC=clang CXX='clang++ -stdlib=libc++'" diff --git a/ci/test/00_setup_env_s390x.sh b/ci/test/00_setup_env_s390x.sh index 88b431f3c7..51a0fd9117 100755 --- a/ci/test/00_setup_env_s390x.sh +++ b/ci/test/00_setup_env_s390x.sh @@ -23,4 +23,4 @@ export RUN_UNIT_TESTS=true export TEST_RUNNER_ENV="LC_ALL=C" export RUN_FUNCTIONAL_TESTS=true export GOAL="install" -export BITCOIN_CONFIG="--enable-reduce-exports --with-incompatible-bdb --enable-external-signer" +export BITCOIN_CONFIG="--enable-reduce-exports --with-incompatible-bdb" diff --git a/configure.ac b/configure.ac index aaa932b093..88f91004af 100644 --- a/configure.ac +++ b/configure.ac @@ -333,9 +333,9 @@ AC_ARG_ENABLE([werror], [enable_werror=no]) 
AC_ARG_ENABLE([external-signer], - [AS_HELP_STRING([--enable-external-signer],[compile external signer support (default is no, requires Boost::Process)])], + [AS_HELP_STRING([--enable-external-signer],[compile external signer support (default is yes, requires Boost::Process)])], [use_external_signer=$enableval], - [use_external_signer=no]) + [use_external_signer=yes]) AC_LANG_PUSH([C++]) @@ -475,7 +475,9 @@ if test "x$CXXFLAGS_overridden" = "xno"; then AX_CHECK_COMPILE_FLAG([-Wself-assign],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-self-assign"],,[[$CXXFLAG_WERROR]]) AX_CHECK_COMPILE_FLAG([-Wunused-local-typedef],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-unused-local-typedef"],,[[$CXXFLAG_WERROR]]) AX_CHECK_COMPILE_FLAG([-Wimplicit-fallthrough],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-implicit-fallthrough"],,[[$CXXFLAG_WERROR]]) - AX_CHECK_COMPILE_FLAG([-Wdeprecated-copy],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-deprecated-copy"],,[[$CXXFLAG_WERROR]]) + if test x$suppress_external_warnings != xyes ; then + AX_CHECK_COMPILE_FLAG([-Wdeprecated-copy],[NOWARN_CXXFLAGS="$NOWARN_CXXFLAGS -Wno-deprecated-copy"],,[[$CXXFLAG_WERROR]]) + fi fi dnl Don't allow extended (non-ASCII) symbols in identifiers. This is easier for code review. @@ -1357,13 +1359,15 @@ if test x$enable_wallet != xno; then fi if test x$use_ebpf != xno; then - AC_CHECK_HEADER([sys/sdt.h], [have_sdt=yes], [have_sdt=no]) -else - have_sdt=no -fi - -if test x$have_sdt = xyes; then - AC_DEFINE([ENABLE_TRACING], [1], [Define to 1 to enable eBPF user static defined tracepoints]) + AC_MSG_CHECKING([whether eBPF tracepoints are supported]) + AC_COMPILE_IFELSE([ + AC_LANG_PROGRAM( + [#include <sys/sdt.h>], + [DTRACE_PROBE("context", "event");] + )], + [AC_MSG_RESULT(yes); have_sdt=yes; AC_DEFINE([ENABLE_TRACING], [1], [Define to 1 to enable eBPF user static defined tracepoints])], + [AC_MSG_RESULT(no); have_sdt=no;] + ) fi dnl Check for libminiupnpc (optional) @@ -1411,37 +1415,23 @@ fi if test x$use_boost = xyes; then dnl Check for Boost headers - AX_BOOST_BASE([1.58.0],[],[AC_MSG_ERROR([Boost is not available!])]) + AX_BOOST_BASE([1.64.0],[],[AC_MSG_ERROR([Boost is not available!])]) if test x$want_boost = xno; then AC_MSG_ERROR([[only libbitcoinconsensus can be built without boost]]) fi AX_BOOST_SYSTEM AX_BOOST_FILESYSTEM - dnl Opt-in to Boost Process if external signer support is requested - if test "x$use_external_signer" != xno; then - AC_MSG_CHECKING(for Boost Process) - AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <boost/process.hpp>]], - [[ boost::process::child* child = new boost::process::child; delete child; ]])], - [ AC_MSG_RESULT(yes) - AC_DEFINE([ENABLE_EXTERNAL_SIGNER],,[define if external signer support is enabled]) - ], - [ AC_MSG_ERROR([Boost::Process is required for external signer support, but not available!])] - ) - fi - if test x$suppress_external_warnings != xno; then BOOST_CPPFLAGS=SUPPRESS_WARNINGS($BOOST_CPPFLAGS) fi - dnl Boost 1.56 through 1.62 allow using std::atomic instead of its own atomic - dnl counter implementations. In 1.63 and later the std::atomic approach is default. 
- m4_pattern_allow(DBOOST_AC_USE_STD_ATOMIC) dnl otherwise it's treated like a macro - BOOST_CPPFLAGS="-DBOOST_SP_USE_STD_ATOMIC -DBOOST_AC_USE_STD_ATOMIC $BOOST_CPPFLAGS" - BOOST_LIBS="$BOOST_LDFLAGS $BOOST_SYSTEM_LIB $BOOST_FILESYSTEM_LIB" fi +if test "x$use_external_signer" != xno; then + AC_DEFINE([ENABLE_EXTERNAL_SIGNER],,[define if external signer support is enabled]) +fi AM_CONDITIONAL([ENABLE_EXTERNAL_SIGNER], [test "x$use_external_signer" = "xyes"]) dnl Check for reduced exports diff --git a/contrib/devtools/pixie.py b/contrib/devtools/pixie.py index 8cf06a799a..64660968ad 100644 --- a/contrib/devtools/pixie.py +++ b/contrib/devtools/pixie.py @@ -217,7 +217,7 @@ def _parse_verneed(section: Section, strings: bytes, eh: ELFHeader) -> Dict[int, result = {} while True: verneed = Verneed(data, ofs, eh) - aofs = verneed.vn_aux + aofs = ofs + verneed.vn_aux while True: vernaux = Vernaux(data, aofs, eh, strings) result[vernaux.vna_other] = vernaux.name diff --git a/contrib/devtools/symbol-check.py b/contrib/devtools/symbol-check.py index d740a94560..56e4313d78 100755 --- a/contrib/devtools/symbol-check.py +++ b/contrib/devtools/symbol-check.py @@ -41,8 +41,16 @@ import pixie # MAX_VERSIONS = { 'GCC': (4,8,0), -'GLIBC': (2,17), -'LIBATOMIC': (1,0) +'GLIBC': { + pixie.EM_386: (2,17), + pixie.EM_X86_64: (2,17), + pixie.EM_ARM: (2,17), + pixie.EM_AARCH64:(2,17), + pixie.EM_PPC64: (2,17), + pixie.EM_RISCV: (2,27), +}, +'LIBATOMIC': (1,0), +'V': (0,5,0), # xkb (bitcoin-qt only) } # See here for a description of _IO_stdin_used: # https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=634261#109 @@ -78,14 +86,6 @@ ELF_ALLOWED_LIBRARIES = { 'libfreetype.so.6', # font parsing 'libdl.so.2' # programming interface to dynamic linker } -ARCH_MIN_GLIBC_VER = { -pixie.EM_386: (2,1), -pixie.EM_X86_64: (2,2,5), -pixie.EM_ARM: (2,4), -pixie.EM_AARCH64:(2,17), -pixie.EM_PPC64: (2,17), -pixie.EM_RISCV: (2,27) -} MACHO_ALLOWED_LIBRARIES = { # bitcoind and bitcoin-qt @@ -161,7 +161,10 @@ def check_version(max_versions, version, arch) -> bool: ver = tuple([int(x) for x in ver.split('.')]) if not lib in max_versions: return False - return ver <= max_versions[lib] or lib == 'GLIBC' and ver <= ARCH_MIN_GLIBC_VER[arch] + if isinstance(max_versions[lib], tuple): + return ver <= max_versions[lib] + else: + return ver <= max_versions[lib][arch] def check_imported_symbols(filename) -> bool: elf = pixie.load(filename) @@ -212,6 +215,18 @@ def check_MACHO_libraries(filename) -> bool: ok = False return ok +def check_MACHO_min_os(filename) -> bool: + binary = lief.parse(filename) + if binary.build_version.minos == [10,14,0]: + return True + return False + +def check_MACHO_sdk(filename) -> bool: + binary = lief.parse(filename) + if binary.build_version.sdk == [10, 15, 6]: + return True + return False + def check_PE_libraries(filename) -> bool: ok: bool = True binary = lief.parse(filename) @@ -221,6 +236,14 @@ def check_PE_libraries(filename) -> bool: ok = False return ok +def check_PE_subsystem_version(filename) -> bool: + binary = lief.parse(filename) + major: int = binary.optional_header.major_subsystem_version + minor: int = binary.optional_header.minor_subsystem_version + if major == 6 and minor == 1: + return True + return False + CHECKS = { 'ELF': [ ('IMPORTED_SYMBOLS', check_imported_symbols), @@ -228,10 +251,13 @@ CHECKS = { ('LIBRARY_DEPENDENCIES', check_ELF_libraries) ], 'MACHO': [ - ('DYNAMIC_LIBRARIES', check_MACHO_libraries) + ('DYNAMIC_LIBRARIES', check_MACHO_libraries), + ('MIN_OS', check_MACHO_min_os), + ('SDK', 
check_MACHO_sdk), ], 'PE' : [ - ('DYNAMIC_LIBRARIES', check_PE_libraries) + ('DYNAMIC_LIBRARIES', check_PE_libraries), + ('SUBSYSTEM_VERSION', check_PE_subsystem_version), ] } diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py index 106dfd2c5a..6ce2fa3560 100755 --- a/contrib/devtools/test-symbol-check.py +++ b/contrib/devtools/test-symbol-check.py @@ -98,7 +98,7 @@ class TestSymbolChecks(unittest.TestCase): self.assertEqual(call_symbol_check(cc, source, executable, ['-lexpat']), (1, 'libexpat.1.dylib is not in ALLOWED_LIBRARIES!\n' + - executable + ': failed DYNAMIC_LIBRARIES')) + f'{executable}: failed DYNAMIC_LIBRARIES MIN_OS SDK')) source = 'test2.c' executable = 'test2' @@ -114,7 +114,20 @@ class TestSymbolChecks(unittest.TestCase): ''') self.assertEqual(call_symbol_check(cc, source, executable, ['-framework', 'CoreGraphics']), - (0, '')) + (1, f'{executable}: failed MIN_OS SDK')) + + source = 'test3.c' + executable = 'test3' + with open(source, 'w', encoding="utf8") as f: + f.write(''' + int main() + { + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cc, source, executable, ['-mmacosx-version-min=10.14']), + (1, f'{executable}: failed SDK')) def test_PE(self): source = 'test1.c' @@ -132,12 +145,26 @@ class TestSymbolChecks(unittest.TestCase): } ''') - self.assertEqual(call_symbol_check(cc, source, executable, ['-lpdh']), + self.assertEqual(call_symbol_check(cc, source, executable, ['-lpdh', '-Wl,--major-subsystem-version', '-Wl,6', '-Wl,--minor-subsystem-version', '-Wl,1']), (1, 'pdh.dll is not in ALLOWED_LIBRARIES!\n' + executable + ': failed DYNAMIC_LIBRARIES')) source = 'test2.c' executable = 'test2.exe' + + with open(source, 'w', encoding="utf8") as f: + f.write(''' + int main() + { + return 0; + } + ''') + + self.assertEqual(call_symbol_check(cc, source, executable, ['-Wl,--major-subsystem-version', '-Wl,9', '-Wl,--minor-subsystem-version', '-Wl,9']), + (1, executable + ': failed SUBSYSTEM_VERSION')) + + source = 'test3.c' + executable = 'test3.exe' with open(source, 'w', encoding="utf8") as f: f.write(''' #include <windows.h> @@ -149,7 +176,7 @@ class TestSymbolChecks(unittest.TestCase): } ''') - self.assertEqual(call_symbol_check(cc, source, executable, ['-lole32']), + self.assertEqual(call_symbol_check(cc, source, executable, ['-lole32', '-Wl,--major-subsystem-version', '-Wl,6', '-Wl,--minor-subsystem-version', '-Wl,1']), (0, '')) diff --git a/contrib/gitian-descriptors/gitian-linux.yml b/contrib/gitian-descriptors/gitian-linux.yml index bed3531720..e6dce7a8c6 100644 --- a/contrib/gitian-descriptors/gitian-linux.yml +++ b/contrib/gitian-descriptors/gitian-linux.yml @@ -99,7 +99,7 @@ script: | done } - pip3 install lief==0.11.4 + pip3 install lief==0.11.5 # Faketime for depends so intermediate results are comparable export PATH_orig=${PATH} diff --git a/contrib/gitian-descriptors/gitian-osx-signer.yml b/contrib/gitian-descriptors/gitian-osx-signer.yml index 3f0c0c3332..addad0a5d2 100644 --- a/contrib/gitian-descriptors/gitian-osx-signer.yml +++ b/contrib/gitian-descriptors/gitian-osx-signer.yml @@ -14,7 +14,7 @@ remotes: "dir": "signature" - "url": "https://github.com/achow101/signapple.git" "dir": "signapple" - "commit": "c7e73aa27a7615ac9506559173f787e2906b25eb" + "commit": "b084cbbf44d5330448ffce0c7d118f75781b64bd" files: - "bitcoin-osx-unsigned.tar.gz" script: | diff --git a/contrib/gitian-descriptors/gitian-osx.yml b/contrib/gitian-descriptors/gitian-osx.yml index 1d4506e3c2..a39618adb7 100644 --- 
a/contrib/gitian-descriptors/gitian-osx.yml +++ b/contrib/gitian-descriptors/gitian-osx.yml @@ -78,7 +78,7 @@ script: | done } - pip3 install lief==0.11.4 + pip3 install lief==0.11.5 # Faketime for depends so intermediate results are comparable export PATH_orig=${PATH} diff --git a/contrib/gitian-descriptors/gitian-win.yml b/contrib/gitian-descriptors/gitian-win.yml index 03eba71366..ffe228a032 100644 --- a/contrib/gitian-descriptors/gitian-win.yml +++ b/contrib/gitian-descriptors/gitian-win.yml @@ -86,7 +86,7 @@ script: | done } - pip3 install lief==0.11.4 + pip3 install lief==0.11.5 # Faketime for depends so intermediate results are comparable export PATH_orig=${PATH} diff --git a/contrib/guix/guix-attest b/contrib/guix/guix-attest index 081d1c0465..c8cf73d400 100755 --- a/contrib/guix/guix-attest +++ b/contrib/guix/guix-attest @@ -99,24 +99,34 @@ fi # We should be able to find at least one output ################ -echo "Looking for build output directories in ${OUTDIR_BASE}" +echo "Looking for build output SHA256SUMS fragments in ${OUTDIR_BASE}" shopt -s nullglob -OUTDIRS=( "${OUTDIR_BASE}"/* ) # This expands to an array of directories... +sha256sum_fragments=( "$OUTDIR_BASE"/*/SHA256SUMS.part ) # This expands to an array of directories... shopt -u nullglob -if (( ${#OUTDIRS[@]} )); then - echo "Found build output directories:" - for outdir in "${OUTDIRS[@]}"; do +noncodesigned_fragments=() +codesigned_fragments=() + +if (( ${#sha256sum_fragments[@]} )); then + echo "Found build output SHA256SUMS fragments:" + for outdir in "${sha256sum_fragments[@]}"; do echo " '$outdir'" + case "$outdir" in + "$OUTDIR_BASE"/*-codesigned/SHA256SUMS.part) + codesigned_fragments+=("$outdir") + ;; + *) + noncodesigned_fragments+=("$outdir") + ;; + esac done echo else - echo "ERR: Could not find any build output directories in ${OUTDIR_BASE}" + echo "ERR: Could not find any build output SHA256SUMS fragments in ${OUTDIR_BASE}" exit 1 fi - ############## ## Attest ## ############## @@ -126,82 +136,105 @@ fi # HOST: The output directory being attested # out_name() { - basename "$1" + basename "$(dirname "$1")" } -# Usage: out_sig_dir $outdir -# -# outdir: The output directory being attested -# -out_sig_dir() { - echo "$GUIX_SIGS_REPO/$VERSION/$(out_name "$1")/$signer_name" -} +shasum_already_exists() { +cat <<EOF +-- -# Accumulate a list of signature directories that already exist... -outdirs_already_attested_to=() +ERR: An ${1} file already exists for '${VERSION}' and attests + differently. You likely previously attested to a partial build (e.g. one + where you specified the HOST environment variable). -echo "Attesting to build outputs for version: '${VERSION}'" -echo "" + See the diff above for more context. 
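The contrib/devtools/pixie.py hunk above changes `aofs = verneed.vn_aux` to `aofs = ofs + verneed.vn_aux`: in an ELF version-needs section, `vn_aux` (like `vn_next` and `vna_next`) is a byte offset relative to the entry that contains it, not to the start of the section. A self-contained sketch of that traversal, independent of pixie's own Section/ELFHeader helpers and shown for illustration only:

```python
# Standard ELF verneed/vernaux layout (identical for ELFCLASS32 and ELFCLASS64):
#   Verneed: vn_version:u16 vn_cnt:u16 vn_file:u32 vn_aux:u32 vn_next:u32
#   Vernaux: vna_hash:u32 vna_flags:u16 vna_other:u16 vna_name:u32 vna_next:u32
# vn_aux/vn_next are relative to the current Verneed entry and vna_next to the
# current Vernaux entry, which is exactly what the one-line fix accounts for.
import struct

def walk_verneed(data, endian="<"):
    """Yield (vna_other, vna_name string-table offset) for every Vernaux entry."""
    ofs = 0
    while True:
        _ver, vn_cnt, _file, vn_aux, vn_next = struct.unpack_from(endian + "HHIII", data, ofs)
        aofs = ofs + vn_aux  # relative to this Verneed entry, not to the section start
        for _ in range(vn_cnt):
            _hash, _flags, vna_other, vna_name, vna_next = struct.unpack_from(endian + "IHHII", data, aofs)
            yield vna_other, vna_name
            if vna_next == 0:
                break
            aofs += vna_next
        if vn_next == 0:
            break
        ofs += vn_next
```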
-# MAIN LOGIC: Loop through each output for VERSION and attest to output in -# GUIX_SIGS_REPO as SIGNER, if attestation does not exist -for outdir in "${OUTDIRS[@]}"; do - if [ -e "${outdir}/SKIPATTEST.TAG" ]; then - echo "${outname}: SKIPPING: Output directory marked with SKIPATTEST.TAG file" - continue - fi - outname="$(out_name "$outdir")" - outsigdir="$(out_sig_dir "$outdir")" - if [ -e "$outsigdir" ]; then - echo "${outname}: SKIPPING: Signature directory already exists in the specified guix.sigs repository" - outdirs_already_attested_to+=("$outdir") - else - # Clean up incomplete sigdir if something fails (likely gpg) - trap 'rm -rf "$outsigdir"' ERR +Hint: You may wish to remove the existing attestations and their signatures by + invoking: - mkdir -p "$outsigdir" + rm '${PWD}/${1}'{,.asc} - ( - cd "$outdir" + Then try running this script again. - if [ -e inputs.SHA256SUMS ]; then - echo "${outname}: Including existent input SHA256SUMS" - cat inputs.SHA256SUMS >> "$outsigdir"/SHA256SUMS - fi +EOF +} - echo "${outname}: Hashing build outputs to produce SHA256SUMS" - files="$(find -L . -type f ! -iname '*.SHA256SUMS')" - if [ -n "$files" ]; then - cut -c3- <<< "$files" | env LC_ALL=C sort | xargs sha256sum >> "$outsigdir"/SHA256SUMS +echo "Attesting to build outputs for version: '${VERSION}'" +echo "" + +outsigdir="$GUIX_SIGS_REPO/$VERSION/$signer_name" +mkdir -p "$outsigdir" +( + cd "$outsigdir" + + temp_noncodesigned="$(mktemp)" + trap 'rm -rf -- "$temp_noncodesigned"' EXIT + + if (( ${#noncodesigned_fragments[@]} )); then + cat "${noncodesigned_fragments[@]}" \ + | sort -u \ + | sort -k2 \ + > "$temp_noncodesigned" + if [ -e noncodesigned.SHA256SUMS ]; then + # The SHA256SUMS already exists, make sure it's exactly what we + # expect, error out if not + if diff -u noncodesigned.SHA256SUMS "$temp_noncodesigned"; then + echo "A noncodesigned.SHA256SUMS file already exists for '${VERSION}' and is up-to-date." else - echo "ERR: ${outname}: No outputs found in '${outdir}'" + shasum_already_exists noncodesigned.SHA256SUMS exit 1 fi - ) - if [ -z "$NO_SIGN" ]; then - echo "${outname}: Signing SHA256SUMS to produce SHA256SUMS.asc" - gpg --detach-sign --local-user "$gpg_key_name" --armor --output "$outsigdir"/SHA256SUMS.asc "$outsigdir"/SHA256SUMS else - echo "${outname}: Not signing SHA256SUMS as \$NO_SIGN is not empty" + mv "$temp_noncodesigned" noncodesigned.SHA256SUMS fi - echo "" - - trap - ERR # Reset ERR trap + else + echo "ERR: No noncodesigned outputs found for '${VERSION}', exiting..." + exit 1 fi -done - -if (( ${#outdirs_already_attested_to[@]} )); then -# ...so that we can print them out nicely in a warning message -cat << EOF -WARN: Signature directories from '$signer_name' already exist in the specified - guix.sigs repository for the following output directories and were - skipped: + temp_codesigned="$(mktemp)" + trap 'rm -rf -- "$temp_codesigned"' EXIT + + if (( ${#codesigned_fragments[@]} )); then + # Note: all.SHA256SUMS attests to all of $sha256sum_fragments, but is + # not needed if there are no $codesigned_fragments + cat "${sha256sum_fragments[@]}" \ + | sort -u \ + | sort -k2 \ + > "$temp_codesigned" + if [ -e codesigned.SHA256SUMS ]; then + # The SHA256SUMS already exists, make sure it's exactly what we + # expect, error out if not + if diff -u all.SHA256SUMS "$temp_codesigned"; then + echo "An all.SHA256SUMS file already exists for '${VERSION}' and is up-to-date." 
+ else + shasum_already_exists all.SHA256SUMS + exit 1 + fi + else + mv "$temp_codesigned" codesigned.SHA256SUMS + fi + else + # It is fine to have the codesigned outputs be missing (perhaps the + # detached codesigs have not been published yet), just print a log + # message instead of erroring out + echo "INFO: No codesigned outputs found for '${VERSION}', skipping..." + fi -EOF -for outdir in "${outdirs_already_attested_to[@]}"; do - echo " '${outdir}'" - echo " Corresponds to: '$(out_sig_dir "$outdir")'" + if [ -z "$NO_SIGN" ]; then + echo "Signing SHA256SUMS to produce SHA256SUMS.asc" + for i in *.SHA256SUMS; do + if [ ! -e "$i".asc ]; then + gpg --detach-sign \ + --local-user "$gpg_key_name" \ + --armor \ + --output "$i".asc "$i" + else + echo "Signature already there" + fi + done + else + echo "Not signing SHA256SUMS as \$NO_SIGN is not empty" + fi echo "" -done -fi +) diff --git a/contrib/guix/guix-verify b/contrib/guix/guix-verify index 629050956c..a6e2c4065e 100755 --- a/contrib/guix/guix-verify +++ b/contrib/guix/guix-verify @@ -56,58 +56,87 @@ cmd_usage exit 1 fi -################ -# We should be able to find at least one output -################ +############## +## Verify ## +############## OUTSIGDIR_BASE="${GUIX_SIGS_REPO}/${VERSION}" -echo "Looking for output signature directories in '${OUTSIGDIR_BASE}'" +echo "Looking for signature directories in '${OUTSIGDIR_BASE}'" +echo "" + +# Usage: verify compare_manifest current_manifest +verify() { + local compare_manifest="$1" + local current_manifest="$2" + if ! gpg --quiet --batch --verify "$current_manifest".asc "$current_manifest" 1>&2; then + echo "ERR: Failed to verify GPG signature in '${current_manifest}'" + echo "" + echo "Hint: Either the signature is invalid or the public key is missing" + echo "" + elif ! diff --report-identical "$compare_manifest" "$current_manifest" 1>&2; then + echo "ERR: The SHA256SUMS attestation in these two directories differ:" + echo " '${compare_manifest}'" + echo " '${current_manifest}'" + echo "" + else + echo "Verified: '${current_manifest}'" + echo "" + fi +} shopt -s nullglob -OUTSIGDIRS=( "$OUTSIGDIR_BASE"/* ) # This expands to an array of directories... +all_noncodesigned=( "$OUTSIGDIR_BASE"/*/noncodesigned.SHA256SUMS ) shopt -u nullglob -if (( ${#OUTSIGDIRS[@]} )); then - echo "Found output signature directories:" - for outsigdir in "${OUTSIGDIRS[@]}"; do - echo " '$outsigdir'" +echo "--------------------" +echo "" +if (( ${#all_noncodesigned[@]} )); then + compare_noncodesigned="${all_noncodesigned[0]}" + + for current_manifest in "${all_noncodesigned[@]}"; do + verify "$compare_noncodesigned" "$current_manifest" done - echo + + echo "DONE: Checking output signatures for noncodesigned.SHA256SUMS" + echo "" else - echo "ERR: Could not find any output signature directories in ${OUTSIGDIR_BASE}" - exit 1 + echo "WARN: No signature directories with noncodesigned.SHA256SUMS found" + echo "" fi +shopt -s nullglob +all_all=( "$OUTSIGDIR_BASE"/*/all.SHA256SUMS ) +shopt -u nullglob -############## -## Verify ## -############## +echo "--------------------" +echo "" +if (( ${#all_all[@]} )); then + compare_all="${all_all[0]}" -# MAIN LOGIC: Loop through each output for VERSION and check that the SHA256SUMS -# and SHA256SUMS.asc file match between signers, using the first -# available signer as the arbitrary comparison base. 
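The rewritten guix-attest above assembles per-output `SHA256SUMS.part` fragments into `noncodesigned.SHA256SUMS` (and the codesigned manifest) with `sort -u | sort -k2`. A rough Python equivalent of just that merge step, for illustration; the real script additionally diffs against any existing manifest and GPG-signs the result, and the example path is an assumption.

```python
# Rough equivalent of `cat */SHA256SUMS.part | sort -u | sort -k2`:
# dedupe the fragment lines, then order by the file name in the second field.
from pathlib import Path

def merge_fragments(outdir_base):
    lines = set()
    for part in Path(outdir_base).glob("*/SHA256SUMS.part"):
        lines.update(l for l in part.read_text().splitlines() if l.strip())
    return sorted(lines, key=lambda l: l.split(maxsplit=1)[1])

# print("\n".join(merge_fragments("/path/to/OUTDIR_BASE")))
```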
-for outsigdir in "${OUTSIGDIRS[@]}"; do - echo "BEGIN: Checking output signatures for $(basename "$outsigdir")" - echo "" - signer_dirs=( "$outsigdir"/* ) # This expands to an array of directories... - compare_signer_dir="${signer_dirs[0]}" # ...we just want the first one - for current_signer_dir in "${signer_dirs[@]}"; do - if ! gpg --quiet --batch --verify "$current_signer_dir"/SHA256SUMS.asc "$current_signer_dir"/SHA256SUMS; then - echo "ERR: Failed to verify GPG signature in '${current_signer_dir}/SHA256SUMS.asc'" - echo "" - echo "Hint: Either the signature is invalid or the public key is missing" - echo "" - elif ! diff --report-identical "$compare_signer_dir"/SHA256SUMS "$current_signer_dir"/SHA256SUMS; then - echo "ERR: The SHA256SUMS attestation in these two directories differ:" - echo " '${compare_signer_dir}'" - echo " '${current_signer_dir}'" - echo "" - else - echo "Verified: '${current_signer_dir}'" - echo "" - fi + for current_manifest in "${all_all[@]}"; do + verify "$compare_all" "$current_manifest" done - echo "DONE: Checking output signatures for $(basename "$outsigdir")" + + # Sanity check: there should be no entries that exist in + # noncodesigned.SHA256SUMS that doesn't exist in all.SHA256SUMS + if [[ "$(comm -23 <(sort "$compare_noncodesigned") <(sort "$compare_all") | wc -c)" -ne 0 ]]; then + echo "ERR: There are unique lines in noncodesigned.SHA256SUMS which" + echo " do not exist in all.SHA256SUMS, something went very wrong." + exit 1 + fi + + echo "DONE: Checking output signatures for all.SHA256SUMS" echo "" +else + echo "WARN: No signature directories with all.SHA256SUMS found" + echo "" +fi + +echo "====================" +echo "" +if (( ${#all_noncodesigned[@]} + ${#all_all[@]} == 0 )); then + echo "ERR: Unable to perform any verifications as no signature directories" + echo " were found" echo "" -done + exit 1 +fi diff --git a/contrib/guix/libexec/build.sh b/contrib/guix/libexec/build.sh index 3073b41baf..6741328473 100755 --- a/contrib/guix/libexec/build.sh +++ b/contrib/guix/libexec/build.sh @@ -230,20 +230,7 @@ if [ ! -e "$GIT_ARCHIVE" ]; then git archive --prefix="${DISTNAME}/" --output="$GIT_ARCHIVE" HEAD fi -# tmpdir="$(mktemp -d)" -# ( -# cd "$tmpdir" -# mkdir -p inputs -# ln -sf --target-directory=inputs "$GIT_ARCHIVE" - -# mkdir -p "$OUTDIR" -# find -L inputs -type f -print0 | xargs -0 sha256sum > "${OUTDIR}/inputs.SHA256SUMS" -# ) - mkdir -p "$OUTDIR" -cat << EOF > "$OUTDIR"/inputs.SHA256SUMS -$(sha256sum "$GIT_ARCHIVE" | cut -d' ' -f1) inputs/$(basename "$GIT_ARCHIVE") -EOF ########################### # Binary Tarball Building # @@ -450,3 +437,13 @@ mkdir -p "$DISTSRC" rm -rf "$ACTUAL_OUTDIR" mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \ || ( rm -rf "$ACTUAL_OUTDIR" && exit 1 ) + +( + cd /outdir-base + { + echo "$GIT_ARCHIVE" + find "$ACTUAL_OUTDIR" -type f + } | xargs realpath --relative-base="$PWD" \ + | xargs sha256sum \ + | sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part +) diff --git a/contrib/guix/libexec/codesign.sh b/contrib/guix/libexec/codesign.sh index 1822da7ca4..b1eec686ec 100755 --- a/contrib/guix/libexec/codesign.sh +++ b/contrib/guix/libexec/codesign.sh @@ -55,10 +55,6 @@ if [ ! 
-e "$CODESIGNATURE_GIT_ARCHIVE" ]; then fi mkdir -p "$OUTDIR" -cat << EOF > "$OUTDIR"/inputs.SHA256SUMS -$(sha256sum "$UNSIGNED_TARBALL" | cut -d' ' -f1) inputs/$(basename "$UNSIGNED_TARBALL") -$(sha256sum "$CODESIGNATURE_GIT_ARCHIVE" | cut -d' ' -f1) inputs/$(basename "$CODESIGNATURE_GIT_ARCHIVE") -EOF mkdir -p "$DISTSRC" ( @@ -103,3 +99,14 @@ mkdir -p "$DISTSRC" rm -rf "$ACTUAL_OUTDIR" mv --no-target-directory "$OUTDIR" "$ACTUAL_OUTDIR" \ || ( rm -rf "$ACTUAL_OUTDIR" && exit 1 ) + +( + cd /outdir-base + { + echo "$UNSIGNED_TARBALL" + echo "$CODESIGNATURE_GIT_ARCHIVE" + find "$ACTUAL_OUTDIR" -type f + } | xargs realpath --relative-base="$PWD" \ + | xargs sha256sum \ + | sponge "$ACTUAL_OUTDIR"/SHA256SUMS.part +) diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index d2bc789b60..ba168a2a4a 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -22,6 +22,7 @@ (gnu packages linux) (gnu packages llvm) (gnu packages mingw) + (gnu packages moreutils) (gnu packages perl) (gnu packages pkg-config) (gnu packages python) @@ -205,7 +206,7 @@ chain for " target " development.")) (define-public lief (package (name "python-lief") - (version "0.11.4") + (version "0.11.5") (source (origin (method git-fetch) @@ -215,7 +216,7 @@ chain for " target " development.")) (file-name (git-file-name name version)) (sha256 (base32 - "0h4kcwr9z478almjqhmils8imfpflzk0r7d05g4xbkdyknn162qf")))) + "0qahjfg1n0x76ps2mbyljvws1l3qhkqvmxqbahps4qgywl2hbdkj")))) (build-system python-build-system) (native-inputs `(("cmake" ,cmake))) @@ -524,7 +525,7 @@ and endian independent.") (license license:expat))) (define-public python-signapple - (let ((commit "4ff1c1754e37042c002a3f6375c47fd931f2030b")) + (let ((commit "b084cbbf44d5330448ffce0c7d118f75781b64bd")) (package (name "python-signapple") (version (git-version "0.1" "1" commit)) @@ -532,12 +533,12 @@ and endian independent.") (origin (method git-fetch) (uri (git-reference - (url "https://github.com/dongcarl/signapple") + (url "https://github.com/achow101/signapple") (commit commit))) (file-name (git-file-name name commit)) (sha256 (base32 - "043czyzfm04rcx5xsp59vsppla3vm5g45dbp1npy2hww4066rlnh")))) + "0k7inccl2mzac3wq4asbr0kl8s4cghm8982z54kfascqg45shv01")))) (build-system python-build-system) (propagated-inputs `(("python-asn1crypto" ,python-asn1crypto) @@ -572,6 +573,7 @@ inspecting signatures in Mach-O binaries.") patch gawk sed + moreutils ;; Compression and archiving tar bzip2 diff --git a/contrib/signet/README.md b/contrib/signet/README.md index 71dc2f9638..706b296c54 100644 --- a/contrib/signet/README.md +++ b/contrib/signet/README.md @@ -21,27 +21,28 @@ accept one claim per day. See `--password` above. miner ===== -To mine the first block in your custom chain, you can run: +You will first need to pick a difficulty target. Since signet chains are primarily protected by a signature rather than proof of work, there is no need to spend as much energy as possible mining, however you may wish to choose to spend more time than the absolute minimum. The calibrate subcommand can be used to pick a target appropriate for your hardware, eg: cd src/ - CLI="./bitcoin-cli -conf=mysignet.conf" - MINER="..contrib/signet/miner" + MINER="../contrib/signet/miner" GRIND="./bitcoin-util grind" - ADDR=$($CLI -signet getnewaddress) - $MINER --cli="$CLI" generate --grind-cmd="$GRIND" --address="$ADDR" --set-block-time=-1 - -This will mine a block with the current timestamp. 
If you want to backdate the chain, you can give a different timestamp to --set-block-time. - -You will then need to pick a difficulty target. Since signet chains are primarily protected by a signature rather than proof of work, there is no need to spend as much energy as possible mining, however you may wish to choose to spend more time than the absolute minimum. The calibrate subcommand can be used to pick a target, eg: - $MINER calibrate --grind-cmd="$GRIND" nbits=1e00f403 for 25s average mining time It defaults to estimating an nbits value resulting in 25s average time to find a block, but the --seconds parameter can be used to pick a different target, or the --nbits parameter can be used to estimate how long it will take for a given difficulty. -Using the --ongoing parameter will then cause the signet miner to create blocks indefinitely. It will pick the time between blocks so that difficulty is adjusted to match the provided --nbits value. +To mine the first block in your custom chain, you can run: - $MINER --cli="$CLI" generate --grind-cmd="$GRIND" --address="$ADDR" --nbits=1e00f403 --ongoing + CLI="./bitcoin-cli -conf=mysignet.conf" + ADDR=$($CLI -signet getnewaddress) + NBITS=1e00f403 + $MINER --cli="$CLI" generate --grind-cmd="$GRIND" --address="$ADDR" --nbits=$NBITS + +This will mine a single block with a backdated timestamp designed to allow 100 blocks to be mined as quickly as possible, so that it is possible to do transactions. + +Adding the --ongoing parameter will then cause the signet miner to create blocks indefinitely. It will pick the time between blocks so that difficulty is adjusted to match the provided --nbits value. + + $MINER --cli="$CLI" generate --grind-cmd="$GRIND" --address="$ADDR" --nbits=$NBITS --ongoing Other options ------------- @@ -50,9 +51,11 @@ The --debug and --quiet options are available to control how noisy the signet mi Instead of specifying --ongoing, you can specify --max-blocks=N to mine N blocks and stop. -Instead of using a single address, a ranged descriptor may be provided instead (via the --descriptor parameter), with the reward for the block at height H being sent to the H'th address generated from the descriptor. +The --set-block-time option is available to manually move timestamps forward or backward (subject to the rules that blocktime must be greater than mediantime, and dates can't be more than two hours in the future). It can only be used when mining a single block (ie, not when using --ongoing or --max-blocks greater than 1). + +Instead of using a single address, a ranged descriptor may be provided via the --descriptor parameter, with the reward for the block at height H being sent to the H'th address generated from the descriptor. -Instead of calculating a specific nbits value, --min-nbits can be specified instead, in which case the mininmum signet difficulty will be targeted. +Instead of calculating a specific nbits value, --min-nbits can be specified instead, in which case the minimum signet difficulty will be targeted. Signet's minimum difficulty corresponds to --nbits=1e0377ae. By default, the signet miner mines blocks at fixed intervals with minimal variation. If you want blocks to appear more randomly, as they do in mainnet, specify the --poisson option. @@ -76,5 +79,5 @@ These steps can instead be done explicitly: $MINER --cli="$CLI" solvepsbt --grind-cmd="$GRIND" | $CLI -signet -stdin submitblock -This is intended to allow you to replace part of the pipeline for further experimentation, if desired. 
+This is intended to allow you to replace part of the pipeline for further experimentation (eg, to sign the block with a hardware wallet). diff --git a/contrib/signet/miner b/contrib/signet/miner index a3fba49d0e..78e1fa5ecd 100755 --- a/contrib/signet/miner +++ b/contrib/signet/miner @@ -23,7 +23,7 @@ PATH_BASE_TEST_FUNCTIONAL = os.path.abspath(os.path.join(PATH_BASE_CONTRIB_SIGNE sys.path.insert(0, PATH_BASE_TEST_FUNCTIONAL) from test_framework.blocktools import WITNESS_COMMITMENT_HEADER, script_BIP34_coinbase_height # noqa: E402 -from test_framework.messages import CBlock, CBlockHeader, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, FromHex, ToHex, deser_string, hash256, ser_compact_size, ser_string, ser_uint256, uint256_from_str # noqa: E402 +from test_framework.messages import CBlock, CBlockHeader, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, from_hex, deser_string, hash256, ser_compact_size, ser_string, ser_uint256, tx_from_hex, uint256_from_str # noqa: E402 from test_framework.script import CScriptOp # noqa: E402 logging.basicConfig( @@ -37,7 +37,7 @@ RE_MULTIMINER = re.compile("^(\d+)(-(\d+))?/(\d+)$") # #### some helpers that could go into test_framework -# like FromHex, but without the hex part +# like from_hex, but without the hex part def FromBinary(cls, stream): """deserialize a binary stream (or bytes object) into an object""" # handle bytes object by turning it into a stream @@ -195,7 +195,7 @@ def finish_block(block, signet_solution, grind_cmd): headhex = CBlockHeader.serialize(block).hex() cmd = grind_cmd.split(" ") + [headhex] newheadhex = subprocess.run(cmd, stdout=subprocess.PIPE, input=b"", check=True).stdout.strip() - newhead = FromHex(CBlockHeader(), newheadhex.decode('utf8')) + newhead = from_hex(CBlockHeader(), newheadhex.decode('utf8')) block.nNonce = newhead.nNonce block.rehash() return block @@ -216,7 +216,7 @@ def generate_psbt(tmpl, reward_spk, *, blocktime=None): block.nTime = tmpl["mintime"] block.nBits = int(tmpl["bits"], 16) block.nNonce = 0 - block.vtx = [cbtx] + [FromHex(CTransaction(), t["data"]) for t in tmpl["transactions"]] + block.vtx = [cbtx] + [tx_from_hex(t["data"]) for t in tmpl["transactions"]] witnonce = 0 witroot = block.calc_witness_merkle_root() @@ -274,7 +274,7 @@ def do_genpsbt(args): def do_solvepsbt(args): block, signet_solution = do_decode_psbt(sys.stdin.read()) block = finish_block(block, signet_solution, args.grind_cmd) - print(ToHex(block)) + print(block.serialize().hex()) def nbits_to_target(nbits): shift = (nbits >> 24) & 0xff @@ -428,10 +428,13 @@ def do_generate(args): action_time = now is_mine = True elif bestheader["height"] == 0: - logging.error("When mining first block in a new signet, must specify --set-block-time") - return 1 + time_delta = next_block_delta(int(bestheader["bits"], 16), bci["bestblockhash"], ultimate_target, args.poisson) + time_delta *= 100 # 100 blocks + logging.info("Backdating time for first block to %d minutes ago" % (time_delta/60)) + mine_time = now - time_delta + action_time = now + is_mine = True else: - time_delta = next_block_delta(int(bestheader["bits"], 16), bci["bestblockhash"], ultimate_target, args.poisson) mine_time = bestheader["time"] + time_delta @@ -500,7 +503,7 @@ def do_generate(args): block = finish_block(block, signet_solution, args.grind_cmd) # submit block - r = args.bcli("-stdin", "submitblock", input=ToHex(block).encode('utf8')) + r = args.bcli("-stdin", "submitblock", input=block.serialize().hex().encode('utf8')) # report bstr = "block" if is_mine else "backup 
block" @@ -520,12 +523,11 @@ def do_calibrate(args): sys.stderr.write("Can only specify one of --nbits or --seconds\n") return 1 if args.nbits is not None and len(args.nbits) != 8: - sys.stderr.write("Must specify 8 hex digits for --nbits") + sys.stderr.write("Must specify 8 hex digits for --nbits\n") return 1 TRIALS = 600 # gets variance down pretty low TRIAL_BITS = 0x1e3ea75f # takes about 5m to do 600 trials - #TRIAL_BITS = 0x1e7ea75f # XXX header = CBlockHeader() header.nBits = TRIAL_BITS @@ -533,23 +535,14 @@ def do_calibrate(args): start = time.time() count = 0 - #CHECKS=[] for i in range(TRIALS): header.nTime = i header.nNonce = 0 headhex = header.serialize().hex() cmd = args.grind_cmd.split(" ") + [headhex] newheadhex = subprocess.run(cmd, stdout=subprocess.PIPE, input=b"", check=True).stdout.strip() - #newhead = FromHex(CBlockHeader(), newheadhex.decode('utf8')) - #count += newhead.nNonce - #if (i+1) % 100 == 0: - # CHECKS.append((i+1, count, time.time()-start)) - - #print("checks =", [c*1.0 / (b*targ*2**-256) for _,b,c in CHECKS]) avg = (time.time() - start) * 1.0 / TRIALS - #exp_count = 2**256 / targ * TRIALS - #print("avg =", avg, "count =", count, "exp_count =", exp_count) if args.nbits is not None: want_targ = nbits_to_target(int(args.nbits,16)) @@ -590,7 +583,6 @@ def main(): generate.add_argument("--nbits", default=None, type=str, help="Target nBits (specify difficulty)") generate.add_argument("--min-nbits", action="store_true", help="Target minimum nBits (use min difficulty)") generate.add_argument("--poisson", action="store_true", help="Simulate randomised block times") - #generate.add_argument("--signcmd", default=None, type=str, help="Alternative signing command") generate.add_argument("--multiminer", default=None, type=str, help="Specify which set of blocks to mine (eg: 1-40/100 for the first 40%%, 2/3 for the second 3rd)") generate.add_argument("--backup-delay", default=300, type=int, help="Seconds to delay before mining blocks reserved for other miners (default=300)") generate.add_argument("--standby-delay", default=0, type=int, help="Seconds to delay before mining blocks (default=0)") @@ -605,7 +597,7 @@ def main(): sp.add_argument("--descriptor", default=None, type=str, help="Descriptor for block reward payment") for sp in [solvepsbt, generate, calibrate]: - sp.add_argument("--grind-cmd", default=None, type=str, help="Command to grind a block header for proof-of-work") + sp.add_argument("--grind-cmd", default=None, type=str, required=(sp==calibrate), help="Command to grind a block header for proof-of-work") args = parser.parse_args(sys.argv[1:]) diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk index 52df26eb50..165c27f5e3 100644 --- a/depends/packages/qt.mk +++ b/depends/packages/qt.mk @@ -10,7 +10,7 @@ $(package)_patches=fix_qt_pkgconfig.patch mac-qmake.conf fix_no_printer.patch no $(package)_patches+= fix_android_qmake_conf.patch fix_android_jni_static.patch dont_hardcode_pwd.patch $(package)_patches+= drop_lrelease_dependency.patch no_sdk_version_check.patch $(package)_patches+= fix_lib_paths.patch fix_android_pch.patch -$(package)_patches+= qtbase-moc-ignore-gcc-macro.patch +$(package)_patches+= qtbase-moc-ignore-gcc-macro.patch fix_limits_header.patch $(package)_qttranslations_file_name=qttranslations-$($(package)_suffix) $(package)_qttranslations_sha256_hash=577b0668a777eb2b451c61e8d026d79285371597ce9df06b6dee6c814164b7c3 @@ -231,6 +231,7 @@ define $(package)_preprocess_cmds patch -p1 -i $($(package)_patch_dir)/no_sdk_version_check.patch && \ 
patch -p1 -i $($(package)_patch_dir)/fix_lib_paths.patch && \ patch -p1 -i $($(package)_patch_dir)/qtbase-moc-ignore-gcc-macro.patch && \ + patch -p1 -i $($(package)_patch_dir)/fix_limits_header.patch && \ sed -i.old "s|updateqm.commands = \$$$$\$$$$LRELEASE|updateqm.commands = $($(package)_extract_dir)/qttools/bin/lrelease|" qttranslations/translations/translations.pro && \ mkdir -p qtbase/mkspecs/macx-clang-linux &&\ cp -f qtbase/mkspecs/macx-clang/qplatformdefs.h qtbase/mkspecs/macx-clang-linux/ &&\ diff --git a/depends/patches/qt/fix_limits_header.patch b/depends/patches/qt/fix_limits_header.patch new file mode 100644 index 0000000000..e4313770e5 --- /dev/null +++ b/depends/patches/qt/fix_limits_header.patch @@ -0,0 +1,44 @@ +Fix compiling with GCC 11 + +See: https://bugreports.qt.io/browse/QTBUG-90395. + +Upstream commits: + - Qt 5.15 -- unavailable as open source + - Qt 6.0: b2af6332ea37e45ab230a7a5d2d278f86d961b83 + - Qt 6.1: 9c56d4da2ff631a8c1c30475bd792f6c86bda53c + +--- old/qtbase/src/corelib/global/qendian.h ++++ new/qtbase/src/corelib/global/qendian.h +@@ -44,6 +44,8 @@ + #include <QtCore/qfloat16.h> + #include <QtCore/qglobal.h> + ++#include <limits> ++ + // include stdlib.h and hope that it defines __GLIBC__ for glibc-based systems + #include <stdlib.h> + #include <string.h> + +--- old/qtbase/src/corelib/tools/qbytearraymatcher.h ++++ new/qtbase/src/corelib/tools/qbytearraymatcher.h +@@ -42,6 +42,8 @@ + + #include <QtCore/qbytearray.h> + ++#include <limits> ++ + QT_BEGIN_NAMESPACE + + + +--- old/qtbase/src/tools/moc/generator.cpp ++++ new/qtbase/src/tools/moc/generator.cpp +@@ -40,6 +40,8 @@ + #include <QtCore/qplugin.h> + #include <QtCore/qstringview.h> + ++#include <limits> ++ + #include <math.h> + #include <stdio.h> + diff --git a/doc/README.md b/doc/README.md index c629c2ccfa..38f6b1d327 100644 --- a/doc/README.md +++ b/doc/README.md @@ -30,8 +30,8 @@ Drag Bitcoin Core to your applications folder, and then run Bitcoin Core. * See the documentation at the [Bitcoin Wiki](https://en.bitcoin.it/wiki/Main_Page) for help and more information. -* Ask for help on the [Bitcoin StackExchange](https://bitcoin.stackexchange.com) -* Ask for help on [#bitcoin](https://webchat.freenode.net/#bitcoin) on Freenode. If you don't have an IRC client, use [webchat here](https://webchat.freenode.net/#bitcoin). +* Ask for help on [Bitcoin StackExchange](https://bitcoin.stackexchange.com). +* Ask for help on #bitcoin on Libera Chat. If you don't have an IRC client, you can use [web.libera.chat](https://web.libera.chat/#bitcoin). * Ask for help on the [BitcoinTalk](https://bitcointalk.org/) forums, in the [Technical Support board](https://bitcointalk.org/index.php?board=4.0). Building @@ -68,20 +68,20 @@ The Bitcoin repo's [root README](/README.md) contains relevant information on th ### Resources * Discuss on the [BitcoinTalk](https://bitcointalk.org/) forums, in the [Development & Technical Discussion board](https://bitcointalk.org/index.php?board=6.0). -* Discuss project-specific development on #bitcoin-core-dev on Libera Chat. If you don't have an IRC client, use [webchat here](https://web.libera.chat/#bitcoin-core-dev). -* Discuss general Bitcoin development on #bitcoin-dev on Freenode. If you don't have an IRC client, use [webchat here](https://webchat.freenode.net/#bitcoin-dev). +* Discuss project-specific development on #bitcoin-core-dev on Libera Chat. If you don't have an IRC client, you can use [web.libera.chat](https://web.libera.chat/#bitcoin-core-dev). 
### Miscellaneous - [Assets Attribution](assets-attribution.md) - [bitcoin.conf Configuration File](bitcoin-conf.md) - [Files](files.md) - [Fuzz-testing](fuzzing.md) +- [I2P Support](i2p.md) +- [Init Scripts (systemd/upstart/openrc)](init.md) +- [PSBT support](psbt.md) - [Reduce Memory](reduce-memory.md) - [Reduce Traffic](reduce-traffic.md) - [Tor Support](tor.md) -- [Init Scripts (systemd/upstart/openrc)](init.md) - [ZMQ](zmq.md) -- [PSBT support](psbt.md) License --------------------- diff --git a/doc/dependencies.md b/doc/dependencies.md index 9754952221..66c5a76b3b 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -6,8 +6,8 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct | Dependency | Version used | Minimum required | CVEs | Shared | [Bundled Qt library](https://doc.qt.io/qt-5/configure-options.html#third-party-libraries) | | --- | --- | --- | --- | --- | --- | | Berkeley DB | [4.8.30](https://www.oracle.com/technetwork/database/database-technologies/berkeleydb/downloads/index.html) | 4.8.x | No | | | -| Boost | [1.71.0](https://www.boost.org/users/download/) | [1.58.0](https://github.com/bitcoin/bitcoin/pull/19667) | No | | | -| Clang | | [5.0+](https://releases.llvm.org/download.html) (C++17 support) | | | | +| Boost | [1.71.0](https://www.boost.org/users/download/) | [1.64.0](https://github.com/bitcoin/bitcoin/pull/22320) | No | | | +| Clang<sup>[ \* ](#note1)</sup> | | [5.0+](https://releases.llvm.org/download.html) (C++17 support) | | | | | Expat | [2.2.7](https://libexpat.github.io/) | | No | Yes | | | fontconfig | [2.12.1](https://www.freedesktop.org/software/fontconfig/release/) | | No | Yes | | | FreeType | [2.7.1](https://download.savannah.gnu.org/releases/freetype) | | No | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) (Android only) | @@ -28,6 +28,8 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct | ZeroMQ | [4.3.1](https://github.com/zeromq/libzmq/releases) | 4.0.0 | No | | | | zlib | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) | +<a name="note1">Note \*</a> : When compiling with `-stdlib=libc++`, the minimum supported libc++ version is 7.0. + Controlling dependencies ------------------------ Some dependencies are not needed in all configurations. The following are some factors that affect the dependency list. diff --git a/doc/developer-notes.md b/doc/developer-notes.md index 2130332eec..c3a63b3523 100644 --- a/doc/developer-notes.md +++ b/doc/developer-notes.md @@ -1160,13 +1160,6 @@ A few guidelines for introducing and reviewing new RPC interfaces: - *Rationale*: If not, the call can not be used with name-based arguments. -- Set okSafeMode in the RPC command table to a sensible value: safe mode is when the - blockchain is regarded to be in a confused state, and the client deems it unsafe to - do anything irreversible such as send. Anything that just queries should be permitted. - - - *Rationale*: Troubleshooting a node in safe mode is difficult if half the - RPCs don't work. - - Add every non-string RPC argument `(method, idx, name)` to the table `vRPCConvertParams` in `rpc/client.cpp`. 
- *Rationale*: `bitcoin-cli` and the GUI debug console use this table to determine how to diff --git a/doc/files.md b/doc/files.md index 353efe348d..e670d77ae5 100644 --- a/doc/files.md +++ b/doc/files.md @@ -56,7 +56,8 @@ Subdirectory | File(s) | Description `indexes/coinstats/db/` | LevelDB database | Coinstats index; *optional*, used if `-coinstatsindex=1` `wallets/` | | [Contains wallets](#multi-wallet-environment); can be specified by `-walletdir` option; if `wallets/` subdirectory does not exist, wallets reside in the [data directory](#data-directory-location) `./` | `anchors.dat` | Anchor IP address database, created on shutdown and deleted at startup. Anchors are last known outgoing block-relay-only peers that are tried to re-connect to on startup -`./` | `banlist.dat` | Stores the IPs/subnets of banned nodes +`./` | `banlist.dat` | Stores the addresses/subnets of banned nodes (deprecated). `bitcoind` or `bitcoin-qt` no longer save the banlist to this file, but read it on startup if `banlist.json` is not present. +`./` | `banlist.json` | Stores the addresses/subnets of banned nodes. `./` | `bitcoin.conf` | User-defined [configuration settings](bitcoin-conf.md) for `bitcoind` or `bitcoin-qt`. File is not written to by the software and must be created manually. Path can be specified by `-conf` option `./` | `bitcoind.pid` | Stores the process ID (PID) of `bitcoind` or `bitcoin-qt` while running; created at start and deleted on shutdown; can be specified by `-pid` option `./` | `debug.log` | Contains debug information and general logging generated by `bitcoind` or `bitcoin-qt`; can be specified by `-debuglogfile` option @@ -109,7 +110,7 @@ Subdirectory | File | Description ## Legacy subdirectories and files -These subdirectories and files are no longer used by the Bitcoin Core: +These subdirectories and files are no longer used by Bitcoin Core: Path | Description | Repository notes ---------------|-------------|----------------- diff --git a/doc/i2p.md b/doc/i2p.md new file mode 100644 index 0000000000..8b4607208a --- /dev/null +++ b/doc/i2p.md @@ -0,0 +1,72 @@ +# I2P support in Bitcoin Core + +It is possible to run Bitcoin Core as an +[I2P (Invisible Internet Project)](https://en.wikipedia.org/wiki/I2P) +service and connect to such services. + +This [glossary](https://geti2p.net/en/about/glossary) may be useful to get +started with I2P terminology. + +## Run Bitcoin Core with an I2P router (proxy) + +A running I2P router (proxy) with [SAM](https://geti2p.net/en/docs/api/samv3) +enabled is required (there is an [official one](https://geti2p.net) and +[a few alternatives](https://en.wikipedia.org/wiki/I2P#Routers)). Notice the IP +address and port the SAM proxy is listening to; usually, it is +`127.0.0.1:7656`. Once it is up and running with SAM enabled, use the following +Bitcoin Core options: + +``` +-i2psam=<ip:port> + I2P SAM proxy to reach I2P peers and accept I2P connections (default: + none) + +-i2pacceptincoming + If set and -i2psam is also set then incoming I2P connections are + accepted via the SAM proxy. If this is not set but -i2psam is set + then only outgoing connections will be made to the I2P network. + Ignored if -i2psam is not set. 
Listening for incoming I2P + connections is done through the SAM proxy, not by binding to a + local address and port (default: 1) +``` + +In a typical situation, this suffices: + +``` +bitcoind -i2psam=127.0.0.1:7656 +``` + +The first time Bitcoin Core connects to the I2P router, its I2P address (and +corresponding private key) will be automatically generated and saved in a file +named `i2p_private_key` in the Bitcoin Core data directory. + +## Additional configuration options related to I2P + +You may set the `debug=i2p` config logging option to have additional +information in the debug log about your I2P configuration and connections. Run +`bitcoin-cli help logging` for more information. + +It is possible to restrict outgoing connections in the usual way with +`onlynet=i2p`. I2P support was added to Bitcoin Core in version 22.0 (mid 2021) +and there may be fewer I2P peers than Tor or IP ones. Therefore, using +`onlynet=i2p` alone (without other `onlynet=`) may make a node more susceptible +to [Sybil attacks](https://en.bitcoin.it/wiki/Weaknesses#Sybil_attack). Use +`bitcoin-cli -addrinfo` to see the number of I2P addresses known to your node. + +## I2P related information in Bitcoin Core + +There are several ways to see your I2P address in Bitcoin Core: +- in the debug log (grep for `AddLocal`, the I2P address ends in `.b32.i2p`) +- in the output of the `getnetworkinfo` RPC in the "localaddresses" section +- in the output of `bitcoin-cli -netinfo` peer connections dashboard + +To see which I2P peers your node is connected to, use `bitcoin-cli -netinfo 4` +or the `getpeerinfo` RPC (e.g. `bitcoin-cli getpeerinfo`). + +To see which I2P addresses your node knows, use the `getnodeaddresses 0 i2p` +RPC. + +## Compatibility + +Bitcoin Core uses the [SAM v3.1](https://geti2p.net/en/docs/api/samv3) protocol +to connect to the I2P network. Any I2P router that supports it can be used. diff --git a/doc/release-notes-20833.md b/doc/release-notes-20833.md deleted file mode 100644 index 9a02bbd275..0000000000 --- a/doc/release-notes-20833.md +++ /dev/null @@ -1,12 +0,0 @@ -Updated RPCs ------------- - -- The `testmempoolaccept` RPC now accepts multiple transactions (still experimental at the moment, - API may be unstable). This is intended for testing transaction packages with dependency - relationships; it is not recommended for batch-validating independent transactions. In addition to - mempool policy, package policies apply: the list cannot contain more than 25 transactions or have a - total size exceeding 101K virtual bytes, and cannot conflict with (spend the same inputs as) each other or - the mempool, even if it would be a valid BIP125 replace-by-fee. There are some known limitations to - the accuracy of the test accept: it's possible for `testmempoolaccept` to return "allowed"=True for a - group of transactions, but "too-long-mempool-chain" if they are actually submitted. (#20833) - diff --git a/doc/release-notes-20867.md b/doc/release-notes-20867.md deleted file mode 100644 index 60eed6838f..0000000000 --- a/doc/release-notes-20867.md +++ /dev/null @@ -1,11 +0,0 @@ -Wallet ------- - -- We now support up to 20 keys in `multi()` and `sortedmulti()` descriptors - under `wsh()`. (#20867) - -Updated RPCs ------------- - -- `addmultisigaddress` and `createmultisig` now support up to 20 keys for - Segwit addresses. 
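To complement the I2P documentation added above: before pointing `-i2psam` at a router, it can save time to confirm that a SAM-capable service is actually listening on the advertised `127.0.0.1:7656`. The probe below is a minimal sketch and not part of this change set; the `HELLO` request and reply strings follow my reading of the SAM v3 specification, not anything in the Bitcoin Core tree, so treat the exact wire format as an assumption.

```python
import socket

def sam_proxy_version(host="127.0.0.1", port=7656, timeout=5.0):
    """Probe an I2P SAM proxy with the SAM v3 HELLO handshake.

    Returns the negotiated version string on success, or None if nothing
    is listening or the peer does not answer like a SAM v3 router.
    """
    try:
        with socket.create_connection((host, port), timeout=timeout) as s:
            s.sendall(b"HELLO VERSION MIN=3.1 MAX=3.1\n")
            reply = s.recv(1024).decode("ascii", "replace").strip()
    except OSError:
        return None
    # Expected shape (per the SAM spec): "HELLO REPLY RESULT=OK VERSION=3.1"
    if reply.startswith("HELLO REPLY") and "RESULT=OK" in reply:
        for token in reply.split():
            if token.startswith("VERSION="):
                return token.split("=", 1)[1]
    return None

if __name__ == "__main__":
    version = sam_proxy_version()
    print(f"SAM proxy answered with version {version}" if version
          else "no SAM v3 proxy answering on 127.0.0.1:7656")
```

If the probe reports version 3.1 or later, `bitcoind -i2psam=127.0.0.1:7656` should be able to use the same endpoint.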
diff --git a/doc/release-notes.md b/doc/release-notes.md index 53106c9f82..dc28ccb9ed 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -1,3 +1,5 @@ +# Release notes now being edited on https://github.com/bitcoin-core/bitcoin-devwiki/wiki/22.0-Release-Notes-draft + *After branching off for a major version release of Bitcoin Core, use this template to create the initial release notes draft.* @@ -59,6 +61,13 @@ Notable changes P2P and network changes ----------------------- +- This release removes support for Tor version 2 hidden services in favor of Tor + v3 only, as the Tor network [dropped support for Tor + v2](https://blog.torproject.org/v2-deprecation-timeline) with the release of + Tor version 0.4.6. Henceforth, Bitcoin Core ignores Tor v2 addresses; it + neither rumors them over the network to other peers, nor stores them in memory + or to `peers.dat`. (#22050) + - Added NAT-PMP port mapping support via [`libnatpmp`](https://miniupnp.tuxfamily.org/libnatpmp.html). (#18077) @@ -114,6 +123,18 @@ Updated RPCs - `getnodeaddresses` now also accepts a "network" argument (ipv4, ipv6, onion, or i2p) to return only addresses of the specified network. (#21843) +- The `testmempoolaccept` RPC now accepts multiple transactions (still experimental at the moment, + API may be unstable). This is intended for testing transaction packages with dependency + relationships; it is not recommended for batch-validating independent transactions. In addition to + mempool policy, package policies apply: the list cannot contain more than 25 transactions or have a + total size exceeding 101K virtual bytes, and cannot conflict with (spend the same inputs as) each other or + the mempool, even if it would be a valid BIP125 replace-by-fee. There are some known limitations to + the accuracy of the test accept: it's possible for `testmempoolaccept` to return "allowed"=True for a + group of transactions, but "too-long-mempool-chain" if they are actually submitted. (#20833) + +- `addmultisigaddress` and `createmultisig` now support up to 20 keys for + Segwit addresses. (#20867) + Changes to Wallet or GUI related RPCs can be found in the GUI or Wallet section below. New RPCs @@ -142,8 +163,12 @@ Tools and Utilities - A new CLI `-addrinfo` command returns the number of addresses known to the node per network type (including Tor v2 versus v3) and total. This can be useful to see if the node knows enough addresses in a network to use options - like `-onlynet=<network>` or to upgrade to current and future Tor releases - that support Tor v3 addresses only. (#21595) + like `-onlynet=<network>` or to upgrade to this release of Bitcoin Core 22.0 + that supports Tor v3 only. (#21595) + +- A new `-rpcwaittimeout` argument to `bitcoin-cli` sets the timeout + in seconds to use with `-rpcwait`. If the timeout expires, + `bitcoin-cli` will report a failure. (#21056) Wallet ------ @@ -160,6 +185,9 @@ Wallet Note that the resulting transaction may become invalid if one of the unsafe inputs disappears. If that happens, the transaction must be funded with different inputs and republished. (#21359) +- We now support up to 20 keys in `multi()` and `sortedmulti()` descriptors + under `wsh()`. (#20867) + GUI changes ----------- diff --git a/doc/tor.md b/doc/tor.md index 2640a6109b..7d134b64e0 100644 --- a/doc/tor.md +++ b/doc/tor.md @@ -5,6 +5,14 @@ It is possible to run Bitcoin Core as a Tor onion service, and connect to such s The following directions assume you have a Tor proxy running on port 9050. 
Many distributions default to having a SOCKS proxy listening on port 9050, but others may not. In particular, the Tor Browser Bundle defaults to listening on port 9150. See [Tor Project FAQ:TBBSocksPort](https://www.torproject.org/docs/faq.html.en#TBBSocksPort) for how to properly configure Tor. +## Compatibility + +- Starting with version 22.0, Bitcoin Core only supports Tor version 3 hidden + services (Tor v3). Tor v2 addresses are ignored by Bitcoin Core and neither + relayed nor stored. + +- Tor removed v2 support beginning with version 0.4.6. + ## How to see information about your Tor configuration via Bitcoin Core There are several ways to see your local onion address in Bitcoin Core: @@ -18,7 +26,7 @@ information in the debug log about your Tor configuration. CLI `-addrinfo` returns the number of addresses known to your node per network type, including Tor v2 and v3. This is useful to see how many onion addresses are known to your node for `-onlynet=onion` and how many Tor v3 addresses it -knows when upgrading to current and future Tor releases that support Tor v3 only. +knows when upgrading to Bitcoin Core v22.0 and up that supports Tor v3 only. ## 1. Run Bitcoin Core behind a Tor proxy diff --git a/src/Makefile.am b/src/Makefile.am index 66cb7cec2a..e2ed70556d 100644 --- a/src/Makefile.am +++ b/src/Makefile.am @@ -351,6 +351,7 @@ libbitcoin_server_a_SOURCES = \ node/ui_interface.cpp \ noui.cpp \ policy/fees.cpp \ + policy/packages.cpp \ policy/rbf.cpp \ policy/settings.cpp \ pow.cpp \ diff --git a/src/Makefile.test.include b/src/Makefile.test.include index 105d09f730..fc2fd80166 100644 --- a/src/Makefile.test.include +++ b/src/Makefile.test.include @@ -35,11 +35,12 @@ BITCOIN_TEST_SUITE = \ $(TEST_UTIL_H) FUZZ_SUITE_LD_COMMON = \ + $(LIBTEST_UTIL) \ + $(LIBTEST_FUZZ) \ $(LIBBITCOIN_SERVER) \ + $(LIBBITCOIN_WALLET) \ $(LIBBITCOIN_COMMON) \ $(LIBBITCOIN_UTIL) \ - $(LIBTEST_UTIL) \ - $(LIBTEST_FUZZ) \ $(LIBBITCOIN_CONSENSUS) \ $(LIBBITCOIN_CRYPTO) \ $(LIBBITCOIN_CLI) \ @@ -160,7 +161,6 @@ BITCOIN_TESTS += \ wallet/test/scriptpubkeyman_tests.cpp FUZZ_SUITE_LD_COMMON +=\ - $(LIBBITCOIN_WALLET) \ $(SQLITE_LIBS) \ $(BDB_LIBS) diff --git a/src/addrdb.cpp b/src/addrdb.cpp index c376aced10..b8fd019bab 100644 --- a/src/addrdb.cpp +++ b/src/addrdb.cpp @@ -11,19 +11,78 @@ #include <cstdint> #include <hash.h> #include <logging/timer.h> +#include <netbase.h> #include <random.h> #include <streams.h> #include <tinyformat.h> +#include <univalue.h> +#include <util/settings.h> #include <util/system.h> +CBanEntry::CBanEntry(const UniValue& json) + : nVersion(json["version"].get_int()), nCreateTime(json["ban_created"].get_int64()), + nBanUntil(json["banned_until"].get_int64()) +{ +} + +UniValue CBanEntry::ToJson() const +{ + UniValue json(UniValue::VOBJ); + json.pushKV("version", nVersion); + json.pushKV("ban_created", nCreateTime); + json.pushKV("banned_until", nBanUntil); + return json; +} + namespace { +static const char* BANMAN_JSON_ADDR_KEY = "address"; + +/** + * Convert a `banmap_t` object to a JSON array. + * @param[in] bans Bans list to convert. + * @return a JSON array, similar to the one returned by the `listbanned` RPC. Suitable for + * passing to `BanMapFromJson()`. 
+ */ +UniValue BanMapToJson(const banmap_t& bans) +{ + UniValue bans_json(UniValue::VARR); + for (const auto& it : bans) { + const auto& address = it.first; + const auto& ban_entry = it.second; + UniValue j = ban_entry.ToJson(); + j.pushKV(BANMAN_JSON_ADDR_KEY, address.ToString()); + bans_json.push_back(j); + } + return bans_json; +} + +/** + * Convert a JSON array to a `banmap_t` object. + * @param[in] bans_json JSON to convert, must be as returned by `BanMapToJson()`. + * @param[out] bans Bans list to create from the JSON. + * @throws std::runtime_error if the JSON does not have the expected fields or they contain + * unparsable values. + */ +void BanMapFromJson(const UniValue& bans_json, banmap_t& bans) +{ + for (const auto& ban_entry_json : bans_json.getValues()) { + CSubNet subnet; + const auto& subnet_str = ban_entry_json[BANMAN_JSON_ADDR_KEY].get_str(); + if (!LookupSubNet(subnet_str, subnet)) { + throw std::runtime_error( + strprintf("Cannot parse banned address or subnet: %s", subnet_str)); + } + bans.insert_or_assign(subnet, CBanEntry{ban_entry_json}); + } +} + template <typename Stream, typename Data> bool SerializeDB(Stream& stream, const Data& data) { // Write and commit header, data try { - CHashWriter hasher(SER_DISK, CLIENT_VERSION); + CHashWriter hasher(stream.GetType(), stream.GetVersion()); stream << Params().MessageStart() << data; hasher << Params().MessageStart() << data; stream << hasher.GetHash(); @@ -35,7 +94,7 @@ bool SerializeDB(Stream& stream, const Data& data) } template <typename Data> -bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data& data) +bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data& data, int version) { // Generate random temporary filename uint16_t randv = 0; @@ -45,7 +104,7 @@ bool SerializeFileDB(const std::string& prefix, const fs::path& path, const Data // open temp output file, and associate with CAutoFile fs::path pathTmp = gArgs.GetDataDirNet() / tmpfn; FILE *file = fsbridge::fopen(pathTmp, "wb"); - CAutoFile fileout(file, SER_DISK, CLIENT_VERSION); + CAutoFile fileout(file, SER_DISK, version); if (fileout.IsNull()) { fileout.fclose(); remove(pathTmp); @@ -106,11 +165,11 @@ bool DeserializeDB(Stream& stream, Data& data, bool fCheckSum = true) } template <typename Data> -bool DeserializeFileDB(const fs::path& path, Data& data) +bool DeserializeFileDB(const fs::path& path, Data& data, int version) { // open input file, and associate with CAutoFile FILE* file = fsbridge::fopen(path, "rb"); - CAutoFile filein(file, SER_DISK, CLIENT_VERSION); + CAutoFile filein(file, SER_DISK, version); if (filein.IsNull()) { LogPrintf("Missing or invalid file %s\n", path.string()); return false; @@ -119,18 +178,54 @@ bool DeserializeFileDB(const fs::path& path, Data& data) } } // namespace -CBanDB::CBanDB(fs::path ban_list_path) : m_ban_list_path(std::move(ban_list_path)) +CBanDB::CBanDB(fs::path ban_list_path) + : m_banlist_dat(ban_list_path.string() + ".dat"), + m_banlist_json(ban_list_path.string() + ".json") { } bool CBanDB::Write(const banmap_t& banSet) { - return SerializeFileDB("banlist", m_ban_list_path, banSet); + std::vector<std::string> errors; + if (util::WriteSettings(m_banlist_json, {{JSON_KEY, BanMapToJson(banSet)}}, errors)) { + return true; + } + + for (const auto& err : errors) { + error("%s", err); + } + return false; } -bool CBanDB::Read(banmap_t& banSet) +bool CBanDB::Read(banmap_t& banSet, bool& dirty) { - return DeserializeFileDB(m_ban_list_path, banSet); + // If the JSON 
banlist does not exist, then try to read the non-upgraded banlist.dat. + if (!fs::exists(m_banlist_json)) { + // If this succeeds then we need to flush to disk in order to create the JSON banlist. + dirty = true; + return DeserializeFileDB(m_banlist_dat, banSet, CLIENT_VERSION); + } + + dirty = false; + + std::map<std::string, util::SettingsValue> settings; + std::vector<std::string> errors; + + if (!util::ReadSettings(m_banlist_json, settings, errors)) { + for (const auto& err : errors) { + LogPrintf("Cannot load banlist %s: %s\n", m_banlist_json.string(), err); + } + return false; + } + + try { + BanMapFromJson(settings[JSON_KEY], banSet); + } catch (const std::runtime_error& e) { + LogPrintf("Cannot parse banlist %s: %s\n", m_banlist_json.string(), e.what()); + return false; + } + + return true; } CAddrDB::CAddrDB() @@ -140,12 +235,12 @@ CAddrDB::CAddrDB() bool CAddrDB::Write(const CAddrMan& addr) { - return SerializeFileDB("peers", pathAddr, addr); + return SerializeFileDB("peers", pathAddr, addr, CLIENT_VERSION); } bool CAddrDB::Read(CAddrMan& addr) { - return DeserializeFileDB(pathAddr, addr); + return DeserializeFileDB(pathAddr, addr, CLIENT_VERSION); } bool CAddrDB::Read(CAddrMan& addr, CDataStream& ssPeers) @@ -161,13 +256,13 @@ bool CAddrDB::Read(CAddrMan& addr, CDataStream& ssPeers) void DumpAnchors(const fs::path& anchors_db_path, const std::vector<CAddress>& anchors) { LOG_TIME_SECONDS(strprintf("Flush %d outbound block-relay-only peer addresses to anchors.dat", anchors.size())); - SerializeFileDB("anchors", anchors_db_path, anchors); + SerializeFileDB("anchors", anchors_db_path, anchors, CLIENT_VERSION | ADDRV2_FORMAT); } std::vector<CAddress> ReadAnchors(const fs::path& anchors_db_path) { std::vector<CAddress> anchors; - if (DeserializeFileDB(anchors_db_path, anchors)) { + if (DeserializeFileDB(anchors_db_path, anchors, CLIENT_VERSION | ADDRV2_FORMAT)) { LogPrintf("Loaded %i addresses from %s\n", anchors.size(), anchors_db_path.filename()); } else { anchors.clear(); diff --git a/src/addrdb.h b/src/addrdb.h index 8953ebb169..399103c991 100644 --- a/src/addrdb.h +++ b/src/addrdb.h @@ -9,6 +9,7 @@ #include <fs.h> #include <net_types.h> // For banmap_t #include <serialize.h> +#include <univalue.h> #include <string> #include <vector> @@ -36,6 +37,13 @@ public: nCreateTime = nCreateTimeIn; } + /** + * Create a ban entry from JSON. + * @param[in] json A JSON representation of a ban entry, as created by `ToJson()`. + * @throw std::runtime_error if the JSON does not have the expected fields. + */ + explicit CBanEntry(const UniValue& json); + SERIALIZE_METHODS(CBanEntry, obj) { uint8_t ban_reason = 2; //! For backward compatibility @@ -48,6 +56,12 @@ public: nCreateTime = 0; nBanUntil = 0; } + + /** + * Generate a JSON representation of this ban entry. + * @return JSON suitable for passing to the `CBanEntry(const UniValue&)` constructor. + */ + UniValue ToJson() const; }; /** Access to the (IP) address database (peers.dat) */ @@ -62,15 +76,30 @@ public: static bool Read(CAddrMan& addr, CDataStream& ssPeers); }; -/** Access to the banlist database (banlist.dat) */ +/** Access to the banlist databases (banlist.json and banlist.dat) */ class CBanDB { private: - const fs::path m_ban_list_path; + /** + * JSON key under which the data is stored in the json database. 
+ */ + static constexpr const char* JSON_KEY = "banned_nets"; + + const fs::path m_banlist_dat; + const fs::path m_banlist_json; public: explicit CBanDB(fs::path ban_list_path); bool Write(const banmap_t& banSet); - bool Read(banmap_t& banSet); + + /** + * Read the banlist from disk. + * @param[out] banSet The loaded list. Set if `true` is returned, otherwise it is left + * in an undefined state. + * @param[out] dirty Indicates whether the loaded list needs flushing to disk. Set if + * `true` is returned, otherwise it is left in an undefined state. + * @return true on success + */ + bool Read(banmap_t& banSet, bool& dirty); }; /** diff --git a/src/addrman.cpp b/src/addrman.cpp index 14b412a038..8f702b5a8c 100644 --- a/src/addrman.cpp +++ b/src/addrman.cpp @@ -12,6 +12,8 @@ #include <cmath> #include <optional> +#include <unordered_map> +#include <unordered_set> int CAddrInfo::GetTriedBucket(const uint256& nKey, const std::vector<bool> &asmap) const { @@ -77,12 +79,14 @@ double CAddrInfo::GetChance(int64_t nNow) const CAddrInfo* CAddrMan::Find(const CNetAddr& addr, int* pnId) { - std::map<CNetAddr, int>::iterator it = mapAddr.find(addr); + AssertLockHeld(cs); + + const auto it = mapAddr.find(addr); if (it == mapAddr.end()) return nullptr; if (pnId) *pnId = (*it).second; - std::map<int, CAddrInfo>::iterator it2 = mapInfo.find((*it).second); + const auto it2 = mapInfo.find((*it).second); if (it2 != mapInfo.end()) return &(*it2).second; return nullptr; @@ -90,6 +94,8 @@ CAddrInfo* CAddrMan::Find(const CNetAddr& addr, int* pnId) CAddrInfo* CAddrMan::Create(const CAddress& addr, const CNetAddr& addrSource, int* pnId) { + AssertLockHeld(cs); + int nId = nIdCount++; mapInfo[nId] = CAddrInfo(addr, addrSource); mapAddr[addr] = nId; @@ -102,6 +108,8 @@ CAddrInfo* CAddrMan::Create(const CAddress& addr, const CNetAddr& addrSource, in void CAddrMan::SwapRandom(unsigned int nRndPos1, unsigned int nRndPos2) { + AssertLockHeld(cs); + if (nRndPos1 == nRndPos2) return; @@ -122,6 +130,8 @@ void CAddrMan::SwapRandom(unsigned int nRndPos1, unsigned int nRndPos2) void CAddrMan::Delete(int nId) { + AssertLockHeld(cs); + assert(mapInfo.count(nId) != 0); CAddrInfo& info = mapInfo[nId]; assert(!info.fInTried); @@ -136,6 +146,8 @@ void CAddrMan::Delete(int nId) void CAddrMan::ClearNew(int nUBucket, int nUBucketPos) { + AssertLockHeld(cs); + // if there is an entry in the specified bucket, delete it. 
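Stepping back to the `CBanDB` and `BanMapToJson()` changes above: the new `banlist.json` is written through `util::WriteSettings()` under a single `banned_nets` key, and each entry carries the `version`, `ban_created` and `banned_until` fields emitted by `CBanEntry::ToJson()` plus the `address` key added by `BanMapToJson()`. The sketch below builds and parses a document of that shape; it reflects my reading of the diff rather than code from the tree, and it does not model the exact formatting `WriteSettings()` produces.

```python
import json

def make_banlist(entries):
    """Build a banlist.json-shaped document from dicts with address/version/timestamps."""
    return {"banned_nets": [
        {
            "version": int(e["version"]),
            "ban_created": int(e["ban_created"]),
            "banned_until": int(e["banned_until"]),
            "address": e["address"],  # address or subnet string, e.g. "203.0.113.0/24"
        }
        for e in entries
    ]}

def parse_banlist(text):
    """Return {address_or_subnet: banned_until} from a banlist.json-like document."""
    doc = json.loads(text)
    return {b["address"]: b["banned_until"] for b in doc.get("banned_nets", [])}

if __name__ == "__main__":
    doc = make_banlist([{"address": "203.0.113.0/24", "version": 1,
                         "ban_created": 1626000000, "banned_until": 1626086400}])
    text = json.dumps(doc, indent=2)
    print(text)
    print(parse_banlist(text))
```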
if (vvNew[nUBucket][nUBucketPos] != -1) { int nIdDelete = vvNew[nUBucket][nUBucketPos]; @@ -151,6 +163,8 @@ void CAddrMan::ClearNew(int nUBucket, int nUBucketPos) void CAddrMan::MakeTried(CAddrInfo& info, int nId) { + AssertLockHeld(cs); + // remove the entry from all new buckets for (int bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; bucket++) { int pos = info.GetBucketPosition(nKey, true, bucket); @@ -199,6 +213,8 @@ void CAddrMan::MakeTried(CAddrInfo& info, int nId) void CAddrMan::Good_(const CService& addr, bool test_before_evict, int64_t nTime) { + AssertLockHeld(cs); + int nId; nLastGood = nTime; @@ -265,6 +281,8 @@ void CAddrMan::Good_(const CService& addr, bool test_before_evict, int64_t nTime bool CAddrMan::Add_(const CAddress& addr, const CNetAddr& source, int64_t nTimePenalty) { + AssertLockHeld(cs); + if (!addr.IsRoutable()) return false; @@ -338,6 +356,8 @@ bool CAddrMan::Add_(const CAddress& addr, const CNetAddr& source, int64_t nTimeP void CAddrMan::Attempt_(const CService& addr, bool fCountFailure, int64_t nTime) { + AssertLockHeld(cs); + CAddrInfo* pinfo = Find(addr); // if not found, bail out @@ -360,7 +380,9 @@ void CAddrMan::Attempt_(const CService& addr, bool fCountFailure, int64_t nTime) CAddrInfo CAddrMan::Select_(bool newOnly) { - if (size() == 0) + AssertLockHeld(cs); + + if (vRandom.empty()) return CAddrInfo(); if (newOnly && nNew == 0) @@ -408,8 +430,10 @@ CAddrInfo CAddrMan::Select_(bool newOnly) #ifdef DEBUG_ADDRMAN int CAddrMan::Check_() { - std::set<int> setTried; - std::map<int, int> mapNew; + AssertLockHeld(cs); + + std::unordered_set<int> setTried; + std::unordered_map<int, int> mapNew; if (vRandom.size() != (size_t)(nTried + nNew)) return -7; @@ -485,6 +509,8 @@ int CAddrMan::Check_() void CAddrMan::GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size_t max_pct, std::optional<Network> network) { + AssertLockHeld(cs); + size_t nNodes = vRandom.size(); if (max_pct != 0) { nNodes = max_pct * nNodes / 100; @@ -517,6 +543,8 @@ void CAddrMan::GetAddr_(std::vector<CAddress>& vAddr, size_t max_addresses, size void CAddrMan::Connected_(const CService& addr, int64_t nTime) { + AssertLockHeld(cs); + CAddrInfo* pinfo = Find(addr); // if not found, bail out @@ -537,6 +565,8 @@ void CAddrMan::Connected_(const CService& addr, int64_t nTime) void CAddrMan::SetServices_(const CService& addr, ServiceFlags nServices) { + AssertLockHeld(cs); + CAddrInfo* pinfo = Find(addr); // if not found, bail out @@ -555,6 +585,8 @@ void CAddrMan::SetServices_(const CService& addr, ServiceFlags nServices) void CAddrMan::ResolveCollisions_() { + AssertLockHeld(cs); + for (std::set<int>::iterator it = m_tried_collisions.begin(); it != m_tried_collisions.end();) { int id_new = *it; @@ -614,6 +646,8 @@ void CAddrMan::ResolveCollisions_() CAddrInfo CAddrMan::SelectTriedCollision_() { + AssertLockHeld(cs); + if (m_tried_collisions.size() == 0) return CAddrInfo(); std::set<int>::iterator it = m_tried_collisions.begin(); diff --git a/src/addrman.h b/src/addrman.h index 41994288db..665e253192 100644 --- a/src/addrman.h +++ b/src/addrman.h @@ -8,22 +8,22 @@ #include <clientversion.h> #include <config/bitcoin-config.h> +#include <fs.h> +#include <hash.h> #include <netaddress.h> #include <protocol.h> #include <random.h> +#include <streams.h> #include <sync.h> #include <timedata.h> #include <tinyformat.h> #include <util/system.h> -#include <fs.h> -#include <hash.h> #include <iostream> -#include <map> #include <optional> #include <set> #include <stdint.h> -#include <streams.h> +#include 
<unordered_map> #include <vector> /** @@ -231,6 +231,7 @@ public: */ template <typename Stream> void Serialize(Stream& s_) const + EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); @@ -251,7 +252,7 @@ public: int nUBuckets = ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30); s << nUBuckets; - std::map<int, int> mapUnkIds; + std::unordered_map<int, int> mapUnkIds; int nIds = 0; for (const auto& entry : mapInfo) { mapUnkIds[entry.first] = nIds; @@ -296,10 +297,11 @@ public: template <typename Stream> void Unserialize(Stream& s_) + EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); - Clear(); + assert(vRandom.empty()); Format format; s_ >> Using<CustomUintFormatter<1>>(format); @@ -435,13 +437,13 @@ public: // Prune new entries with refcount 0 (as a result of collisions). int nLostUnk = 0; - for (std::map<int, CAddrInfo>::const_iterator it = mapInfo.begin(); it != mapInfo.end(); ) { + for (auto it = mapInfo.cbegin(); it != mapInfo.cend(); ) { if (it->second.fInTried == false && it->second.nRefCount == 0) { - std::map<int, CAddrInfo>::const_iterator itCopy = it++; + const auto itCopy = it++; Delete(itCopy->first); - nLostUnk++; + ++nLostUnk; } else { - it++; + ++it; } } if (nLost + nLostUnk > 0) { @@ -452,6 +454,7 @@ public: } void Clear() + EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); std::vector<int>().swap(vRandom); @@ -487,26 +490,15 @@ public: //! Return the number of (unique) addresses in all tables. size_t size() const + EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); // TODO: Cache this in an atomic to avoid this overhead return vRandom.size(); } - //! Consistency check - void Check() - { -#ifdef DEBUG_ADDRMAN - { - LOCK(cs); - int err; - if ((err=Check_())) - LogPrintf("ADDRMAN CONSISTENCY CHECK FAILED!!! err=%i\n", err); - } -#endif - } - //! Add a single address. bool Add(const CAddress &addr, const CNetAddr& source, int64_t nTimePenalty = 0) + EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); bool fRet = false; @@ -521,6 +513,7 @@ public: //! Add multiple addresses. bool Add(const std::vector<CAddress> &vAddr, const CNetAddr& source, int64_t nTimePenalty = 0) + EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); int nAdd = 0; @@ -536,6 +529,7 @@ public: //! Mark an entry as accessible. void Good(const CService &addr, bool test_before_evict = true, int64_t nTime = GetAdjustedTime()) + EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); Check(); @@ -545,6 +539,7 @@ public: //! Mark an entry as connection attempted to. void Attempt(const CService &addr, bool fCountFailure, int64_t nTime = GetAdjustedTime()) + EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); Check(); @@ -554,6 +549,7 @@ public: //! See if any to-be-evicted tried table entries have been tested and if so resolve the collisions. void ResolveCollisions() + EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); Check(); @@ -563,14 +559,12 @@ public: //! Randomly select an address in tried that another address is attempting to evict. CAddrInfo SelectTriedCollision() + EXCLUSIVE_LOCKS_REQUIRED(!cs) { - CAddrInfo ret; - { - LOCK(cs); - Check(); - ret = SelectTriedCollision_(); - Check(); - } + LOCK(cs); + Check(); + const CAddrInfo ret = SelectTriedCollision_(); + Check(); return ret; } @@ -578,14 +572,12 @@ public: * Choose an address to connect to. 
*/ CAddrInfo Select(bool newOnly = false) + EXCLUSIVE_LOCKS_REQUIRED(!cs) { - CAddrInfo addrRet; - { - LOCK(cs); - Check(); - addrRet = Select_(newOnly); - Check(); - } + LOCK(cs); + Check(); + const CAddrInfo addrRet = Select_(newOnly); + Check(); return addrRet; } @@ -597,19 +589,19 @@ public: * @param[in] network Select only addresses of this network (nullopt = all). */ std::vector<CAddress> GetAddr(size_t max_addresses, size_t max_pct, std::optional<Network> network) + EXCLUSIVE_LOCKS_REQUIRED(!cs) { + LOCK(cs); Check(); std::vector<CAddress> vAddr; - { - LOCK(cs); - GetAddr_(vAddr, max_addresses, max_pct, network); - } + GetAddr_(vAddr, max_addresses, max_pct, network); Check(); return vAddr; } //! Outer function for Connected_() void Connected(const CService &addr, int64_t nTime = GetAdjustedTime()) + EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); Check(); @@ -618,6 +610,7 @@ public: } void SetServices(const CService &addr, ServiceFlags nServices) + EXCLUSIVE_LOCKS_REQUIRED(!cs) { LOCK(cs); Check(); @@ -633,8 +626,8 @@ protected: FastRandomContext insecure_rand; private: - //! critical section to protect the inner data structures - mutable RecursiveMutex cs; + //! A mutex to protect the inner data structures. + mutable Mutex cs; //! Serialization versions. enum Format : uint8_t { @@ -662,10 +655,10 @@ private: int nIdCount GUARDED_BY(cs); //! table with information about all nIds - std::map<int, CAddrInfo> mapInfo GUARDED_BY(cs); + std::unordered_map<int, CAddrInfo> mapInfo GUARDED_BY(cs); //! find an nId based on its network address - std::map<CNetAddr, int> mapAddr GUARDED_BY(cs); + std::unordered_map<CNetAddr, int, CNetAddrHash> mapAddr GUARDED_BY(cs); //! randomly-ordered vector of all nIds std::vector<int> vRandom GUARDED_BY(cs); @@ -725,6 +718,19 @@ private: //! Return a random to-be-evicted tried table address. CAddrInfo SelectTriedCollision_() EXCLUSIVE_LOCKS_REQUIRED(cs); + //! Consistency check + void Check() + EXCLUSIVE_LOCKS_REQUIRED(cs) + { +#ifdef DEBUG_ADDRMAN + AssertLockHeld(cs); + const int err = Check_(); + if (err) { + LogPrintf("ADDRMAN CONSISTENCY CHECK FAILED!!! err=%i\n", err); + } +#endif + } + #ifdef DEBUG_ADDRMAN //! Perform consistency check. Returns an error code or zero. 
int Check_() EXCLUSIVE_LOCKS_REQUIRED(cs); diff --git a/src/banman.cpp b/src/banman.cpp index bb97fc4809..d2437e6733 100644 --- a/src/banman.cpp +++ b/src/banman.cpp @@ -18,20 +18,18 @@ BanMan::BanMan(fs::path ban_file, CClientUIInterface* client_interface, int64_t if (m_client_interface) m_client_interface->InitMessage(_("Loading banlist…").translated); int64_t n_start = GetTimeMillis(); - m_is_dirty = false; - banmap_t banmap; - if (m_ban_db.Read(banmap)) { - SetBanned(banmap); // thread save setter - SetBannedSetDirty(false); // no need to write down, just read data - SweepBanned(); // sweep out unused entries + if (m_ban_db.Read(m_banned, m_is_dirty)) { + SweepBanned(); // sweep out unused entries - LogPrint(BCLog::NET, "Loaded %d banned node ips/subnets from banlist.dat %dms\n", - m_banned.size(), GetTimeMillis() - n_start); + LogPrint(BCLog::NET, "Loaded %d banned node addresses/subnets %dms\n", m_banned.size(), + GetTimeMillis() - n_start); } else { - LogPrintf("Recreating banlist.dat\n"); - SetBannedSetDirty(true); // force write - DumpBanlist(); + LogPrintf("Recreating the banlist database\n"); + m_banned = {}; + m_is_dirty = true; } + + DumpBanlist(); } BanMan::~BanMan() @@ -53,8 +51,8 @@ void BanMan::DumpBanlist() SetBannedSetDirty(false); } - LogPrint(BCLog::NET, "Flushed %d banned node ips/subnets to banlist.dat %dms\n", - banmap.size(), GetTimeMillis() - n_start); + LogPrint(BCLog::NET, "Flushed %d banned node addresses/subnets to disk %dms\n", banmap.size(), + GetTimeMillis() - n_start); } void BanMan::ClearBanned() @@ -167,13 +165,6 @@ void BanMan::GetBanned(banmap_t& banmap) banmap = m_banned; //create a thread safe copy } -void BanMan::SetBanned(const banmap_t& banmap) -{ - LOCK(m_cs_banned); - m_banned = banmap; - m_is_dirty = true; -} - void BanMan::SweepBanned() { int64_t now = GetTime(); @@ -188,7 +179,7 @@ void BanMan::SweepBanned() m_banned.erase(it++); m_is_dirty = true; notify_ui = true; - LogPrint(BCLog::NET, "%s: Removed banned node ip/subnet from banlist.dat: %s\n", __func__, sub_net.ToString()); + LogPrint(BCLog::NET, "Removed banned node address/subnet: %s\n", sub_net.ToString()); } else ++it; } diff --git a/src/banman.h b/src/banman.h index f6bfbd1e49..8c75d4037e 100644 --- a/src/banman.h +++ b/src/banman.h @@ -17,7 +17,8 @@ // NOTE: When adjusting this, update rpcnet:setban's help ("24h") static constexpr unsigned int DEFAULT_MISBEHAVING_BANTIME = 60 * 60 * 24; // Default 24-hour ban -// How often to dump addresses to banlist.dat + +/// How often to dump banned addresses/subnets to disk. static constexpr std::chrono::minutes DUMP_BANS_INTERVAL{15}; class CClientUIInterface; @@ -30,7 +31,7 @@ class CSubNet; // If an address or subnet is banned, we never accept incoming connections from // it and never create outgoing connections to it. We won't gossip its address // to other peers in addr messages. Banned addresses and subnets are stored to -// banlist.dat on shutdown and reloaded on startup. Banning can be used to +// disk on shutdown and reloaded on startup. Banning can be used to // prevent connections with spy nodes or other griefers. // // 2. Discouragement. 
If a peer misbehaves enough (see Misbehaving() in @@ -79,7 +80,6 @@ public: void DumpBanlist(); private: - void SetBanned(const banmap_t& banmap); bool BannedSetIsDirty(); //!set the "dirty" flag for the banlist void SetBannedSetDirty(bool dirty = true); diff --git a/src/bench/duplicate_inputs.cpp b/src/bench/duplicate_inputs.cpp index 4f6e1122b8..8703a1cf94 100644 --- a/src/bench/duplicate_inputs.cpp +++ b/src/bench/duplicate_inputs.cpp @@ -25,7 +25,6 @@ static void DuplicateInputs(benchmark::Bench& bench) CMutableTransaction naughtyTx{}; LOCK(cs_main); - assert(std::addressof(::ChainActive()) == std::addressof(testing_setup->m_node.chainman->ActiveChain())); CBlockIndex* pindexPrev = testing_setup->m_node.chainman->ActiveChain().Tip(); assert(pindexPrev != nullptr); block.nBits = GetNextWorkRequired(pindexPrev, &block, chainparams.GetConsensus()); diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp index c5840b2098..7a5f945511 100644 --- a/src/bitcoin-cli.cpp +++ b/src/bitcoin-cli.cpp @@ -40,6 +40,7 @@ UrlDecodeFn* const URL_DECODE = urlDecode; static const char DEFAULT_RPCCONNECT[] = "127.0.0.1"; static const int DEFAULT_HTTP_CLIENT_TIMEOUT=900; +static constexpr int DEFAULT_WAIT_CLIENT_TIMEOUT = 0; static const bool DEFAULT_NAMED=false; static const int CONTINUE_EXECUTION=-1; static constexpr int8_t UNKNOWN_NETWORK{-1}; @@ -73,6 +74,7 @@ static void SetupCliArgs(ArgsManager& argsman) argsman.AddArg("-rpcport=<port>", strprintf("Connect to JSON-RPC on <port> (default: %u, testnet: %u, signet: %u, regtest: %u)", defaultBaseParams->RPCPort(), testnetBaseParams->RPCPort(), signetBaseParams->RPCPort(), regtestBaseParams->RPCPort()), ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::OPTIONS); argsman.AddArg("-rpcuser=<user>", "Username for JSON-RPC connections", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-rpcwait", "Wait for RPC server to start", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddArg("-rpcwaittimeout=<n>", strprintf("Timeout in seconds to wait for the RPC server to start, or 0 for no timeout. (default: %d)", DEFAULT_WAIT_CLIENT_TIMEOUT), ArgsManager::ALLOW_INT, OptionsCategory::OPTIONS); argsman.AddArg("-rpcwallet=<walletname>", "Send RPC for non-default wallet on RPC server (needs to exactly match corresponding -wallet option passed to bitcoind). This changes the RPC endpoint used, e.g. http://127.0.0.1:8332/wallet/<walletname>", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-stdin", "Read extra arguments from standard input, one per line until EOF/Ctrl-D (recommended for sensitive information such as passphrases). When combined with -stdinrpcpass, the first line from standard input is used for the RPC password.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-stdinrpcpass", "Read RPC password from standard input as a single line. When combined with -stdin, the first line from standard input is used for the RPC password. When combined with -stdinwalletpassphrase, -stdinrpcpass consumes the first line, and -stdinwalletpassphrase consumes the second.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); @@ -794,6 +796,9 @@ static UniValue ConnectAndCallRPC(BaseRequestHandler* rh, const std::string& str UniValue response(UniValue::VOBJ); // Execute and handle connection failures with -rpcwait. 
const bool fWait = gArgs.GetBoolArg("-rpcwait", false); + const int timeout = gArgs.GetArg("-rpcwaittimeout", DEFAULT_WAIT_CLIENT_TIMEOUT); + const auto deadline{GetTime<std::chrono::microseconds>() + 1s * timeout}; + do { try { response = CallRPC(rh, strMethod, args, rpcwallet); @@ -804,11 +809,12 @@ static UniValue ConnectAndCallRPC(BaseRequestHandler* rh, const std::string& str } } break; // Connection succeeded, no need to retry. - } catch (const CConnectionFailed&) { - if (fWait) { - UninterruptibleSleep(std::chrono::milliseconds{1000}); + } catch (const CConnectionFailed& e) { + const auto now{GetTime<std::chrono::microseconds>()}; + if (fWait && (timeout <= 0 || now < deadline)) { + UninterruptibleSleep(1s); } else { - throw; + throw CConnectionFailed(strprintf("timeout on transient error: %s", e.what())); } } } while (fWait); diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp index 93ac4b8f7e..3fc87ae1ff 100644 --- a/src/bitcoin-tx.cpp +++ b/src/bitcoin-tx.cpp @@ -506,11 +506,12 @@ static void MutateTxDelOutput(CMutableTransaction& tx, const std::string& strOut tx.vout.erase(tx.vout.begin() + outIdx); } -static const unsigned int N_SIGHASH_OPTS = 6; +static const unsigned int N_SIGHASH_OPTS = 7; static const struct { const char *flagStr; int flags; } sighashOptions[N_SIGHASH_OPTS] = { + {"DEFAULT", SIGHASH_DEFAULT}, {"ALL", SIGHASH_ALL}, {"NONE", SIGHASH_NONE}, {"SINGLE", SIGHASH_SINGLE}, diff --git a/src/bitcoin-util.cpp b/src/bitcoin-util.cpp index af07b28d3d..f534aecc19 100644 --- a/src/bitcoin-util.cpp +++ b/src/bitcoin-util.cpp @@ -7,28 +7,19 @@ #endif #include <arith_uint256.h> +#include <chain.h> +#include <chainparams.h> +#include <chainparamsbase.h> #include <clientversion.h> -#include <coins.h> -#include <consensus/consensus.h> #include <core_io.h> -#include <key_io.h> -#include <policy/rbf.h> -#include <primitives/transaction.h> -#include <script/script.h> -#include <script/sign.h> -#include <script/signingprovider.h> -#include <univalue.h> -#include <util/moneystr.h> -#include <util/rbf.h> -#include <util/strencodings.h> -#include <util/string.h> +#include <streams.h> #include <util/system.h> #include <util/translation.h> #include <atomic> +#include <cstdio> #include <functional> #include <memory> -#include <stdio.h> #include <thread> #include <boost/algorithm/string.hpp> @@ -43,35 +34,29 @@ static void SetupBitcoinUtilArgs(ArgsManager &argsman) argsman.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); + argsman.AddCommand("grind", "Perform proof of work on hex header string"); + SetupChainParamsBaseOptions(argsman); } // This function returns either one of EXIT_ codes when it's expected to stop the process or // CONTINUE_EXECUTION when it's expected to continue further. 
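The `bitcoin-cli` hunk above adds `-rpcwaittimeout`: a deadline is computed once up front, a value of 0 means wait indefinitely, and a connection failure past the deadline is rethrown as a timeout. The Python sketch below mirrors that retry shape only; `ConnectionFailed` and `call` are placeholders standing in for `CConnectionFailed` and `CallRPC`, not real APIs.

```python
import time

class ConnectionFailed(Exception):
    """Placeholder for bitcoin-cli's CConnectionFailed."""

def call_with_rpcwait(call, rpcwait=True, rpcwaittimeout=0, poll_interval=1.0):
    """Retry `call` on connection failure: a timeout of 0 waits forever,
    otherwise give up once the deadline passes (as in the hunk above)."""
    deadline = time.monotonic() + rpcwaittimeout
    while True:
        try:
            return call()
        except ConnectionFailed as e:
            if rpcwait and (rpcwaittimeout <= 0 or time.monotonic() < deadline):
                time.sleep(poll_interval)
            else:
                raise ConnectionFailed(f"timeout on transient error: {e}")

if __name__ == "__main__":
    attempts = {"n": 0}
    def flaky_rpc():
        attempts["n"] += 1
        if attempts["n"] < 3:
            raise ConnectionFailed("server still starting")
        return "pong"
    print(call_with_rpcwait(flaky_rpc, rpcwaittimeout=10, poll_interval=0.1))
```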
-static int AppInitUtil(int argc, char* argv[]) +static int AppInitUtil(ArgsManager& args, int argc, char* argv[]) { - SetupBitcoinUtilArgs(gArgs); + SetupBitcoinUtilArgs(args); std::string error; - if (!gArgs.ParseParameters(argc, argv, error)) { + if (!args.ParseParameters(argc, argv, error)) { tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error); return EXIT_FAILURE; } - // Check for chain settings (Params() calls are only valid after this clause) - try { - SelectParams(gArgs.GetChainName()); - } catch (const std::exception& e) { - tfm::format(std::cerr, "Error: %s\n", e.what()); - return EXIT_FAILURE; - } - - if (argc < 2 || HelpRequested(gArgs) || gArgs.IsArgSet("-version")) { + if (HelpRequested(args) || args.IsArgSet("-version")) { // First part of help message is specific to this utility std::string strUsage = PACKAGE_NAME " bitcoin-util utility version " + FormatFullVersion() + "\n"; - if (!gArgs.IsArgSet("-version")) { + if (!args.IsArgSet("-version")) { strUsage += "\n" "Usage: bitcoin-util [options] [commands] Do stuff\n"; - strUsage += "\n" + gArgs.GetHelpMessage(); + strUsage += "\n" + args.GetHelpMessage(); } tfm::format(std::cout, "%s", strUsage); @@ -82,6 +67,15 @@ static int AppInitUtil(int argc, char* argv[]) } return EXIT_SUCCESS; } + + // Check for chain settings (Params() calls are only valid after this clause) + try { + SelectParams(args.GetChainName()); + } catch (const std::exception& e) { + tfm::format(std::cerr, "Error: %s\n", e.what()); + return EXIT_FAILURE; + } + return CONTINUE_EXECUTION; } @@ -111,17 +105,17 @@ static void grind_task(uint32_t nBits, CBlockHeader& header_orig, uint32_t offse } } -static int Grind(int argc, char* argv[], std::string& strPrint) +static int Grind(const std::vector<std::string>& args, std::string& strPrint) { - if (argc != 1) { + if (args.size() != 1) { strPrint = "Must specify block header to grind"; - return 1; + return EXIT_FAILURE; } CBlockHeader header; - if (!DecodeHexBlockHeader(header, argv[0])) { + if (!DecodeHexBlockHeader(header, args[0])) { strPrint = "Could not decode block header"; - return 1; + return EXIT_FAILURE; } uint32_t nBits = header.nBits; @@ -137,49 +131,13 @@ static int Grind(int argc, char* argv[], std::string& strPrint) } if (!found) { strPrint = "Could not satisfy difficulty target"; - return 1; + return EXIT_FAILURE; } CDataStream ss(SER_NETWORK, PROTOCOL_VERSION); ss << header; strPrint = HexStr(ss); - return 0; -} - -static int CommandLineUtil(int argc, char* argv[]) -{ - if (argc <= 1) return 1; - - std::string strPrint; - int nRet = 0; - - try { - while (argc > 1 && IsSwitchChar(argv[1][0]) && (argv[1][1] != 0)) { - --argc; - ++argv; - } - - char* command = argv[1]; - if (strcmp(command, "grind") == 0) { - nRet = Grind(argc-2, argv+2, strPrint); - } else { - strPrint = strprintf("Unknown command %s", command); - nRet = 1; - } - } - catch (const std::exception& e) { - strPrint = std::string("error: ") + e.what(); - nRet = EXIT_FAILURE; - } - catch (...) { - PrintExceptionContinue(nullptr, "CommandLineUtil()"); - throw; - } - - if (strPrint != "") { - tfm::format(nRet == 0 ? 
std::cout : std::cerr, "%s\n", strPrint); - } - return nRet; + return EXIT_SUCCESS; } #ifdef WIN32 @@ -193,14 +151,15 @@ __declspec(dllexport) int main(int argc, char* argv[]) int main(int argc, char* argv[]) #endif { + ArgsManager& args = gArgs; SetupEnvironment(); try { - int ret = AppInitUtil(argc, argv); - if (ret != CONTINUE_EXECUTION) + int ret = AppInitUtil(args, argc, argv); + if (ret != CONTINUE_EXECUTION) { return ret; - } - catch (const std::exception& e) { + } + } catch (const std::exception& e) { PrintExceptionContinue(&e, "AppInitUtil()"); return EXIT_FAILURE; } catch (...) { @@ -208,14 +167,29 @@ int main(int argc, char* argv[]) return EXIT_FAILURE; } + const auto cmd = args.GetCommand(); + if (!cmd) { + tfm::format(std::cerr, "Error: must specify a command\n"); + return EXIT_FAILURE; + } + int ret = EXIT_FAILURE; + std::string strPrint; try { - ret = CommandLineUtil(argc, argv); - } - catch (const std::exception& e) { - PrintExceptionContinue(&e, "CommandLineUtil()"); + if (cmd->command == "grind") { + ret = Grind(cmd->args, strPrint); + } else { + assert(false); // unknown command should be caught earlier + } + } catch (const std::exception& e) { + strPrint = std::string("error: ") + e.what(); } catch (...) { - PrintExceptionContinue(nullptr, "CommandLineUtil()"); + strPrint = "unknown error"; + } + + if (strPrint != "") { + tfm::format(ret == 0 ? std::cout : std::cerr, "%s\n", strPrint); } + return ret; } diff --git a/src/bitcoin-wallet.cpp b/src/bitcoin-wallet.cpp index b84d909b07..765954c92e 100644 --- a/src/bitcoin-wallet.cpp +++ b/src/bitcoin-wallet.cpp @@ -33,11 +33,11 @@ static void SetupWalletToolArgs(ArgsManager& argsman) argsman.AddArg("-format=<format>", "The format of the wallet file to create. Either \"bdb\" or \"sqlite\". Only used with 'createfromdump'", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS); argsman.AddArg("-printtoconsole", "Send trace/debug info to console (default: 1 when no -debug is true, 0 otherwise).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); - argsman.AddCommand("info", "Get wallet info", OptionsCategory::COMMANDS); - argsman.AddCommand("create", "Create new wallet file", OptionsCategory::COMMANDS); - argsman.AddCommand("salvage", "Attempt to recover private keys from a corrupt wallet. Warning: 'salvage' is experimental.", OptionsCategory::COMMANDS); - argsman.AddCommand("dump", "Print out all of the wallet key-value records", OptionsCategory::COMMANDS); - argsman.AddCommand("createfromdump", "Create new wallet file from dumped records", OptionsCategory::COMMANDS); + argsman.AddCommand("info", "Get wallet info"); + argsman.AddCommand("create", "Create new wallet file"); + argsman.AddCommand("salvage", "Attempt to recover private keys from a corrupt wallet. 
Warning: 'salvage' is experimental."); + argsman.AddCommand("dump", "Print out all of the wallet key-value records"); + argsman.AddCommand("createfromdump", "Create new wallet file from dumped records"); } static bool WalletAppInit(ArgsManager& args, int argc, char* argv[]) diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp index cf9e4fad44..654679af27 100644 --- a/src/bitcoind.cpp +++ b/src/bitcoind.cpp @@ -112,8 +112,8 @@ static bool AppInit(NodeContext& node, int argc, char* argv[]) util::ThreadSetInternalName("init"); // If Qt is used, parameters/bitcoin.conf are parsed in qt/bitcoin.cpp's main() - SetupServerArgs(node); ArgsManager& args = *Assert(node.args); + SetupServerArgs(args); std::string error; if (!args.ParseParameters(argc, argv, error)) { return InitError(Untranslated(strprintf("Error parsing command line arguments: %s\n", error))); diff --git a/src/coins.cpp b/src/coins.cpp index d52851cadd..ce0b131de6 100644 --- a/src/coins.cpp +++ b/src/coins.cpp @@ -13,7 +13,7 @@ bool CCoinsView::GetCoin(const COutPoint &outpoint, Coin &coin) const { return f uint256 CCoinsView::GetBestBlock() const { return uint256(); } std::vector<uint256> CCoinsView::GetHeadBlocks() const { return std::vector<uint256>(); } bool CCoinsView::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { return false; } -CCoinsViewCursor *CCoinsView::Cursor() const { return nullptr; } +std::unique_ptr<CCoinsViewCursor> CCoinsView::Cursor() const { return nullptr; } bool CCoinsView::HaveCoin(const COutPoint &outpoint) const { @@ -28,7 +28,7 @@ uint256 CCoinsViewBacked::GetBestBlock() const { return base->GetBestBlock(); } std::vector<uint256> CCoinsViewBacked::GetHeadBlocks() const { return base->GetHeadBlocks(); } void CCoinsViewBacked::SetBackend(CCoinsView &viewIn) { base = &viewIn; } bool CCoinsViewBacked::BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) { return base->BatchWrite(mapCoins, hashBlock); } -CCoinsViewCursor *CCoinsViewBacked::Cursor() const { return base->Cursor(); } +std::unique_ptr<CCoinsViewCursor> CCoinsViewBacked::Cursor() const { return base->Cursor(); } size_t CCoinsViewBacked::EstimateSize() const { return base->EstimateSize(); } CCoinsViewCache::CCoinsViewCache(CCoinsView *baseIn) : CCoinsViewBacked(baseIn), cachedCoinsUsage(0) {} diff --git a/src/coins.h b/src/coins.h index 816b4864a3..3151a260d9 100644 --- a/src/coins.h +++ b/src/coins.h @@ -180,7 +180,7 @@ public: virtual bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock); //! Get a cursor to iterate over the whole state - virtual CCoinsViewCursor *Cursor() const; + virtual std::unique_ptr<CCoinsViewCursor> Cursor() const; //! 
As we use CCoinsViews polymorphically, have a virtual destructor virtual ~CCoinsView() {} @@ -204,7 +204,7 @@ public: std::vector<uint256> GetHeadBlocks() const override; void SetBackend(CCoinsView &viewIn); bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override; - CCoinsViewCursor *Cursor() const override; + std::unique_ptr<CCoinsViewCursor> Cursor() const override; size_t EstimateSize() const override; }; @@ -237,7 +237,7 @@ public: uint256 GetBestBlock() const override; void SetBestBlock(const uint256 &hashBlock); bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override; - CCoinsViewCursor* Cursor() const override { + std::unique_ptr<CCoinsViewCursor> Cursor() const override { throw std::logic_error("CCoinsViewCache cursor iteration not supported."); } diff --git a/src/core_read.cpp b/src/core_read.cpp index b5fc93886d..6108961010 100644 --- a/src/core_read.cpp +++ b/src/core_read.cpp @@ -260,6 +260,7 @@ int ParseSighashString(const UniValue& sighash) int hash_type = SIGHASH_ALL; if (!sighash.isNull()) { static std::map<std::string, int> map_sighash_values = { + {std::string("DEFAULT"), int(SIGHASH_DEFAULT)}, {std::string("ALL"), int(SIGHASH_ALL)}, {std::string("ALL|ANYONECANPAY"), int(SIGHASH_ALL|SIGHASH_ANYONECANPAY)}, {std::string("NONE"), int(SIGHASH_NONE)}, diff --git a/src/external_signer.cpp b/src/external_signer.cpp index f16d21fa60..d6388b759a 100644 --- a/src/external_signer.cpp +++ b/src/external_signer.cpp @@ -13,9 +13,7 @@ #include <string> #include <vector> -#ifdef ENABLE_EXTERNAL_SIGNER - -ExternalSigner::ExternalSigner(const std::string& command, const std::string& fingerprint, const std::string chain, const std::string name): m_command(command), m_fingerprint(fingerprint), m_chain(chain), m_name(name) {} +ExternalSigner::ExternalSigner(const std::string& command, const std::string chain, const std::string& fingerprint, const std::string name): m_command(command), m_chain(chain), m_fingerprint(fingerprint), m_name(name) {} const std::string ExternalSigner::NetworkArg() const { @@ -55,7 +53,7 @@ bool ExternalSigner::Enumerate(const std::string& command, std::vector<ExternalS if (model_field.isStr() && model_field.getValStr() != "") { name += model_field.getValStr(); } - signers.push_back(ExternalSigner(command, fingerprintStr, chain, name)); + signers.push_back(ExternalSigner(command, chain, fingerprintStr, name)); } return true; } @@ -116,5 +114,3 @@ bool ExternalSigner::SignTransaction(PartiallySignedTransaction& psbtx, std::str return true; } - -#endif // ENABLE_EXTERNAL_SIGNER diff --git a/src/external_signer.h b/src/external_signer.h index b3b202091a..e40fd7f010 100644 --- a/src/external_signer.h +++ b/src/external_signer.h @@ -11,8 +11,6 @@ #include <string> #include <vector> -#ifdef ENABLE_EXTERNAL_SIGNER - struct PartiallySignedTransaction; //! Enables interaction with an external signing device or service, such as @@ -23,24 +21,24 @@ private: //! The command which handles interaction with the external signer. std::string m_command; + //! Bitcoin mainnet, testnet, etc + std::string m_chain; + + const std::string NetworkArg() const; + public: //! @param[in] command the command which handles interaction with the external signer //! @param[in] fingerprint master key fingerprint of the signer //! @param[in] chain "main", "test", "regtest" or "signet" //! 
@param[in] name device name - ExternalSigner(const std::string& command, const std::string& fingerprint, const std::string chain, const std::string name); + ExternalSigner(const std::string& command, const std::string chain, const std::string& fingerprint, const std::string name); //! Master key fingerprint of the signer std::string m_fingerprint; - //! Bitcoin mainnet, testnet, etc - std::string m_chain; - //! Name of signer std::string m_name; - const std::string NetworkArg() const; - //! Obtain a list of signers. Calls `<command> enumerate`. //! @param[in] command the command which handles interaction with the external signer //! @param[in,out] signers vector to which new signers (with a unique master key fingerprint) are added @@ -65,6 +63,4 @@ public: bool SignTransaction(PartiallySignedTransaction& psbt, std::string& error); }; -#endif // ENABLE_EXTERNAL_SIGNER - #endif // BITCOIN_EXTERNAL_SIGNER_H diff --git a/src/httpserver.cpp b/src/httpserver.cpp index 45c049c3be..b3984a43bb 100644 --- a/src/httpserver.cpp +++ b/src/httpserver.cpp @@ -83,7 +83,7 @@ public: bool Enqueue(WorkItem* item) { LOCK(cs); - if (queue.size() >= maxDepth) { + if (!running || queue.size() >= maxDepth) { return false; } queue.emplace_back(std::unique_ptr<WorkItem>(item)); @@ -99,7 +99,7 @@ public: WAIT_LOCK(cs, lock); while (running && queue.empty()) cond.wait(lock); - if (!running) + if (!running && queue.empty()) break; i = std::move(queue.front()); queue.pop_front(); @@ -448,8 +448,6 @@ void StopHTTPServer() thread.join(); } g_thread_http_workers.clear(); - delete workQueue; - workQueue = nullptr; } // Unlisten sockets, these are what make the event loop running, which means // that after this and all connections are closed the event loop will quit. @@ -469,6 +467,10 @@ void StopHTTPServer() event_base_free(eventBase); eventBase = nullptr; } + if (workQueue) { + delete workQueue; + workQueue = nullptr; + } LogPrint(BCLog::HTTP, "Stopped HTTP server\n"); } diff --git a/src/index/base.cpp b/src/index/base.cpp index 3a61af28b7..6fd2701e2e 100644 --- a/src/index/base.cpp +++ b/src/index/base.cpp @@ -340,7 +340,6 @@ void BaseIndex::Interrupt() bool BaseIndex::Start(CChainState& active_chainstate) { - assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); m_chainstate = &active_chainstate; // Need to register this ValidationInterface before running Init(), so that // callbacks are not missed if Init sets m_synced to true. 
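The `httpserver.cpp` hunk above changes shutdown ordering: once the work queue stops running, `Enqueue()` rejects new items, worker threads drain whatever was already queued before exiting, and the queue object is only deleted after the event loop is torn down. The Python model below captures that drain-on-shutdown behaviour; it is a behavioural sketch, not a translation of the actual `WorkQueue` class.

```python
import threading
from collections import deque

class WorkQueue:
    """Sketch of the drain-on-shutdown behaviour from the hunk above."""

    def __init__(self, max_depth=16):
        self._cv = threading.Condition()
        self._queue = deque()
        self._running = True
        self._max_depth = max_depth

    def enqueue(self, item):
        with self._cv:
            # Mirrors `if (!running || queue.size() >= maxDepth) return false;`
            if not self._running or len(self._queue) >= self._max_depth:
                return False
            self._queue.append(item)
            self._cv.notify()
            return True

    def run(self):
        while True:
            with self._cv:
                while self._running and not self._queue:
                    self._cv.wait()
                # Mirrors `if (!running && queue.empty()) break;`
                if not self._running and not self._queue:
                    return
                item = self._queue.popleft()
            item()  # process outside the lock

    def interrupt(self):
        with self._cv:
            self._running = False
            self._cv.notify_all()

if __name__ == "__main__":
    q = WorkQueue()
    worker = threading.Thread(target=q.run)
    worker.start()
    for i in range(5):
        q.enqueue(lambda i=i: print("handled request", i))
    q.interrupt()  # already-queued work is still processed
    worker.join()
    print("enqueue after shutdown accepted?", q.enqueue(lambda: None))
```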
diff --git a/src/init.cpp b/src/init.cpp index 7f64b1acfa..da0447ca79 100644 --- a/src/init.cpp +++ b/src/init.cpp @@ -283,7 +283,7 @@ void Shutdown(NodeContext& node) init::UnsetGlobals(); node.mempool.reset(); node.fee_estimator.reset(); - node.chainman = nullptr; + node.chainman.reset(); node.scheduler.reset(); try { @@ -347,12 +347,8 @@ static void OnRPCStopped() LogPrint(BCLog::RPC, "RPC stopped.\n"); } -void SetupServerArgs(NodeContext& node) +void SetupServerArgs(ArgsManager& argsman) { - assert(!node.args); - node.args = &gArgs; - ArgsManager& argsman = *node.args; - SetupHelpOptions(argsman); argsman.AddArg("-help-debug", "Print help message with debugging options and exit", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST); // server-only for now @@ -1165,7 +1161,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) assert(!node.addrman); node.addrman = std::make_unique<CAddrMan>(); assert(!node.banman); - node.banman = std::make_unique<BanMan>(gArgs.GetDataDirNet() / "banlist.dat", &uiInterface, args.GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME)); + node.banman = std::make_unique<BanMan>(gArgs.GetDataDirNet() / "banlist", &uiInterface, args.GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME)); assert(!node.connman); node.connman = std::make_unique<CConnman>(GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max()), *node.addrman, args.GetBoolArg("-networkactive", true)); @@ -1179,8 +1175,8 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) node.mempool = std::make_unique<CTxMemPool>(node.fee_estimator.get(), check_ratio); assert(!node.chainman); - node.chainman = &g_chainman; - ChainstateManager& chainman = *Assert(node.chainman); + node.chainman = std::make_unique<ChainstateManager>(); + ChainstateManager& chainman = *node.chainman; assert(!node.peerman); node.peerman = PeerManager::make(chainparams, *node.connman, *node.addrman, node.banman.get(), @@ -1385,7 +1381,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // If the loaded chain has a wrong genesis, bail out immediately // (we're likely using a testnet datadir, or the other way around). if (!chainman.BlockIndex().empty() && - !g_chainman.m_blockman.LookupBlockIndex(chainparams.GetConsensus().hashGenesisBlock)) { + !chainman.m_blockman.LookupBlockIndex(chainparams.GetConsensus().hashGenesisBlock)) { return InitError(_("Incorrect or no genesis block found. Wrong datadir for network?")); } @@ -1400,7 +1396,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // If we're not mid-reindex (based on disk + args), add a genesis block on disk // (otherwise we use the one already on disk). // This is called again in ThreadImport after the reindex completes. 
- if (!fReindex && !::ChainstateActive().LoadGenesisBlock(chainparams)) { + if (!fReindex && !chainman.ActiveChainstate().LoadGenesisBlock(chainparams)) { strLoadError = _("Error initializing block database"); break; } @@ -1549,21 +1545,21 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // ********************************************************* Step 8: start indexers if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) { g_txindex = std::make_unique<TxIndex>(nTxIndexCache, false, fReindex); - if (!g_txindex->Start(::ChainstateActive())) { + if (!g_txindex->Start(chainman.ActiveChainstate())) { return false; } } for (const auto& filter_type : g_enabled_filter_types) { InitBlockFilterIndex(filter_type, filter_index_cache, false, fReindex); - if (!GetBlockFilterIndex(filter_type)->Start(::ChainstateActive())) { + if (!GetBlockFilterIndex(filter_type)->Start(chainman.ActiveChainstate())) { return false; } } if (args.GetBoolArg("-coinstatsindex", DEFAULT_COINSTATSINDEX)) { g_coin_stats_index = std::make_unique<CoinStatsIndex>(/* cache size */ 0, false, fReindex); - if (!g_coin_stats_index->Start(::ChainstateActive())) { + if (!g_coin_stats_index->Start(chainman.ActiveChainstate())) { return false; } } @@ -1611,7 +1607,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info) // Either install a handler to notify us when genesis activates, or set fHaveGenesis directly. // No locking, as this happens before any background thread is started. boost::signals2::connection block_notify_genesis_wait_connection; - if (::ChainActive().Tip() == nullptr) { + if (chainman.ActiveChain().Tip() == nullptr) { block_notify_genesis_wait_connection = uiInterface.NotifyBlockTip_connect(std::bind(BlockNotifyGenesisWait, std::placeholders::_2)); } else { fHaveGenesis = true; diff --git a/src/init.h b/src/init.h index 328eda9c7e..b856468e5d 100644 --- a/src/init.h +++ b/src/init.h @@ -69,7 +69,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info /** * Register all arguments with the ArgsManager */ -void SetupServerArgs(NodeContext& node); +void SetupServerArgs(ArgsManager& argsman); /** Returns licensing information (for -version) */ std::string LicenseInfo(); diff --git a/src/init/bitcoin-node.cpp b/src/init/bitcoin-node.cpp index 49684ede83..6b6157c139 100644 --- a/src/init/bitcoin-node.cpp +++ b/src/init/bitcoin-node.cpp @@ -6,6 +6,7 @@ #include <interfaces/init.h> #include <interfaces/ipc.h> #include <node/context.h> +#include <util/system.h> #include <memory> @@ -20,6 +21,7 @@ public: : m_node(node), m_ipc(interfaces::MakeIpc(EXE_NAME, arg0, *this)) { + m_node.args = &gArgs; m_node.init = this; } std::unique_ptr<interfaces::Echo> makeEcho() override { return interfaces::MakeEcho(); } diff --git a/src/init/bitcoind.cpp b/src/init/bitcoind.cpp index 1e17ce4d3c..1d4504c24f 100644 --- a/src/init/bitcoind.cpp +++ b/src/init/bitcoind.cpp @@ -4,6 +4,7 @@ #include <interfaces/init.h> #include <node/context.h> +#include <util/system.h> #include <memory> @@ -14,6 +15,7 @@ class BitcoindInit : public interfaces::Init public: BitcoindInit(NodeContext& node) : m_node(node) { + m_node.args = &gArgs; m_node.init = this; } NodeContext& m_node; diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h index 3395741b1b..7cac435e96 100644 --- a/src/interfaces/chain.h +++ b/src/interfaces/chain.h @@ -277,6 +277,9 @@ public: //! to be prepared to handle this by ignoring notifications about unknown //! 
removed transactions and already added new transactions. virtual void requestMempoolTransactions(Notifications& notifications) = 0; + + //! Check if Taproot has activated + virtual bool isTaprootActive() const = 0; }; //! Interface to let node manage chain clients (wallets, or maybe tools for diff --git a/src/interfaces/node.h b/src/interfaces/node.h index 35b6160cea..77129423db 100644 --- a/src/interfaces/node.h +++ b/src/interfaces/node.h @@ -111,10 +111,8 @@ public: //! Disconnect node by id. virtual bool disconnectById(NodeId id) = 0; -#ifdef ENABLE_EXTERNAL_SIGNER //! List external signers virtual std::vector<ExternalSigner> externalSigners() = 0; -#endif //! Get total bytes recv. virtual int64_t getTotalBytesRecv() = 0; diff --git a/src/interfaces/wallet.h b/src/interfaces/wallet.h index a0cb2787b7..fb1febc11b 100644 --- a/src/interfaces/wallet.h +++ b/src/interfaces/wallet.h @@ -198,9 +198,9 @@ public: virtual TransactionError fillPSBT(int sighash_type, bool sign, bool bip32derivs, + size_t* n_signed, PartiallySignedTransaction& psbtx, - bool& complete, - size_t* n_signed) = 0; + bool& complete) = 0; //! Get balances. virtual WalletBalances getBalances() = 0; diff --git a/src/key.cpp b/src/key.cpp index 5666adebb8..dcad386e77 100644 --- a/src/key.cpp +++ b/src/key.cpp @@ -7,10 +7,13 @@ #include <crypto/common.h> #include <crypto/hmac_sha512.h> +#include <hash.h> #include <random.h> #include <secp256k1.h> +#include <secp256k1_extrakeys.h> #include <secp256k1_recovery.h> +#include <secp256k1_schnorrsig.h> static secp256k1_context* secp256k1_context_sign = nullptr; @@ -258,6 +261,24 @@ bool CKey::SignCompact(const uint256 &hash, std::vector<unsigned char>& vchSig) return true; } +bool CKey::SignSchnorr(const uint256& hash, Span<unsigned char> sig, const uint256* merkle_root, const uint256* aux) const +{ + assert(sig.size() == 64); + secp256k1_keypair keypair; + if (!secp256k1_keypair_create(secp256k1_context_sign, &keypair, begin())) return false; + if (merkle_root) { + secp256k1_xonly_pubkey pubkey; + if (!secp256k1_keypair_xonly_pub(secp256k1_context_sign, &pubkey, nullptr, &keypair)) return false; + unsigned char pubkey_bytes[32]; + if (!secp256k1_xonly_pubkey_serialize(secp256k1_context_sign, pubkey_bytes, &pubkey)) return false; + uint256 tweak = XOnlyPubKey(pubkey_bytes).ComputeTapTweakHash(merkle_root->IsNull() ? nullptr : merkle_root); + if (!secp256k1_keypair_xonly_tweak_add(GetVerifyContext(), &keypair, tweak.data())) return false; + } + bool ret = secp256k1_schnorrsig_sign(secp256k1_context_sign, sig.data(), hash.data(), &keypair, secp256k1_nonce_function_bip340, aux ? (void*)aux->data() : nullptr); + memory_cleanse(&keypair, sizeof(keypair)); + return ret; +} + bool CKey::Load(const CPrivKey &seckey, const CPubKey &vchPubKey, bool fSkipCheck=false) { if (!ec_seckey_import_der(secp256k1_context_sign, (unsigned char*)begin(), seckey.data(), seckey.size())) return false; @@ -128,6 +128,18 @@ public: */ bool SignCompact(const uint256& hash, std::vector<unsigned char>& vchSig) const; + /** + * Create a BIP-340 Schnorr signature, for the xonly-pubkey corresponding to *this, + * optionally tweaked by *merkle_root. Additional nonce entropy can be provided through + * aux. 
+ * + * When merkle_root is not nullptr, this results in a signature with a modified key as + * specified in BIP341: + * - If merkle_root->IsNull(): key + H_TapTweak(pubkey)*G + * - Otherwise: key + H_TapTweak(pubkey || *merkle_root) + */ + bool SignSchnorr(const uint256& hash, Span<unsigned char> sig, const uint256* merkle_root = nullptr, const uint256* aux = nullptr) const; + //! Derive BIP32 child key. bool Derive(CKey& keyChild, ChainCode &ccChild, unsigned int nChild, const ChainCode& cc) const; diff --git a/src/miner.cpp b/src/miner.cpp index eccddbb04f..0cf303eb3c 100644 --- a/src/miner.cpp +++ b/src/miner.cpp @@ -45,15 +45,7 @@ void RegenerateCommitments(CBlock& block, ChainstateManager& chainman) tx.vout.erase(tx.vout.begin() + GetWitnessCommitmentIndex(block)); block.vtx.at(0) = MakeTransactionRef(tx); - CBlockIndex* prev_block; - { - // TODO: Temporary scope to check correctness of refactored code. - // Should be removed manually after merge of - // https://github.com/bitcoin/bitcoin/pull/20158 - LOCK(::cs_main); - assert(std::addressof(g_chainman.m_blockman) == std::addressof(chainman.m_blockman)); - prev_block = chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock); - } + CBlockIndex* prev_block = WITH_LOCK(::cs_main, return chainman.m_blockman.LookupBlockIndex(block.hashPrevBlock)); GenerateCoinbaseCommitment(block, prev_block, Params().GetConsensus()); block.hashMerkleRoot = BlockMerkleRoot(block); @@ -124,7 +116,6 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc pblocktemplate->vTxSigOpsCost.push_back(-1); // updated at end LOCK2(cs_main, m_mempool.cs); - assert(std::addressof(*::ChainActive().Tip()) == std::addressof(*m_chainstate.m_chain.Tip())); CBlockIndex* pindexPrev = m_chainstate.m_chain.Tip(); assert(pindexPrev != nullptr); nHeight = pindexPrev->nHeight + 1; @@ -184,7 +175,6 @@ std::unique_ptr<CBlockTemplate> BlockAssembler::CreateNewBlock(const CScript& sc pblocktemplate->vTxSigOpsCost[0] = WITNESS_SCALE_FACTOR * GetLegacySigOpCount(*pblock->vtx[0]); BlockValidationState state; - assert(std::addressof(::ChainstateActive()) == std::addressof(m_chainstate)); if (!TestBlockValidity(state, chainparams, m_chainstate, *pblock, pindexPrev, false, false)) { throw std::runtime_error(strprintf("%s: TestBlockValidity failed: %s", __func__, state.ToString())); } diff --git a/src/net.cpp b/src/net.cpp index 6f9f17ed4e..60059249ed 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -42,6 +42,7 @@ #endif #include <algorithm> +#include <array> #include <cstdint> #include <functional> #include <optional> @@ -841,18 +842,6 @@ static bool ReverseCompareNodeTimeConnected(const NodeEvictionCandidate &a, cons return a.nTimeConnected > b.nTimeConnected; } -static bool CompareLocalHostTimeConnected(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) -{ - if (a.m_is_local != b.m_is_local) return b.m_is_local; - return a.nTimeConnected > b.nTimeConnected; -} - -static bool CompareOnionTimeConnected(const NodeEvictionCandidate& a, const NodeEvictionCandidate& b) -{ - if (a.m_is_onion != b.m_is_onion) return b.m_is_onion; - return a.nTimeConnected > b.nTimeConnected; -} - static bool CompareNetGroupKeyed(const NodeEvictionCandidate &a, const NodeEvictionCandidate &b) { return a.nKeyedNetGroup < b.nKeyedNetGroup; } @@ -883,6 +872,26 @@ static bool CompareNodeBlockRelayOnlyTime(const NodeEvictionCandidate &a, const return a.nTimeConnected > b.nTimeConnected; } +/** + * Sort eviction candidates by network/localhost and connection uptime. 
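The key.cpp/key.h hunks above add BIP340 Schnorr signing with an optional BIP341 taproot tweak. A hedged sketch of a caller, assuming the 32-byte sighash has already been computed; SignExample is illustrative and not part of the change:

    #include <key.h>
    #include <span.h>
    #include <uint256.h>

    #include <vector>

    bool SignExample(const CKey& key, const uint256& sighash, std::vector<unsigned char>& sig_out)
    {
        sig_out.resize(64);        // BIP340 signatures are exactly 64 bytes
        const uint256 merkle_root; // IsNull(): tweak the key with H_TapTweak(pubkey) only (no script tree)
        // Passing nullptr for merkle_root instead would sign with the untweaked key.
        return key.SignSchnorr(sighash, Span<unsigned char>{sig_out.data(), sig_out.size()},
                               &merkle_root, /*aux=*/nullptr);
    }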
+ * Candidates near the beginning are more likely to be evicted, and those + * near the end are more likely to be protected, e.g. less likely to be evicted. + * - First, nodes that are not `is_local` and that do not belong to `network`, + * sorted by increasing uptime (from most recently connected to connected longer). + * - Then, nodes that are `is_local` or belong to `network`, sorted by increasing uptime. + */ +struct CompareNodeNetworkTime { + const bool m_is_local; + const Network m_network; + CompareNodeNetworkTime(bool is_local, Network network) : m_is_local(is_local), m_network(network) {} + bool operator()(const NodeEvictionCandidate& a, const NodeEvictionCandidate& b) const + { + if (m_is_local && a.m_is_local != b.m_is_local) return b.m_is_local; + if ((a.m_network == m_network) != (b.m_network == m_network)) return b.m_network == m_network; + return a.nTimeConnected > b.nTimeConnected; + }; +}; + //! Sort an array by the specified comparator, then erase the last K elements where predicate is true. template <typename T, typename Comparator> static void EraseLastKElements( @@ -894,40 +903,72 @@ static void EraseLastKElements( elements.erase(std::remove_if(elements.end() - eraseSize, elements.end(), predicate), elements.end()); } -void ProtectEvictionCandidatesByRatio(std::vector<NodeEvictionCandidate>& vEvictionCandidates) +void ProtectEvictionCandidatesByRatio(std::vector<NodeEvictionCandidate>& eviction_candidates) { // Protect the half of the remaining nodes which have been connected the longest. // This replicates the non-eviction implicit behavior, and precludes attacks that start later. - // To favorise the diversity of our peer connections, reserve up to (half + 2) of - // these protected spots for onion and localhost peers, if any, even if they're not - // longest uptime overall. This helps protect tor peers, which tend to be otherwise + // To favorise the diversity of our peer connections, reserve up to half of these protected + // spots for Tor/onion, localhost and I2P peers, even if they're not longest uptime overall. + // This helps protect these higher-latency peers that tend to be otherwise // disadvantaged under our eviction criteria. - const size_t initial_size = vEvictionCandidates.size(); - size_t total_protect_size = initial_size / 2; - const size_t onion_protect_size = total_protect_size / 2; - - if (onion_protect_size) { - // Pick out up to 1/4 peers connected via our onion service, sorted by longest uptime. - EraseLastKElements(vEvictionCandidates, CompareOnionTimeConnected, onion_protect_size, - [](const NodeEvictionCandidate& n) { return n.m_is_onion; }); - } - - const size_t localhost_min_protect_size{2}; - if (onion_protect_size >= localhost_min_protect_size) { - // Allocate any remaining slots of the 1/4, or minimum 2 additional slots, - // to localhost peers, sorted by longest uptime, as manually configured - // hidden services not using `-bind=addr[:port]=onion` will not be detected - // as inbound onion connections. - const size_t remaining_tor_slots{onion_protect_size - (initial_size - vEvictionCandidates.size())}; - const size_t localhost_protect_size{std::max(remaining_tor_slots, localhost_min_protect_size)}; - EraseLastKElements(vEvictionCandidates, CompareLocalHostTimeConnected, localhost_protect_size, - [](const NodeEvictionCandidate& n) { return n.m_is_local; }); + const size_t initial_size = eviction_candidates.size(); + const size_t total_protect_size{initial_size / 2}; + + // Disadvantaged networks to protect: I2P, localhost, Tor/onion. 
In case of equal counts, earlier + // array members have first opportunity to recover unused slots from the previous iteration. + struct Net { bool is_local; Network id; size_t count; }; + std::array<Net, 3> networks{ + {{false, NET_I2P, 0}, {/* localhost */ true, NET_MAX, 0}, {false, NET_ONION, 0}}}; + + // Count and store the number of eviction candidates per network. + for (Net& n : networks) { + n.count = std::count_if(eviction_candidates.cbegin(), eviction_candidates.cend(), + [&n](const NodeEvictionCandidate& c) { + return n.is_local ? c.m_is_local : c.m_network == n.id; + }); + } + // Sort `networks` by ascending candidate count, to give networks having fewer candidates + // the first opportunity to recover unused protected slots from the previous iteration. + std::stable_sort(networks.begin(), networks.end(), [](Net a, Net b) { return a.count < b.count; }); + + // Protect up to 25% of the eviction candidates by disadvantaged network. + const size_t max_protect_by_network{total_protect_size / 2}; + size_t num_protected{0}; + + while (num_protected < max_protect_by_network) { + const size_t disadvantaged_to_protect{max_protect_by_network - num_protected}; + const size_t protect_per_network{ + std::max(disadvantaged_to_protect / networks.size(), static_cast<size_t>(1))}; + + // Early exit flag if there are no remaining candidates by disadvantaged network. + bool protected_at_least_one{false}; + + for (const Net& n : networks) { + if (n.count == 0) continue; + const size_t before = eviction_candidates.size(); + EraseLastKElements(eviction_candidates, CompareNodeNetworkTime(n.is_local, n.id), + protect_per_network, [&n](const NodeEvictionCandidate& c) { + return n.is_local ? c.m_is_local : c.m_network == n.id; + }); + const size_t after = eviction_candidates.size(); + if (before > after) { + protected_at_least_one = true; + num_protected += before - after; + if (num_protected >= max_protect_by_network) { + break; + } + } + } + if (!protected_at_least_one) { + break; + } } // Calculate how many we removed, and update our total number of peers that // we want to protect based on uptime accordingly. - total_protect_size -= initial_size - vEvictionCandidates.size(); - EraseLastKElements(vEvictionCandidates, ReverseCompareNodeTimeConnected, total_protect_size); + assert(num_protected == initial_size - eviction_candidates.size()); + const size_t remaining_to_protect{total_protect_size - num_protected}; + EraseLastKElements(eviction_candidates, ReverseCompareNodeTimeConnected, remaining_to_protect); } [[nodiscard]] std::optional<NodeId> SelectNodeToEvict(std::vector<NodeEvictionCandidate>&& vEvictionCandidates) @@ -944,8 +985,7 @@ void ProtectEvictionCandidatesByRatio(std::vector<NodeEvictionCandidate>& vEvict // An attacker cannot manipulate this metric without performing useful work. EraseLastKElements(vEvictionCandidates, CompareNodeTXTime, 4); // Protect up to 8 non-tx-relay peers that have sent us novel blocks. - const size_t erase_size = std::min(size_t(8), vEvictionCandidates.size()); - EraseLastKElements(vEvictionCandidates, CompareNodeBlockRelayOnlyTime, erase_size, + EraseLastKElements(vEvictionCandidates, CompareNodeBlockRelayOnlyTime, 8, [](const NodeEvictionCandidate& n) { return !n.fRelayTxes && n.fRelevantServices; }); // Protect 4 nodes that most recently sent us novel blocks. 
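The rewritten ProtectEvictionCandidatesByRatio above reserves up to a quarter of all eviction candidates for I2P, localhost and onion peers, rotating through the networks until that budget is spent. A self-contained toy illustration of the comparator's ordering, using simplified stand-in types rather than the real NodeEvictionCandidate:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    enum class Net { IPV4, ONION, I2P };

    struct Candidate {
        int64_t time_connected; // lower value = connected longer
        bool is_local;
        Net network;
    };

    // Mirrors the intent of CompareNodeNetworkTime: peers NOT on the favoured network sort first
    // (more evictable); favoured-network peers sort last (protected), longest-connected at the end.
    struct CompareByNetworkThenTime {
        bool favour_local;
        Net favoured;
        bool operator()(const Candidate& a, const Candidate& b) const
        {
            if (favour_local && a.is_local != b.is_local) return b.is_local;
            if ((a.network == favoured) != (b.network == favoured)) return b.network == favoured;
            return a.time_connected > b.time_connected;
        }
    };

    void SortForProtection(std::vector<Candidate>& v)
    {
        // After sorting, the tail of the vector holds the best-protected candidates, so
        // "erase the last K elements" protects onion peers with the longest uptime first.
        std::sort(v.begin(), v.end(), CompareByNetworkThenTime{/*favour_local=*/false, Net::ONION});
    }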
@@ -1024,7 +1064,7 @@ bool CConnman::AttemptToEvictConnection() HasAllDesirableServiceFlags(node->nServices), peer_relay_txes, peer_filter_not_null, node->nKeyedNetGroup, node->m_prefer_evict, node->addr.IsLocal(), - node->m_inbound_onion}; + node->ConnectedThroughNetwork()}; vEvictionCandidates.push_back(candidate); } } @@ -2173,6 +2213,7 @@ void CConnman::OpenNetworkConnection(const CAddress& addrConnect, bool fCountFai void CConnman::ThreadMessageHandler() { + FastRandomContext rng; while (!flagInterruptMsgProc) { std::vector<CNode*> vNodesCopy; @@ -2186,6 +2227,11 @@ void CConnman::ThreadMessageHandler() bool fMoreWork = false; + // Randomize the order in which we process messages from/to our peers. + // This prevents attacks in which an attacker exploits having multiple + // consecutive connections in the vNodes list. + Shuffle(vNodesCopy.begin(), vNodesCopy.end(), rng); + for (CNode* pnode : vNodesCopy) { if (pnode->fDisconnect) @@ -1209,7 +1209,7 @@ struct NodeEvictionCandidate uint64_t nKeyedNetGroup; bool prefer_evict; bool m_is_local; - bool m_is_onion; + Network m_network; }; /** @@ -1227,20 +1227,20 @@ struct NodeEvictionCandidate * longest, to replicate the non-eviction implicit behavior and preclude attacks * that start later. * - * Half of these protected spots (1/4 of the total) are reserved for onion peers - * connected via our tor control service, if any, sorted by longest uptime, even - * if they're not longest uptime overall. Any remaining slots of the 1/4 are - * then allocated to protect localhost peers, if any (or up to 2 localhost peers - * if no slots remain and 2 or more onion peers were protected), sorted by - * longest uptime, as manually configured hidden services not using - * `-bind=addr[:port]=onion` will not be detected as inbound onion connections. + * Half of these protected spots (1/4 of the total) are reserved for the + * following categories of peers, sorted by longest uptime, even if they're not + * longest uptime overall: + * + * - onion peers connected via our tor control service + * + * - localhost peers, as manually configured hidden services not using + * `-bind=addr[:port]=onion` will not be detected as inbound onion connections * - * This helps protect onion peers, which tend to be otherwise disadvantaged - * under our eviction criteria for their higher min ping times relative to IPv4 - * and IPv6 peers, and favorise the diversity of peer connections. + * - I2P peers * - * This function was extracted from SelectNodeToEvict() to be able to test the - * ratio-based protection logic deterministically. + * This helps protect these privacy network peers, which tend to be otherwise + * disadvantaged under our eviction criteria for their higher min ping times + * relative to IPv4/IPv6 peers, and favorise the diversity of peer connections. */ void ProtectEvictionCandidatesByRatio(std::vector<NodeEvictionCandidate>& vEvictionCandidates); diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 65224b4259..2175b76d16 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -159,10 +159,10 @@ static constexpr size_t MAX_ADDR_TO_SEND{1000}; namespace { /** Blocks that are in flight, and that are in the queue to be downloaded. */ struct QueuedBlock { - uint256 hash; - const CBlockIndex* pindex; //!< Optional. - bool fValidatedHeaders; //!< Whether this block has validated headers at the time of request. - std::unique_ptr<PartiallyDownloadedBlock> partialBlock; //!< Optional, used for CMPCTBLOCK downloads + /** BlockIndex. 
We must have this since we only request blocks when we've already validated the header. */ + const CBlockIndex* pindex; + /** Optional, used for CMPCTBLOCK downloads */ + std::unique_ptr<PartiallyDownloadedBlock> partialBlock; }; /** @@ -463,16 +463,20 @@ private: Mutex m_recent_confirmed_transactions_mutex; std::unique_ptr<CRollingBloomFilter> m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex); - /* Returns a bool indicating whether we requested this block. - * Also used if a block was /not/ received and timed out or started with another peer + /** Have we requested this block from a peer */ + bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + + /** Remove this block from our tracked requested blocks. Called if: + * - the block has been received from a peer + * - the request for the block has timed out */ - bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + void RemoveBlockRequest(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /* Mark a block as in flight * Returns false, still setting pit, if the block was already in flight from the same peer * pit will only be valid as long as the same cs_main lock is being held */ - bool MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex = nullptr, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); + bool BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit = nullptr) EXCLUSIVE_LOCKS_REQUIRED(cs_main); bool TipMayBeStale() EXCLUSIVE_LOCKS_REQUIRED(cs_main); @@ -512,7 +516,7 @@ private: std::list<NodeId> lNodesAnnouncingHeaderAndIDs GUARDED_BY(cs_main); /** Number of peers from which we're downloading blocks. */ - int nPeersWithValidatedDownloads GUARDED_BY(cs_main) = 0; + int m_peers_downloading_from GUARDED_BY(cs_main) = 0; /** Storage for orphan information */ TxOrphanage m_orphanage; @@ -627,7 +631,6 @@ struct CNodeState { //! When the first entry in vBlocksInFlight started downloading. Don't care when vBlocksInFlight is empty. std::chrono::microseconds m_downloading_since{0us}; int nBlocksInFlight{0}; - int nBlocksInFlightValidHeaders{0}; //! Whether we consider this a preferred download peer. bool fPreferredDownload{false}; //! Whether this peer wants invs or headers (when possible) for block announcements. @@ -758,32 +761,42 @@ static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUS nPreferredDownload += state->fPreferredDownload; } -bool PeerManagerImpl::MarkBlockAsReceived(const uint256& hash) +bool PeerManagerImpl::IsBlockRequested(const uint256& hash) { - std::map<uint256, std::pair<NodeId, std::list<QueuedBlock>::iterator> >::iterator itInFlight = mapBlocksInFlight.find(hash); - if (itInFlight != mapBlocksInFlight.end()) { - CNodeState *state = State(itInFlight->second.first); - assert(state != nullptr); - state->nBlocksInFlightValidHeaders -= itInFlight->second.second->fValidatedHeaders; - if (state->nBlocksInFlightValidHeaders == 0 && itInFlight->second.second->fValidatedHeaders) { - // Last validated block on the queue was received. 
- nPeersWithValidatedDownloads--; - } - if (state->vBlocksInFlight.begin() == itInFlight->second.second) { - // First block on the queue was received, update the start download time for the next one - state->m_downloading_since = std::max(state->m_downloading_since, GetTime<std::chrono::microseconds>()); - } - state->vBlocksInFlight.erase(itInFlight->second.second); - state->nBlocksInFlight--; - state->m_stalling_since = 0us; - mapBlocksInFlight.erase(itInFlight); - return true; + return mapBlocksInFlight.find(hash) != mapBlocksInFlight.end(); +} + +void PeerManagerImpl::RemoveBlockRequest(const uint256& hash) +{ + auto it = mapBlocksInFlight.find(hash); + if (it == mapBlocksInFlight.end()) { + // Block was not requested + return; } - return false; + + auto [node_id, list_it] = it->second; + CNodeState *state = State(node_id); + assert(state != nullptr); + + if (state->vBlocksInFlight.begin() == list_it) { + // First block on the queue was received, update the start download time for the next one + state->m_downloading_since = std::max(state->m_downloading_since, GetTime<std::chrono::microseconds>()); + } + state->vBlocksInFlight.erase(list_it); + + state->nBlocksInFlight--; + if (state->nBlocksInFlight == 0) { + // Last validated block on the queue was received. + m_peers_downloading_from--; + } + state->m_stalling_since = 0us; + mapBlocksInFlight.erase(it); } -bool PeerManagerImpl::MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, const CBlockIndex* pindex, std::list<QueuedBlock>::iterator** pit) +bool PeerManagerImpl::BlockRequested(NodeId nodeid, const CBlockIndex& block, std::list<QueuedBlock>::iterator** pit) { + const uint256& hash{block.GetBlockHash()}; + CNodeState *state = State(nodeid); assert(state != nullptr); @@ -797,22 +810,20 @@ bool PeerManagerImpl::MarkBlockAsInFlight(NodeId nodeid, const uint256& hash, co } // Make sure it's not listed somewhere already. - MarkBlockAsReceived(hash); + RemoveBlockRequest(hash); std::list<QueuedBlock>::iterator it = state->vBlocksInFlight.insert(state->vBlocksInFlight.end(), - {hash, pindex, pindex != nullptr, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)}); + {&block, std::unique_ptr<PartiallyDownloadedBlock>(pit ? new PartiallyDownloadedBlock(&m_mempool) : nullptr)}); state->nBlocksInFlight++; - state->nBlocksInFlightValidHeaders += it->fValidatedHeaders; if (state->nBlocksInFlight == 1) { // We're starting a block download (batch) from this peer. 
state->m_downloading_since = GetTime<std::chrono::microseconds>(); - } - if (state->nBlocksInFlightValidHeaders == 1 && pindex != nullptr) { - nPeersWithValidatedDownloads++; + m_peers_downloading_from++; } itInFlight = mapBlocksInFlight.insert(std::make_pair(hash, std::make_pair(nodeid, it))).first; - if (pit) + if (pit) { *pit = &itInFlight->second.second; + } return true; } @@ -825,12 +836,27 @@ void PeerManagerImpl::MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid) return; } if (nodestate->fProvidesHeaderAndIDs) { + int num_outbound_hb_peers = 0; for (std::list<NodeId>::iterator it = lNodesAnnouncingHeaderAndIDs.begin(); it != lNodesAnnouncingHeaderAndIDs.end(); it++) { if (*it == nodeid) { lNodesAnnouncingHeaderAndIDs.erase(it); lNodesAnnouncingHeaderAndIDs.push_back(nodeid); return; } + CNodeState *state = State(*it); + if (state != nullptr && !state->m_is_inbound) ++num_outbound_hb_peers; + } + if (nodestate->m_is_inbound) { + // If we're adding an inbound HB peer, make sure we're not removing + // our last outbound HB peer in the process. + if (lNodesAnnouncingHeaderAndIDs.size() >= 3 && num_outbound_hb_peers == 1) { + CNodeState *remove_node = State(lNodesAnnouncingHeaderAndIDs.front()); + if (remove_node != nullptr && !remove_node->m_is_inbound) { + // Put the HB outbound peer in the second slot, so that it + // doesn't get removed. + std::swap(lNodesAnnouncingHeaderAndIDs.front(), *std::next(lNodesAnnouncingHeaderAndIDs.begin())); + } + } } m_connman.ForNode(nodeid, [this](CNode* pfrom) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) { AssertLockHeld(::cs_main); @@ -978,7 +1004,7 @@ void PeerManagerImpl::FindNextBlocksToDownload(NodeId nodeid, unsigned int count if (pindex->nStatus & BLOCK_HAVE_DATA || m_chainman.ActiveChain().Contains(pindex)) { if (pindex->HaveTxsDownloaded()) state->pindexLastCommonBlock = pindex; - } else if (mapBlocksInFlight.count(pindex->GetBlockHash()) == 0) { + } else if (!IsBlockRequested(pindex->GetBlockHash())) { // The block is not already downloaded, and not yet in flight. if (pindex->nHeight > nWindowEnd) { // We reached the end of the window. @@ -1129,13 +1155,13 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) nSyncStarted--; for (const QueuedBlock& entry : state->vBlocksInFlight) { - mapBlocksInFlight.erase(entry.hash); + mapBlocksInFlight.erase(entry.pindex->GetBlockHash()); } WITH_LOCK(g_cs_orphans, m_orphanage.EraseForPeer(nodeid)); m_txrequest.DisconnectedPeer(nodeid); nPreferredDownload -= state->fPreferredDownload; - nPeersWithValidatedDownloads -= (state->nBlocksInFlightValidHeaders != 0); - assert(nPeersWithValidatedDownloads >= 0); + m_peers_downloading_from -= (state->nBlocksInFlight != 0); + assert(m_peers_downloading_from >= 0); m_outbound_peers_with_protect_from_disconnect -= state->m_chain_sync.m_protect; assert(m_outbound_peers_with_protect_from_disconnect >= 0); m_wtxid_relay_peers -= state->m_wtxid_relay; @@ -1147,7 +1173,7 @@ void PeerManagerImpl::FinalizeNode(const CNode& node) // Do a consistency check after the last peer is removed. 
assert(mapBlocksInFlight.empty()); assert(nPreferredDownload == 0); - assert(nPeersWithValidatedDownloads == 0); + assert(m_peers_downloading_from == 0); assert(m_outbound_peers_with_protect_from_disconnect == 0); assert(m_wtxid_relay_peers == 0); assert(m_txrequest.Size() == 0); @@ -1350,7 +1376,6 @@ PeerManagerImpl::PeerManagerImpl(const CChainParams& chainparams, CConnman& conn m_stale_tip_check_time(0), m_ignore_incoming_txs(ignore_incoming_txs) { - assert(std::addressof(g_chainman) == std::addressof(m_chainman)); // Initialize global variables that cannot be constructed at startup. recentRejects.reset(new CRollingBloomFilter(120000, 0.000001)); @@ -2056,7 +2081,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer, // Calculate all the blocks we'd need to switch to pindexLast, up to a limit. while (pindexWalk && !m_chainman.ActiveChain().Contains(pindexWalk) && vToFetch.size() <= MAX_BLOCKS_IN_TRANSIT_PER_PEER) { if (!(pindexWalk->nStatus & BLOCK_HAVE_DATA) && - !mapBlocksInFlight.count(pindexWalk->GetBlockHash()) && + !IsBlockRequested(pindexWalk->GetBlockHash()) && (!IsWitnessEnabled(pindexWalk->pprev, m_chainparams.GetConsensus()) || State(pfrom.GetId())->fHaveWitness)) { // We don't have this block, and it's not yet in flight. vToFetch.push_back(pindexWalk); @@ -2081,7 +2106,7 @@ void PeerManagerImpl::ProcessHeadersMessage(CNode& pfrom, const Peer& peer, } uint32_t nFetchFlags = GetFetchFlags(pfrom); vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); - MarkBlockAsInFlight(pfrom.GetId(), pindex->GetBlockHash(), pindex); + BlockRequested(pfrom.GetId(), *pindex); LogPrint(BCLog::NET, "Requesting block %s from peer=%d\n", pindex->GetBlockHash().ToString(), pfrom.GetId()); } @@ -2827,7 +2852,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId()); UpdateBlockAvailability(pfrom.GetId(), inv.hash); - if (!fAlreadyHave && !fImporting && !fReindex && !mapBlocksInFlight.count(inv.hash)) { + if (!fAlreadyHave && !fImporting && !fReindex && !IsBlockRequested(inv.hash)) { // Headers-first is the primary method of announcement on // the network. If a node fell back to sending blocks by inv, // it's probably for a re-org. 
The final block hash @@ -3384,7 +3409,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, if ((!fAlreadyInFlight && nodestate->nBlocksInFlight < MAX_BLOCKS_IN_TRANSIT_PER_PEER) || (fAlreadyInFlight && blockInFlightIt->second.first == pfrom.GetId())) { std::list<QueuedBlock>::iterator* queuedBlockIt = nullptr; - if (!MarkBlockAsInFlight(pfrom.GetId(), pindex->GetBlockHash(), pindex, &queuedBlockIt)) { + if (!BlockRequested(pfrom.GetId(), *pindex, &queuedBlockIt)) { if (!(*queuedBlockIt)->partialBlock) (*queuedBlockIt)->partialBlock.reset(new PartiallyDownloadedBlock(&m_mempool)); else { @@ -3397,7 +3422,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, PartiallyDownloadedBlock& partialBlock = *(*queuedBlockIt)->partialBlock; ReadStatus status = partialBlock.InitData(cmpctblock, vExtraTxnForCompact); if (status == READ_STATUS_INVALID) { - MarkBlockAsReceived(pindex->GetBlockHash()); // Reset in-flight state in case Misbehaving does not result in a disconnect + RemoveBlockRequest(pindex->GetBlockHash()); // Reset in-flight state in case Misbehaving does not result in a disconnect Misbehaving(pfrom.GetId(), 100, "invalid compact block"); return; } else if (status == READ_STATUS_FAILED) { @@ -3492,7 +3517,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // process from some other peer. We do this after calling // ProcessNewBlock so that a malleated cmpctblock announcement // can't be used to interfere with block relay. - MarkBlockAsReceived(pblock->GetHash()); + RemoveBlockRequest(pblock->GetHash()); } } return; @@ -3524,7 +3549,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, PartiallyDownloadedBlock& partialBlock = *it->second.second->partialBlock; ReadStatus status = partialBlock.FillBlock(*pblock, resp.txn); if (status == READ_STATUS_INVALID) { - MarkBlockAsReceived(resp.blockhash); // Reset in-flight state in case Misbehaving does not result in a disconnect + RemoveBlockRequest(resp.blockhash); // Reset in-flight state in case Misbehaving does not result in a disconnect Misbehaving(pfrom.GetId(), 100, "invalid compact block/non-matching block transactions"); return; } else if (status == READ_STATUS_FAILED) { @@ -3550,7 +3575,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, // though the block was successfully read, and rely on the // handling in ProcessNewBlock to ensure the block index is // updated, etc. - MarkBlockAsReceived(resp.blockhash); // it is now an empty pointer + RemoveBlockRequest(resp.blockhash); // it is now an empty pointer fBlockRead = true; // mapBlockSource is used for potentially punishing peers and // updating which peers send us compact blocks, so the race @@ -3615,9 +3640,10 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, const uint256 hash(pblock->GetHash()); { LOCK(cs_main); - // Also always process if we requested the block explicitly, as we may - // need it even though it is not a candidate for a new best tip. - forceProcessing |= MarkBlockAsReceived(hash); + // Always process the block if we requested it, since we may + // need it even when it's not a candidate for a new best tip. + forceProcessing = IsBlockRequested(hash); + RemoveBlockRequest(hash); // mapBlockSource is only used for punishing peers and setting // which peers send us compact blocks, so the race between here and // cs_main in ProcessNewBlock is fine. 
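The net_processing hunks above fold block-download bookkeeping into IsBlockRequested / BlockRequested / RemoveBlockRequest plus a single m_peers_downloading_from counter. A simplified stand-alone sketch of that bookkeeping pattern, without cs_main or the real CNodeState; the types here are toys, not the actual structures:

    #include <cstdint>
    #include <map>
    #include <tuple>

    using NodeId = int64_t;
    struct Hash256 {
        uint64_t lo{0}, hi{0};
        bool operator<(const Hash256& o) const { return std::tie(lo, hi) < std::tie(o.lo, o.hi); }
    };

    class BlockRequestTracker
    {
        std::map<Hash256, NodeId> m_in_flight; // stands in for mapBlocksInFlight
        std::map<NodeId, int> m_per_peer;      // stands in for CNodeState::nBlocksInFlight
        int m_peers_downloading_from{0};

    public:
        bool IsBlockRequested(const Hash256& hash) const { return m_in_flight.count(hash) > 0; }

        void BlockRequested(NodeId peer, const Hash256& hash)
        {
            RemoveBlockRequest(hash); // make sure it is not listed somewhere already
            m_in_flight.emplace(hash, peer);
            if (++m_per_peer[peer] == 1) ++m_peers_downloading_from; // first block from this peer
        }

        void RemoveBlockRequest(const Hash256& hash)
        {
            auto it = m_in_flight.find(hash);
            if (it == m_in_flight.end()) return; // block was not requested
            if (--m_per_peer[it->second] == 0) --m_peers_downloading_from;
            m_in_flight.erase(it);
        }
    };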
@@ -4712,9 +4738,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto) // to unreasonably increase our timeout. if (state.vBlocksInFlight.size() > 0) { QueuedBlock &queuedBlock = state.vBlocksInFlight.front(); - int nOtherPeersWithValidatedDownloads = nPeersWithValidatedDownloads - (state.nBlocksInFlightValidHeaders > 0); + int nOtherPeersWithValidatedDownloads = m_peers_downloading_from - 1; if (current_time > state.m_downloading_since + std::chrono::seconds{consensusParams.nPowTargetSpacing} * (BLOCK_DOWNLOAD_TIMEOUT_BASE + BLOCK_DOWNLOAD_TIMEOUT_PER_PEER * nOtherPeersWithValidatedDownloads)) { - LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.hash.ToString(), pto->GetId()); + LogPrintf("Timeout downloading block %s from peer=%d, disconnecting\n", queuedBlock.pindex->GetBlockHash().ToString(), pto->GetId()); pto->fDisconnect = true; return true; } @@ -4767,7 +4793,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto) for (const CBlockIndex *pindex : vToDownload) { uint32_t nFetchFlags = GetFetchFlags(*pto); vGetData.push_back(CInv(MSG_BLOCK | nFetchFlags, pindex->GetBlockHash())); - MarkBlockAsInFlight(pto->GetId(), pindex->GetBlockHash(), pindex); + BlockRequested(pto->GetId(), *pindex); LogPrint(BCLog::NET, "Requesting block %s (%d) peer=%d\n", pindex->GetBlockHash().ToString(), pindex->nHeight, pto->GetId()); } diff --git a/src/netaddress.h b/src/netaddress.h index 0d04ab88fb..dd47ab5749 100644 --- a/src/netaddress.h +++ b/src/netaddress.h @@ -11,7 +11,9 @@ #include <attributes.h> #include <compat.h> +#include <crypto/siphash.h> #include <prevector.h> +#include <random.h> #include <serialize.h> #include <tinyformat.h> #include <util/strencodings.h> @@ -251,6 +253,7 @@ class CNetAddr } } + friend class CNetAddrHash; friend class CSubNet; private: @@ -464,6 +467,22 @@ class CNetAddr } }; +class CNetAddrHash +{ +public: + size_t operator()(const CNetAddr& a) const noexcept + { + CSipHasher hasher(m_salt_k0, m_salt_k1); + hasher.Write(a.m_net); + hasher.Write(a.m_addr.data(), a.m_addr.size()); + return static_cast<size_t>(hasher.Finalize()); + } + +private: + const uint64_t m_salt_k0 = GetRand(std::numeric_limits<uint64_t>::max()); + const uint64_t m_salt_k1 = GetRand(std::numeric_limits<uint64_t>::max()); +}; + class CSubNet { protected: diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp index 6c66c565ad..013d61282b 100644 --- a/src/node/blockstorage.cpp +++ b/src/node/blockstorage.cpp @@ -248,7 +248,6 @@ bool FindBlockPos(FlatFilePos& pos, unsigned int nAddSize, unsigned int nHeight, // when the undo file is keeping up with the block file, we want to flush it explicitly // when it is lagging behind (more blocks arrive than are being connected), we let the // undo block write case handle it - assert(std::addressof(::ChainActive()) == std::addressof(active_chain)); finalize_undo = (vinfoBlockFile[nFile].nHeightLast == (unsigned int)active_chain.Tip()->nHeight); nFile++; if (vinfoBlockFile.size() <= nFile) { diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h index faf99dea81..7c7bf68178 100644 --- a/src/node/blockstorage.h +++ b/src/node/blockstorage.h @@ -5,12 +5,13 @@ #ifndef BITCOIN_NODE_BLOCKSTORAGE_H #define BITCOIN_NODE_BLOCKSTORAGE_H -#include <cstdint> -#include <vector> - #include <fs.h> #include <protocol.h> // For CMessageHeader::MessageStartChars +#include <atomic> +#include <cstdint> +#include <vector> + class ArgsManager; class BlockValidationState; class CBlock; diff --git a/src/node/coin.cpp b/src/node/coin.cpp 
index 23d4fa2aae..50fddf3ab0 100644 --- a/src/node/coin.cpp +++ b/src/node/coin.cpp @@ -13,7 +13,6 @@ void FindCoins(const NodeContext& node, std::map<COutPoint, Coin>& coins) assert(node.mempool); assert(node.chainman); LOCK2(cs_main, node.mempool->cs); - assert(std::addressof(::ChainstateActive()) == std::addressof(node.chainman->ActiveChainstate())); CCoinsViewCache& chain_view = node.chainman->ActiveChainstate().CoinsTip(); CCoinsViewMemPool mempool_view(&chain_view, *node.mempool); for (auto& coin : coins) { diff --git a/src/node/coinstats.cpp b/src/node/coinstats.cpp index 38c1d29250..67e497c218 100644 --- a/src/node/coinstats.cpp +++ b/src/node/coinstats.cpp @@ -97,7 +97,6 @@ static bool GetUTXOStats(CCoinsView* view, BlockManager& blockman, CCoinsStats& if (!pindex) { { LOCK(cs_main); - assert(std::addressof(g_chainman.m_blockman) == std::addressof(blockman)); pindex = blockman.LookupBlockIndex(view->GetBestBlock()); } } diff --git a/src/node/context.cpp b/src/node/context.cpp index 6d22a6b110..9afadd09a9 100644 --- a/src/node/context.cpp +++ b/src/node/context.cpp @@ -12,6 +12,7 @@ #include <policy/fees.h> #include <scheduler.h> #include <txmempool.h> +#include <validation.h> NodeContext::NodeContext() {} NodeContext::~NodeContext() {} diff --git a/src/node/context.h b/src/node/context.h index 06adb33a80..135f9ea1c6 100644 --- a/src/node/context.h +++ b/src/node/context.h @@ -44,7 +44,7 @@ struct NodeContext { std::unique_ptr<CTxMemPool> mempool; std::unique_ptr<CBlockPolicyEstimator> fee_estimator; std::unique_ptr<PeerManager> peerman; - ChainstateManager* chainman{nullptr}; // Currently a raw pointer because the memory is not managed by this struct + std::unique_ptr<ChainstateManager> chainman; std::unique_ptr<BanMan> banman; ArgsManager* args{nullptr}; // Currently a raw pointer because the memory is not managed by this struct std::unique_ptr<interfaces::Chain> chain; diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp index 171f15d4fb..807b0143a6 100644 --- a/src/node/interfaces.cpp +++ b/src/node/interfaces.cpp @@ -6,6 +6,7 @@ #include <banman.h> #include <chain.h> #include <chainparams.h> +#include <external_signer.h> #include <init.h> #include <interfaces/chain.h> #include <interfaces/handler.h> @@ -170,16 +171,24 @@ public: } return false; } -#ifdef ENABLE_EXTERNAL_SIGNER std::vector<ExternalSigner> externalSigners() override { +#ifdef ENABLE_EXTERNAL_SIGNER std::vector<ExternalSigner> signers = {}; const std::string command = gArgs.GetArg("-signer", ""); if (command == "") return signers; ExternalSigner::Enumerate(command, signers, Params().NetworkIDString()); return signers; +#else + // This result is indistinguishable from a successful call that returns + // no signers. For the current GUI this doesn't matter, because the wallet + // creation dialog disables the external signer checkbox in both + // cases. The return type could be changed to std::optional<std::vector> + // (or something that also includes error messages) if this distinction + // becomes important. + return {}; +#endif // ENABLE_EXTERNAL_SIGNER } -#endif int64_t getTotalBytesRecv() override { return m_context->connman ? m_context->connman->GetTotalBytesRecv() : 0; } int64_t getTotalBytesSent() override { return m_context->connman ? m_context->connman->GetTotalBytesSent() : 0; } size_t getMempoolSize() override { return m_context->mempool ? 
m_context->mempool->size() : 0; } @@ -197,26 +206,16 @@ public: int getNumBlocks() override { LOCK(::cs_main); - assert(std::addressof(::ChainActive()) == std::addressof(chainman().ActiveChain())); return chainman().ActiveChain().Height(); } uint256 getBestBlockHash() override { - const CBlockIndex* tip; - { - // TODO: Temporary scope to check correctness of refactored code. - // Should be removed manually after merge of - // https://github.com/bitcoin/bitcoin/pull/20158 - LOCK(cs_main); - assert(std::addressof(::ChainActive()) == std::addressof(chainman().ActiveChain())); - tip = chainman().ActiveChain().Tip(); - } + const CBlockIndex* tip = WITH_LOCK(::cs_main, return chainman().ActiveChain().Tip()); return tip ? tip->GetBlockHash() : Params().GenesisBlock().GetHash(); } int64_t getLastBlockTime() override { LOCK(::cs_main); - assert(std::addressof(::ChainActive()) == std::addressof(chainman().ActiveChain())); if (chainman().ActiveChain().Tip()) { return chainman().ActiveChain().Tip()->GetBlockTime(); } @@ -227,22 +226,12 @@ public: const CBlockIndex* tip; { LOCK(::cs_main); - assert(std::addressof(::ChainActive()) == std::addressof(chainman().ActiveChain())); tip = chainman().ActiveChain().Tip(); } return GuessVerificationProgress(Params().TxData(), tip); } bool isInitialBlockDownload() override { - const CChainState* active_chainstate; - { - // TODO: Temporary scope to check correctness of refactored code. - // Should be removed manually after merge of - // https://github.com/bitcoin/bitcoin/pull/20158 - LOCK(::cs_main); - active_chainstate = &m_context->chainman->ActiveChainstate(); - assert(std::addressof(::ChainstateActive()) == std::addressof(*active_chainstate)); - } - return active_chainstate->IsInitialBlockDownload(); + return chainman().ActiveChainstate().IsInitialBlockDownload(); } bool getReindex() override { return ::fReindex; } bool getImporting() override { return ::fImporting; } @@ -269,7 +258,6 @@ public: bool getUnspentOutput(const COutPoint& output, Coin& coin) override { LOCK(::cs_main); - assert(std::addressof(::ChainstateActive()) == std::addressof(chainman().ActiveChainstate())); return chainman().ActiveChainstate().CoinsTip().GetCoin(output, coin); } WalletClient& walletClient() override @@ -476,14 +464,12 @@ public: bool checkFinalTx(const CTransaction& tx) override { LOCK(cs_main); - assert(std::addressof(::ChainActive()) == std::addressof(chainman().ActiveChain())); return CheckFinalTx(chainman().ActiveChain().Tip(), tx); } std::optional<int> findLocatorFork(const CBlockLocator& locator) override { LOCK(cs_main); const CChain& active = Assert(m_node.chainman)->ActiveChain(); - assert(std::addressof(g_chainman) == std::addressof(*m_node.chainman)); if (CBlockIndex* fork = m_node.chainman->m_blockman.FindForkInGlobalIndex(active, locator)) { return fork->nHeight; } @@ -493,7 +479,6 @@ public: { WAIT_LOCK(cs_main, lock); const CChain& active = Assert(m_node.chainman)->ActiveChain(); - assert(std::addressof(g_chainman) == std::addressof(*m_node.chainman)); return FillBlock(m_node.chainman->m_blockman.LookupBlockIndex(hash), block, lock, active); } bool findFirstBlockWithTimeAndHeight(int64_t min_time, int min_height, const FoundBlock& block) override @@ -506,7 +491,6 @@ public: { WAIT_LOCK(cs_main, lock); const CChain& active = Assert(m_node.chainman)->ActiveChain(); - assert(std::addressof(g_chainman) == std::addressof(*m_node.chainman)); if (const CBlockIndex* block = m_node.chainman->m_blockman.LookupBlockIndex(block_hash)) { if (const CBlockIndex* ancestor = 
block->GetAncestor(ancestor_height)) { return FillBlock(ancestor, ancestor_out, lock, active); @@ -518,9 +502,7 @@ public: { WAIT_LOCK(cs_main, lock); const CChain& active = Assert(m_node.chainman)->ActiveChain(); - assert(std::addressof(g_chainman) == std::addressof(*m_node.chainman)); const CBlockIndex* block = m_node.chainman->m_blockman.LookupBlockIndex(block_hash); - assert(std::addressof(g_chainman) == std::addressof(*m_node.chainman)); const CBlockIndex* ancestor = m_node.chainman->m_blockman.LookupBlockIndex(ancestor_hash); if (block && ancestor && block->GetAncestor(ancestor->nHeight) != ancestor) ancestor = nullptr; return FillBlock(ancestor, ancestor_out, lock, active); @@ -529,9 +511,7 @@ public: { WAIT_LOCK(cs_main, lock); const CChain& active = Assert(m_node.chainman)->ActiveChain(); - assert(std::addressof(g_chainman) == std::addressof(*m_node.chainman)); const CBlockIndex* block1 = m_node.chainman->m_blockman.LookupBlockIndex(block_hash1); - assert(std::addressof(g_chainman) == std::addressof(*m_node.chainman)); const CBlockIndex* block2 = m_node.chainman->m_blockman.LookupBlockIndex(block_hash2); const CBlockIndex* ancestor = block1 && block2 ? LastCommonAncestor(block1, block2) : nullptr; // Using & instead of && below to avoid short circuiting and leaving @@ -542,7 +522,6 @@ public: double guessVerificationProgress(const uint256& block_hash) override { LOCK(cs_main); - assert(std::addressof(g_chainman.m_blockman) == std::addressof(chainman().m_blockman)); return GuessVerificationProgress(Params().TxData(), chainman().m_blockman.LookupBlockIndex(block_hash)); } bool hasBlocks(const uint256& block_hash, int min_height, std::optional<int> max_height) override @@ -555,7 +534,6 @@ public: // used to limit the range, and passing min_height that's too low or // max_height that's too high will not crash or change the result. LOCK(::cs_main); - assert(std::addressof(g_chainman.m_blockman) == std::addressof(chainman().m_blockman)); if (CBlockIndex* block = chainman().m_blockman.LookupBlockIndex(block_hash)) { if (max_height && block->nHeight >= *max_height) block = block->GetAncestor(*max_height); for (; block->nStatus & BLOCK_HAVE_DATA; block = block->pprev) { @@ -647,16 +625,7 @@ public: } bool isReadyToBroadcast() override { return !::fImporting && !::fReindex && !isInitialBlockDownload(); } bool isInitialBlockDownload() override { - const CChainState* active_chainstate; - { - // TODO: Temporary scope to check correctness of refactored code. 
- // Should be removed manually after merge of - // https://github.com/bitcoin/bitcoin/pull/20158 - LOCK(::cs_main); - active_chainstate = &chainman().ActiveChainstate(); - assert(std::addressof(::ChainstateActive()) == std::addressof(*active_chainstate)); - } - return active_chainstate->IsInitialBlockDownload(); + return chainman().ActiveChainstate().IsInitialBlockDownload(); } bool shutdownRequested() override { return ShutdownRequested(); } int64_t getAdjustedTime() override { return GetAdjustedTime(); } @@ -719,6 +688,12 @@ public: notifications.transactionAddedToMempool(entry.GetSharedTx(), 0 /* mempool_sequence */); } } + bool isTaprootActive() const override + { + LOCK(::cs_main); + const CBlockIndex* tip = Assert(m_node.chainman)->ActiveChain().Tip(); + return VersionBitsState(tip, Params().GetConsensus(), Consensus::DEPLOYMENT_TAPROOT, versionbitscache) == ThresholdState::ACTIVE; + } NodeContext& m_node; }; } // namespace diff --git a/src/node/psbt.cpp b/src/node/psbt.cpp index c189018268..b013b6d579 100644 --- a/src/node/psbt.cpp +++ b/src/node/psbt.cpp @@ -23,6 +23,8 @@ PSBTAnalysis AnalyzePSBT(PartiallySignedTransaction psbtx) result.inputs.resize(psbtx.tx->vin.size()); + const PrecomputedTransactionData txdata = PrecomputePSBTData(psbtx); + for (unsigned int i = 0; i < psbtx.tx->vin.size(); ++i) { PSBTInput& input = psbtx.inputs[i]; PSBTInputAnalysis& input_analysis = result.inputs[i]; @@ -61,7 +63,7 @@ PSBTAnalysis AnalyzePSBT(PartiallySignedTransaction psbtx) // Figure out what is missing SignatureData outdata; - bool complete = SignPSBTInput(DUMMY_SIGNING_PROVIDER, psbtx, i, 1, &outdata); + bool complete = SignPSBTInput(DUMMY_SIGNING_PROVIDER, psbtx, i, &txdata, 1, &outdata); // Things are missing if (!complete) { @@ -121,7 +123,7 @@ PSBTAnalysis AnalyzePSBT(PartiallySignedTransaction psbtx) PSBTInput& input = psbtx.inputs[i]; Coin newcoin; - if (!SignPSBTInput(DUMMY_SIGNING_PROVIDER, psbtx, i, 1, nullptr, true) || !psbtx.GetInputUTXO(newcoin.out, i)) { + if (!SignPSBTInput(DUMMY_SIGNING_PROVIDER, psbtx, i, nullptr, 1) || !psbtx.GetInputUTXO(newcoin.out, i)) { success = false; break; } else { diff --git a/src/node/transaction.cpp b/src/node/transaction.cpp index a1e7a71e2c..f21b390915 100644 --- a/src/node/transaction.cpp +++ b/src/node/transaction.cpp @@ -40,7 +40,6 @@ TransactionError BroadcastTransaction(NodeContext& node, const CTransactionRef t { // cs_main scope assert(node.chainman); LOCK(cs_main); - assert(std::addressof(::ChainstateActive()) == std::addressof(node.chainman->ActiveChainstate())); // If the transaction is already confirmed in the chain, don't do anything // and return early. 
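The interfaces.cpp hunk above implements Chain::isTaprootActive() by checking the versionbits state at the active tip. A hedged sketch of a wallet-side caller; CanIssueBech32mAddress is an illustrative name, not an API from this change:

    #include <interfaces/chain.h>

    bool CanIssueBech32mAddress(interfaces::Chain& chain)
    {
        // A wallet could gate handing out taproot (bech32m) receive addresses on this check,
        // since such outputs are only standard to spend once the deployment is ACTIVE.
        return chain.isTaprootActive();
    }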
CCoinsViewCache &view = node.chainman->ActiveChainstate().CoinsTip(); diff --git a/src/outputtype.cpp b/src/outputtype.cpp index d96fb282c5..8ede7b9974 100644 --- a/src/outputtype.cpp +++ b/src/outputtype.cpp @@ -18,6 +18,7 @@ static const std::string OUTPUT_TYPE_STRING_LEGACY = "legacy"; static const std::string OUTPUT_TYPE_STRING_P2SH_SEGWIT = "p2sh-segwit"; static const std::string OUTPUT_TYPE_STRING_BECH32 = "bech32"; +static const std::string OUTPUT_TYPE_STRING_BECH32M = "bech32m"; bool ParseOutputType(const std::string& type, OutputType& output_type) { @@ -30,6 +31,9 @@ bool ParseOutputType(const std::string& type, OutputType& output_type) } else if (type == OUTPUT_TYPE_STRING_BECH32) { output_type = OutputType::BECH32; return true; + } else if (type == OUTPUT_TYPE_STRING_BECH32M) { + output_type = OutputType::BECH32M; + return true; } return false; } @@ -40,6 +44,7 @@ const std::string& FormatOutputType(OutputType type) case OutputType::LEGACY: return OUTPUT_TYPE_STRING_LEGACY; case OutputType::P2SH_SEGWIT: return OUTPUT_TYPE_STRING_P2SH_SEGWIT; case OutputType::BECH32: return OUTPUT_TYPE_STRING_BECH32; + case OutputType::BECH32M: return OUTPUT_TYPE_STRING_BECH32M; } // no default case, so the compiler can warn about missing cases assert(false); } @@ -59,6 +64,7 @@ CTxDestination GetDestinationForKey(const CPubKey& key, OutputType type) return witdest; } } + case OutputType::BECH32M: {} // This function should never be used with BECH32M, so let it assert } // no default case, so the compiler can warn about missing cases assert(false); } @@ -98,6 +104,23 @@ CTxDestination AddAndGetDestinationForScript(FillableSigningProvider& keystore, return ScriptHash(witprog); } } + case OutputType::BECH32M: {} // This function should not be used for BECH32M, so let it assert } // no default case, so the compiler can warn about missing cases assert(false); } + +std::optional<OutputType> OutputTypeFromDestination(const CTxDestination& dest) { + if (std::holds_alternative<PKHash>(dest) || + std::holds_alternative<ScriptHash>(dest)) { + return OutputType::LEGACY; + } + if (std::holds_alternative<WitnessV0KeyHash>(dest) || + std::holds_alternative<WitnessV0ScriptHash>(dest)) { + return OutputType::BECH32; + } + if (std::holds_alternative<WitnessV1Taproot>(dest) || + std::holds_alternative<WitnessUnknown>(dest)) { + return OutputType::BECH32M; + } + return std::nullopt; +} diff --git a/src/outputtype.h b/src/outputtype.h index 88422e5824..2b83235cd0 100644 --- a/src/outputtype.h +++ b/src/outputtype.h @@ -18,12 +18,14 @@ enum class OutputType { LEGACY, P2SH_SEGWIT, BECH32, + BECH32M, }; static constexpr auto OUTPUT_TYPES = std::array{ OutputType::LEGACY, OutputType::P2SH_SEGWIT, OutputType::BECH32, + OutputType::BECH32M, }; [[nodiscard]] bool ParseOutputType(const std::string& str, OutputType& output_type); @@ -45,4 +47,7 @@ std::vector<CTxDestination> GetAllDestinationsForKey(const CPubKey& key); */ CTxDestination AddAndGetDestinationForScript(FillableSigningProvider& keystore, const CScript& script, OutputType); +/** Get the OutputType for a CTxDestination */ +std::optional<OutputType> OutputTypeFromDestination(const CTxDestination& dest); + #endif // BITCOIN_OUTPUTTYPE_H diff --git a/src/policy/packages.cpp b/src/policy/packages.cpp new file mode 100644 index 0000000000..cfd0539965 --- /dev/null +++ b/src/policy/packages.cpp @@ -0,0 +1,62 @@ +// Copyright (c) 2021 The Bitcoin Core developers +// Distributed under the MIT software license, see the accompanying +// file COPYING or 
http://www.opensource.org/licenses/mit-license.php. + +#include <consensus/validation.h> +#include <policy/packages.h> +#include <primitives/transaction.h> +#include <uint256.h> +#include <util/hasher.h> + +#include <numeric> +#include <unordered_set> + +bool CheckPackage(const Package& txns, PackageValidationState& state) +{ + const unsigned int package_count = txns.size(); + + if (package_count > MAX_PACKAGE_COUNT) { + return state.Invalid(PackageValidationResult::PCKG_POLICY, "package-too-many-transactions"); + } + + const int64_t total_size = std::accumulate(txns.cbegin(), txns.cend(), 0, + [](int64_t sum, const auto& tx) { return sum + GetVirtualTransactionSize(*tx); }); + // If the package only contains 1 tx, it's better to report the policy violation on individual tx size. + if (package_count > 1 && total_size > MAX_PACKAGE_SIZE * 1000) { + return state.Invalid(PackageValidationResult::PCKG_POLICY, "package-too-large"); + } + + // Require the package to be sorted in order of dependency, i.e. parents appear before children. + // An unsorted package will fail anyway on missing-inputs, but it's better to quit earlier and + // fail on something less ambiguous (missing-inputs could also be an orphan or trying to + // spend nonexistent coins). + std::unordered_set<uint256, SaltedTxidHasher> later_txids; + std::transform(txns.cbegin(), txns.cend(), std::inserter(later_txids, later_txids.end()), + [](const auto& tx) { return tx->GetHash(); }); + for (const auto& tx : txns) { + for (const auto& input : tx->vin) { + if (later_txids.find(input.prevout.hash) != later_txids.end()) { + // The parent is a subsequent transaction in the package. + return state.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-sorted"); + } + } + later_txids.erase(tx->GetHash()); + } + + // Don't allow any conflicting transactions, i.e. spending the same inputs, in a package. + std::unordered_set<COutPoint, SaltedOutpointHasher> inputs_seen; + for (const auto& tx : txns) { + for (const auto& input : tx->vin) { + if (inputs_seen.find(input.prevout) != inputs_seen.end()) { + // This input is also present in another tx in the package. + return state.Invalid(PackageValidationResult::PCKG_POLICY, "conflict-in-package"); + } + } + // Batch-add all the inputs for a tx at a time. If we added them 1 at a time, we could + // catch duplicate inputs within a single tx. This is a more severe, consensus error, + // and we want to report that from CheckTransaction instead. + std::transform(tx->vin.cbegin(), tx->vin.cend(), std::inserter(inputs_seen, inputs_seen.end()), + [](const auto& input) { return input.prevout; }); + } + return true; +} diff --git a/src/policy/packages.h b/src/policy/packages.h index 4b1463dcb3..6b7ac3e450 100644 --- a/src/policy/packages.h +++ b/src/policy/packages.h @@ -6,6 +6,7 @@ #define BITCOIN_POLICY_PACKAGES_H #include <consensus/validation.h> +#include <policy/policy.h> #include <primitives/transaction.h> #include <vector> @@ -14,6 +15,7 @@ static constexpr uint32_t MAX_PACKAGE_COUNT{25}; /** Default maximum total virtual size of transactions in a package in KvB. */ static constexpr uint32_t MAX_PACKAGE_SIZE{101}; +static_assert(MAX_PACKAGE_SIZE * WITNESS_SCALE_FACTOR * 1000 >= MAX_STANDARD_TX_WEIGHT); /** A "reason" why a package was invalid. It may be that one or more of the included * transactions is invalid or the package itself violates our rules. 
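The new policy/packages.cpp above performs context-free package checks: transaction count, total virtual size, parents-before-children ordering, and no conflicting inputs. A small sketch of running those checks before any mempool logic; PreflightPackage is an illustrative wrapper, not part of the change:

    #include <consensus/validation.h>
    #include <policy/packages.h>

    bool PreflightPackage(const Package& txns)
    {
        PackageValidationState state;
        if (!CheckPackage(txns, state)) {
            // state.GetRejectReason() would be e.g. "package-not-sorted" or "conflict-in-package".
            return false;
        }
        return true;
    }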
@@ -31,4 +33,12 @@ using Package = std::vector<CTransactionRef>; class PackageValidationState : public ValidationState<PackageValidationResult> {}; +/** Context-free package policy checks: + * 1. The number of transactions cannot exceed MAX_PACKAGE_COUNT. + * 2. The total virtual size cannot exceed MAX_PACKAGE_SIZE. + * 3. If any dependencies exist between transactions, parents must appear before children. + * 4. Transactions cannot conflict, i.e., spend the same inputs. + */ +bool CheckPackage(const Package& txns, PackageValidationState& state); + #endif // BITCOIN_POLICY_PACKAGES_H diff --git a/src/protocol.h b/src/protocol.h index aaa9f1df40..f9248899dc 100644 --- a/src/protocol.h +++ b/src/protocol.h @@ -13,6 +13,7 @@ #include <netaddress.h> #include <primitives/transaction.h> #include <serialize.h> +#include <streams.h> #include <uint256.h> #include <version.h> @@ -358,6 +359,31 @@ class CAddress : public CService { static constexpr uint32_t TIME_INIT{100000000}; + /** Historically, CAddress disk serialization stored the CLIENT_VERSION, optionally OR'ed with + * the ADDRV2_FORMAT flag to indicate V2 serialization. The first field has since been + * disentangled from client versioning, and now instead: + * - The low bits (masked by DISK_VERSION_IGNORE_MASK) store the fixed value DISK_VERSION_INIT, + * (in case any code exists that treats it as a client version) but are ignored on + * deserialization. + * - The high bits (masked by ~DISK_VERSION_IGNORE_MASK) store actual serialization information. + * Only 0 or DISK_VERSION_ADDRV2 (equal to the historical ADDRV2_FORMAT) are valid now, and + * any other value triggers a deserialization failure. Other values can be added later if + * needed. + * + * For disk deserialization, ADDRV2_FORMAT in the stream version signals that ADDRV2 + * deserialization is permitted, but the actual format is determined by the high bits in the + * stored version field. For network serialization, the stream version having ADDRV2_FORMAT or + * not determines the actual format used (as it has no embedded version number). + */ + static constexpr uint32_t DISK_VERSION_INIT{220000}; + static constexpr uint32_t DISK_VERSION_IGNORE_MASK{0b00000000'00000111'11111111'11111111}; + /** The version number written in disk serialized addresses to indicate V2 serializations. + * It must be exactly 1<<29, as that is the value that historical versions used for this + * (they used their internal ADDRV2_FORMAT flag here). */ + static constexpr uint32_t DISK_VERSION_ADDRV2{1 << 29}; + static_assert((DISK_VERSION_INIT & ~DISK_VERSION_IGNORE_MASK) == 0, "DISK_VERSION_INIT must be covered by DISK_VERSION_IGNORE_MASK"); + static_assert((DISK_VERSION_ADDRV2 & DISK_VERSION_IGNORE_MASK) == 0, "DISK_VERSION_ADDRV2 must not be covered by DISK_VERSION_IGNORE_MASK"); + public: CAddress() : CService{} {}; CAddress(CService ipIn, ServiceFlags nServicesIn) : CService{ipIn}, nServices{nServicesIn} {}; @@ -365,22 +391,48 @@ public: SERIALIZE_METHODS(CAddress, obj) { - SER_READ(obj, obj.nTime = TIME_INIT); - int nVersion = s.GetVersion(); + // CAddress has a distinct network serialization and a disk serialization, but it should never + // be hashed (except through CHashWriter in addrdb.cpp, which sets SER_DISK), and it's + // ambiguous what that would mean. 
Make sure no code relying on that is introduced: + assert(!(s.GetType() & SER_GETHASH)); + bool use_v2; + bool store_time; if (s.GetType() & SER_DISK) { - READWRITE(nVersion); - } - if ((s.GetType() & SER_DISK) || - (nVersion != INIT_PROTO_VERSION && !(s.GetType() & SER_GETHASH))) { + // In the disk serialization format, the encoding (v1 or v2) is determined by a flag version + // that's part of the serialization itself. ADDRV2_FORMAT in the stream version only determines + // whether V2 is chosen/permitted at all. + uint32_t stored_format_version = DISK_VERSION_INIT; + if (s.GetVersion() & ADDRV2_FORMAT) stored_format_version |= DISK_VERSION_ADDRV2; + READWRITE(stored_format_version); + stored_format_version &= ~DISK_VERSION_IGNORE_MASK; // ignore low bits + if (stored_format_version == 0) { + use_v2 = false; + } else if (stored_format_version == DISK_VERSION_ADDRV2 && (s.GetVersion() & ADDRV2_FORMAT)) { + // Only support v2 deserialization if ADDRV2_FORMAT is set. + use_v2 = true; + } else { + throw std::ios_base::failure("Unsupported CAddress disk format version"); + } + store_time = true; + } else { + // In the network serialization format, the encoding (v1 or v2) is determined directly by + // the value of ADDRV2_FORMAT in the stream version, as no explicitly encoded version + // exists in the stream. + assert(s.GetType() & SER_NETWORK); + use_v2 = s.GetVersion() & ADDRV2_FORMAT; // The only time we serialize a CAddress object without nTime is in // the initial VERSION messages which contain two CAddress records. // At that point, the serialization version is INIT_PROTO_VERSION. // After the version handshake, serialization version is >= // MIN_PEER_PROTO_VERSION and all ADDR messages are serialized with // nTime. - READWRITE(obj.nTime); + store_time = s.GetVersion() != INIT_PROTO_VERSION; } - if (nVersion & ADDRV2_FORMAT) { + + SER_READ(obj, obj.nTime = TIME_INIT); + if (store_time) READWRITE(obj.nTime); + // nServices is serialized as CompactSize in V2; as uint64_t in V1. + if (use_v2) { uint64_t services_tmp; SER_WRITE(obj, services_tmp = obj.nServices); READWRITE(Using<CompactSizeFormatter<false>>(services_tmp)); @@ -388,13 +440,22 @@ public: } else { READWRITE(Using<CustomUintFormatter<8>>(obj.nServices)); } - READWRITEAS(CService, obj); + // Invoke V1/V2 serializer for CService parent object. + OverrideStream<Stream> os(&s, s.GetType(), use_v2 ? ADDRV2_FORMAT : 0); + SerReadWriteMany(os, ser_action, ReadWriteAsHelper<CService>(obj)); } - // disk and network only + //! Always included in serialization, except in the network format on INIT_PROTO_VERSION. uint32_t nTime{TIME_INIT}; - + //! Serialized as uint64_t in V1, and as CompactSize in V2. 
ServiceFlags nServices{NODE_NONE}; + + friend bool operator==(const CAddress& a, const CAddress& b) + { + return a.nTime == b.nTime && + a.nServices == b.nServices && + static_cast<const CService&>(a) == static_cast<const CService&>(b); + } }; /** getdata message type flags */ diff --git a/src/psbt.cpp b/src/psbt.cpp index a849b2ea53..5445bc8aa1 100644 --- a/src/psbt.cpp +++ b/src/psbt.cpp @@ -59,12 +59,15 @@ bool PartiallySignedTransaction::AddOutput(const CTxOut& txout, const PSBTOutput bool PartiallySignedTransaction::GetInputUTXO(CTxOut& utxo, int input_index) const { - PSBTInput input = inputs[input_index]; + const PSBTInput& input = inputs[input_index]; uint32_t prevout_index = tx->vin[input_index].prevout.n; if (input.non_witness_utxo) { if (prevout_index >= input.non_witness_utxo->vout.size()) { return false; } + if (input.non_witness_utxo->GetHash() != tx->vin[input_index].prevout.hash) { + return false; + } utxo = input.non_witness_utxo->vout[prevout_index]; } else if (!input.witness_utxo.IsNull()) { utxo = input.witness_utxo; @@ -227,7 +230,24 @@ void UpdatePSBTOutput(const SigningProvider& provider, PartiallySignedTransactio psbt_out.FromSignatureData(sigdata); } -bool SignPSBTInput(const SigningProvider& provider, PartiallySignedTransaction& psbt, int index, int sighash, SignatureData* out_sigdata, bool use_dummy) +PrecomputedTransactionData PrecomputePSBTData(const PartiallySignedTransaction& psbt) +{ + const CMutableTransaction& tx = *psbt.tx; + bool have_all_spent_outputs = true; + std::vector<CTxOut> utxos(tx.vin.size()); + for (size_t idx = 0; idx < tx.vin.size(); ++idx) { + if (!psbt.GetInputUTXO(utxos[idx], idx)) have_all_spent_outputs = false; + } + PrecomputedTransactionData txdata; + if (have_all_spent_outputs) { + txdata.Init(tx, std::move(utxos), true); + } else { + txdata.Init(tx, {}, true); + } + return txdata; +} + +bool SignPSBTInput(const SigningProvider& provider, PartiallySignedTransaction& psbt, int index, const PrecomputedTransactionData* txdata, int sighash, SignatureData* out_sigdata) { PSBTInput& input = psbt.inputs.at(index); const CMutableTransaction& tx = *psbt.tx; @@ -267,10 +287,10 @@ bool SignPSBTInput(const SigningProvider& provider, PartiallySignedTransaction& sigdata.witness = false; bool sig_complete; - if (use_dummy) { + if (txdata == nullptr) { sig_complete = ProduceSignature(provider, DUMMY_SIGNATURE_CREATOR, utxo.scriptPubKey, sigdata); } else { - MutableTransactionSignatureCreator creator(&tx, index, utxo.nValue, sighash); + MutableTransactionSignatureCreator creator(&tx, index, utxo.nValue, txdata, sighash); sig_complete = ProduceSignature(provider, creator, utxo.scriptPubKey, sigdata); } // Verify that a witness signature was produced in case one was required. @@ -302,8 +322,9 @@ bool FinalizePSBT(PartiallySignedTransaction& psbtx) // PartiallySignedTransaction did not understand them), this will combine them into a final // script. bool complete = true; + const PrecomputedTransactionData txdata = PrecomputePSBTData(psbtx); for (unsigned int i = 0; i < psbtx.tx->vin.size(); ++i) { - complete &= SignPSBTInput(DUMMY_SIGNING_PROVIDER, psbtx, i, SIGHASH_ALL); + complete &= SignPSBTInput(DUMMY_SIGNING_PROVIDER, psbtx, i, &txdata, SIGHASH_ALL); } return complete; diff --git a/src/psbt.h b/src/psbt.h index 96ae39fdb8..f6b82b43de 100644 --- a/src/psbt.h +++ b/src/psbt.h @@ -567,11 +567,18 @@ enum class PSBTRole { std::string PSBTRoleName(PSBTRole role); +/** Compute a PrecomputedTransactionData object from a psbt. 
*/ +PrecomputedTransactionData PrecomputePSBTData(const PartiallySignedTransaction& psbt); + /** Checks whether a PSBTInput is already signed. */ bool PSBTInputSigned(const PSBTInput& input); -/** Signs a PSBTInput, verifying that all provided data matches what is being signed. */ -bool SignPSBTInput(const SigningProvider& provider, PartiallySignedTransaction& psbt, int index, int sighash = SIGHASH_ALL, SignatureData* out_sigdata = nullptr, bool use_dummy = false); +/** Signs a PSBTInput, verifying that all provided data matches what is being signed. + * + * txdata should be the output of PrecomputePSBTData (which can be shared across + * multiple SignPSBTInput calls). If it is nullptr, a dummy signature will be created. + **/ +bool SignPSBTInput(const SigningProvider& provider, PartiallySignedTransaction& psbt, int index, const PrecomputedTransactionData* txdata, int sighash = SIGHASH_ALL, SignatureData* out_sigdata = nullptr); /** Counts the unsigned inputs of a PSBT. */ size_t CountPSBTUnsignedInputs(const PartiallySignedTransaction& psbt); diff --git a/src/pubkey.cpp b/src/pubkey.cpp index 51cc826b00..175a39b805 100644 --- a/src/pubkey.cpp +++ b/src/pubkey.cpp @@ -373,3 +373,7 @@ ECCVerifyHandle::~ECCVerifyHandle() secp256k1_context_verify = nullptr; } } + +const secp256k1_context* GetVerifyContext() { + return secp256k1_context_verify; +} diff --git a/src/pubkey.h b/src/pubkey.h index 152a48dd18..eec34a89c2 100644 --- a/src/pubkey.h +++ b/src/pubkey.h @@ -234,6 +234,10 @@ public: * fail. */ bool IsFullyValid() const; + /** Test whether this is the 0 key (the result of default construction). This implies + * !IsFullyValid(). */ + bool IsNull() const { return m_keydata.IsNull(); } + /** Construct an x-only pubkey from exactly 32 bytes. */ explicit XOnlyPubKey(Span<const unsigned char> bytes); @@ -312,4 +316,10 @@ public: ~ECCVerifyHandle(); }; +typedef struct secp256k1_context_struct secp256k1_context; + +/** Access to the internal secp256k1 context used for verification. Only intended to be used + * by key.cpp. */ +const secp256k1_context* GetVerifyContext(); + #endif // BITCOIN_PUBKEY_H diff --git a/src/qt/addressbookpage.cpp b/src/qt/addressbookpage.cpp index 7024fc7654..c31f0aceea 100644 --- a/src/qt/addressbookpage.cpp +++ b/src/qt/addressbookpage.cpp @@ -114,12 +114,12 @@ AddressBookPage::AddressBookPage(const PlatformStyle *platformStyle, Mode _mode, // Build context menu contextMenu = new QMenu(this); - contextMenu->addAction(tr("Copy Address"), this, &AddressBookPage::on_copyAddress_clicked); - contextMenu->addAction(tr("Copy Label"), this, &AddressBookPage::onCopyLabelAction); - contextMenu->addAction(tr("Edit"), this, &AddressBookPage::onEditAction); + contextMenu->addAction(tr("&Copy Address"), this, &AddressBookPage::on_copyAddress_clicked); + contextMenu->addAction(tr("Copy &Label"), this, &AddressBookPage::onCopyLabelAction); + contextMenu->addAction(tr("&Edit"), this, &AddressBookPage::onEditAction); if (tab == SendingTab) { - contextMenu->addAction(tr("Delete"), this, &AddressBookPage::on_deleteAddress_clicked); + contextMenu->addAction(tr("&Delete"), this, &AddressBookPage::on_deleteAddress_clicked); } connect(ui->tableView, &QWidget::customContextMenuRequested, this, &AddressBookPage::contextualMenu); diff --git a/src/qt/bitcoin.cpp b/src/qt/bitcoin.cpp index 9e6cf56d31..442c813a5a 100644 --- a/src/qt/bitcoin.cpp +++ b/src/qt/bitcoin.cpp @@ -490,7 +490,8 @@ int GuiMain(int argc, char* argv[]) /// 2. Parse command-line options. 
We do this after qt in order to show an error if there are problems parsing these // Command-line options take precedence: - SetupServerArgs(node_context); + node_context.args = &gArgs; + SetupServerArgs(gArgs); SetupUIArgs(gArgs); std::string error; if (!gArgs.ParseParameters(argc, argv, error)) { diff --git a/src/qt/bitcoingui.cpp b/src/qt/bitcoingui.cpp index 50f6e739e8..3d632ec702 100644 --- a/src/qt/bitcoingui.cpp +++ b/src/qt/bitcoingui.cpp @@ -211,11 +211,6 @@ BitcoinGUI::BitcoinGUI(interfaces::Node& node, const PlatformStyle *_platformSty connect(labelBlocksIcon, &GUIUtil::ClickableLabel::clicked, this, &BitcoinGUI::showModalOverlay); connect(progressBar, &GUIUtil::ClickableProgressBar::clicked, this, &BitcoinGUI::showModalOverlay); -#ifdef ENABLE_WALLET - if(enableWallet) { - connect(walletFrame, &WalletFrame::requestedSyncWarningInfo, this, &BitcoinGUI::showModalOverlay); - } -#endif #ifdef Q_OS_MAC m_app_nap_inhibitor = new CAppNapInhibitor; @@ -690,7 +685,7 @@ void BitcoinGUI::addWallet(WalletModel* walletModel) const QString display_name = walletModel->getDisplayName(); m_wallet_selector->addItem(display_name, QVariant::fromValue(walletModel)); - connect(wallet_view, &WalletView::outOfSyncWarningClicked, walletFrame, &WalletFrame::outOfSyncWarningClicked); + connect(wallet_view, &WalletView::outOfSyncWarningClicked, this, &BitcoinGUI::showModalOverlay); connect(wallet_view, &WalletView::transactionClicked, this, &BitcoinGUI::gotoHistoryPage); connect(wallet_view, &WalletView::coinsSent, this, &BitcoinGUI::gotoHistoryPage); connect(wallet_view, &WalletView::message, [this](const QString& title, const QString& message, unsigned int style) { diff --git a/src/qt/coincontroldialog.cpp b/src/qt/coincontroldialog.cpp index 8ae0648141..2360fa9b37 100644 --- a/src/qt/coincontroldialog.cpp +++ b/src/qt/coincontroldialog.cpp @@ -52,13 +52,13 @@ CoinControlDialog::CoinControlDialog(CCoinControl& coin_control, WalletModel* _m // context menu contextMenu = new QMenu(this); - contextMenu->addAction(tr("Copy address"), this, &CoinControlDialog::copyAddress); - contextMenu->addAction(tr("Copy label"), this, &CoinControlDialog::copyLabel); - contextMenu->addAction(tr("Copy amount"), this, &CoinControlDialog::copyAmount); - copyTransactionHashAction = contextMenu->addAction(tr("Copy transaction ID"), this, &CoinControlDialog::copyTransactionHash); + contextMenu->addAction(tr("&Copy address"), this, &CoinControlDialog::copyAddress); + contextMenu->addAction(tr("Copy &label"), this, &CoinControlDialog::copyLabel); + contextMenu->addAction(tr("Copy &amount"), this, &CoinControlDialog::copyAmount); + copyTransactionHashAction = contextMenu->addAction(tr("Copy transaction &ID"), this, &CoinControlDialog::copyTransactionHash); contextMenu->addSeparator(); - lockAction = contextMenu->addAction(tr("Lock unspent"), this, &CoinControlDialog::lockCoin); - unlockAction = contextMenu->addAction(tr("Unlock unspent"), this, &CoinControlDialog::unlockCoin); + lockAction = contextMenu->addAction(tr("L&ock unspent"), this, &CoinControlDialog::lockCoin); + unlockAction = contextMenu->addAction(tr("&Unlock unspent"), this, &CoinControlDialog::unlockCoin); connect(ui->treeWidget, &QWidget::customContextMenuRequested, this, &CoinControlDialog::showMenu); // clipboard actions diff --git a/src/qt/createwalletdialog.cpp b/src/qt/createwalletdialog.cpp index e593697b46..dc24bbc6a6 100644 --- a/src/qt/createwalletdialog.cpp +++ b/src/qt/createwalletdialog.cpp @@ -31,8 +31,9 @@ 
CreateWalletDialog::CreateWalletDialog(QWidget* parent) : // Disable the disable_privkeys_checkbox and external_signer_checkbox when isEncryptWalletChecked is // set to true, enable it when isEncryptWalletChecked is false. ui->disable_privkeys_checkbox->setEnabled(!checked); +#ifdef ENABLE_EXTERNAL_SIGNER ui->external_signer_checkbox->setEnabled(!checked); - +#endif // When the disable_privkeys_checkbox is disabled, uncheck it. if (!ui->disable_privkeys_checkbox->isEnabled()) { ui->disable_privkeys_checkbox->setChecked(false); @@ -112,8 +113,7 @@ CreateWalletDialog::~CreateWalletDialog() delete ui; } -#ifdef ENABLE_EXTERNAL_SIGNER -void CreateWalletDialog::setSigners(std::vector<ExternalSigner>& signers) +void CreateWalletDialog::setSigners(const std::vector<ExternalSigner>& signers) { if (!signers.empty()) { ui->external_signer_checkbox->setEnabled(true); @@ -132,7 +132,6 @@ void CreateWalletDialog::setSigners(std::vector<ExternalSigner>& signers) ui->external_signer_checkbox->setEnabled(false); } } -#endif QString CreateWalletDialog::walletName() const { diff --git a/src/qt/createwalletdialog.h b/src/qt/createwalletdialog.h index 585b1461f7..25ddf97585 100644 --- a/src/qt/createwalletdialog.h +++ b/src/qt/createwalletdialog.h @@ -7,11 +7,8 @@ #include <QDialog> -class WalletModel; - -#ifdef ENABLE_EXTERNAL_SIGNER class ExternalSigner; -#endif +class WalletModel; namespace Ui { class CreateWalletDialog; @@ -27,9 +24,7 @@ public: explicit CreateWalletDialog(QWidget* parent); virtual ~CreateWalletDialog(); -#ifdef ENABLE_EXTERNAL_SIGNER - void setSigners(std::vector<ExternalSigner>& signers); -#endif + void setSigners(const std::vector<ExternalSigner>& signers); QString walletName() const; bool isEncryptWalletChecked() const; diff --git a/src/qt/optionsdialog.cpp b/src/qt/optionsdialog.cpp index 6ad8db4348..b12fe96567 100644 --- a/src/qt/optionsdialog.cpp +++ b/src/qt/optionsdialog.cpp @@ -92,6 +92,11 @@ OptionsDialog::OptionsDialog(QWidget *parent, bool enableWallet) : ui->thirdPartyTxUrls->setVisible(false); } +#ifndef ENABLE_EXTERNAL_SIGNER + //: "External signing" means using devices such as hardware wallets. 
+ ui->externalSignerPath->setToolTip(tr("Compiled without external signing support (required for external signing)")); + ui->externalSignerPath->setEnabled(false); +#endif /* Display elements init */ QDir translations(":translations"); diff --git a/src/qt/overviewpage.cpp b/src/qt/overviewpage.cpp index 27783bdf87..26e3dd0d60 100644 --- a/src/qt/overviewpage.cpp +++ b/src/qt/overviewpage.cpp @@ -166,8 +166,8 @@ OverviewPage::OverviewPage(const PlatformStyle *platformStyle, QWidget *parent) // start with displaying the "out of sync" warnings showOutOfSyncWarning(true); - connect(ui->labelWalletStatus, &QPushButton::clicked, this, &OverviewPage::handleOutOfSyncWarningClicks); - connect(ui->labelTransactionsStatus, &QPushButton::clicked, this, &OverviewPage::handleOutOfSyncWarningClicks); + connect(ui->labelWalletStatus, &QPushButton::clicked, this, &OverviewPage::outOfSyncWarningClicked); + connect(ui->labelTransactionsStatus, &QPushButton::clicked, this, &OverviewPage::outOfSyncWarningClicked); } void OverviewPage::handleTransactionClicked(const QModelIndex &index) @@ -176,11 +176,6 @@ void OverviewPage::handleTransactionClicked(const QModelIndex &index) Q_EMIT transactionClicked(filter->mapToSource(index)); } -void OverviewPage::handleOutOfSyncWarningClicks() -{ - Q_EMIT outOfSyncWarningClicked(); -} - void OverviewPage::setPrivacy(bool privacy) { m_privacy = privacy; diff --git a/src/qt/overviewpage.h b/src/qt/overviewpage.h index 755a107a00..5270741c0d 100644 --- a/src/qt/overviewpage.h +++ b/src/qt/overviewpage.h @@ -65,7 +65,6 @@ private Q_SLOTS: void handleTransactionClicked(const QModelIndex &index); void updateAlerts(const QString &warnings); void updateWatchOnlyLabels(bool showWatchOnly); - void handleOutOfSyncWarningClicks(); void setMonospacedFont(bool use_embedded_font); }; diff --git a/src/qt/psbtoperationsdialog.cpp b/src/qt/psbtoperationsdialog.cpp index 99318c3bc0..2adfeeaaf0 100644 --- a/src/qt/psbtoperationsdialog.cpp +++ b/src/qt/psbtoperationsdialog.cpp @@ -50,7 +50,7 @@ void PSBTOperationsDialog::openWithPSBT(PartiallySignedTransaction psbtx) bool complete; size_t n_could_sign; FinalizePSBT(psbtx); // Make sure all existing signatures are fully combined before checking for completeness. 
- TransactionError err = m_wallet_model->wallet().fillPSBT(SIGHASH_ALL, false /* sign */, true /* bip32derivs */, m_transaction_data, complete, &n_could_sign); + TransactionError err = m_wallet_model->wallet().fillPSBT(SIGHASH_ALL, false /* sign */, true /* bip32derivs */, &n_could_sign, m_transaction_data, complete); if (err != TransactionError::OK) { showStatus(tr("Failed to load transaction: %1") .arg(QString::fromStdString(TransactionErrorString(err).translated)), StatusLevel::ERR); @@ -67,7 +67,7 @@ void PSBTOperationsDialog::signTransaction() { bool complete; size_t n_signed; - TransactionError err = m_wallet_model->wallet().fillPSBT(SIGHASH_ALL, true /* sign */, true /* bip32derivs */, m_transaction_data, complete, &n_signed); + TransactionError err = m_wallet_model->wallet().fillPSBT(SIGHASH_ALL, true /* sign */, true /* bip32derivs */, &n_signed, m_transaction_data, complete); if (err != TransactionError::OK) { showStatus(tr("Failed to sign transaction: %1") @@ -226,7 +226,7 @@ void PSBTOperationsDialog::showStatus(const QString &msg, StatusLevel level) { size_t PSBTOperationsDialog::couldSignInputs(const PartiallySignedTransaction &psbtx) { size_t n_signed; bool complete; - TransactionError err = m_wallet_model->wallet().fillPSBT(SIGHASH_ALL, false /* sign */, false /* bip32derivs */, m_transaction_data, complete, &n_signed); + TransactionError err = m_wallet_model->wallet().fillPSBT(SIGHASH_ALL, false /* sign */, false /* bip32derivs */, &n_signed, m_transaction_data, complete); if (err != TransactionError::OK) { return 0; diff --git a/src/qt/qrimagewidget.cpp b/src/qt/qrimagewidget.cpp index f5200bb5c0..7cdd568644 100644 --- a/src/qt/qrimagewidget.cpp +++ b/src/qt/qrimagewidget.cpp @@ -27,8 +27,8 @@ QRImageWidget::QRImageWidget(QWidget *parent): QLabel(parent), contextMenu(nullptr) { contextMenu = new QMenu(this); - contextMenu->addAction(tr("Save Image…"), this, &QRImageWidget::saveImage); - contextMenu->addAction(tr("Copy Image"), this, &QRImageWidget::copyImage); + contextMenu->addAction(tr("&Save Image…"), this, &QRImageWidget::saveImage); + contextMenu->addAction(tr("&Copy Image"), this, &QRImageWidget::copyImage); } bool QRImageWidget::setQR(const QString& data, const QString& text) diff --git a/src/qt/receivecoinsdialog.cpp b/src/qt/receivecoinsdialog.cpp index 3f4d7f85e6..d47ee95826 100644 --- a/src/qt/receivecoinsdialog.cpp +++ b/src/qt/receivecoinsdialog.cpp @@ -44,11 +44,11 @@ ReceiveCoinsDialog::ReceiveCoinsDialog(const PlatformStyle *_platformStyle, QWid // context menu contextMenu = new QMenu(this); - contextMenu->addAction(tr("Copy URI"), this, &ReceiveCoinsDialog::copyURI); - contextMenu->addAction(tr("Copy address"), this, &ReceiveCoinsDialog::copyAddress); - copyLabelAction = contextMenu->addAction(tr("Copy label"), this, &ReceiveCoinsDialog::copyLabel); - copyMessageAction = contextMenu->addAction(tr("Copy message"), this, &ReceiveCoinsDialog::copyMessage); - copyAmountAction = contextMenu->addAction(tr("Copy amount"), this, &ReceiveCoinsDialog::copyAmount); + contextMenu->addAction(tr("Copy &URI"), this, &ReceiveCoinsDialog::copyURI); + contextMenu->addAction(tr("&Copy address"), this, &ReceiveCoinsDialog::copyAddress); + copyLabelAction = contextMenu->addAction(tr("Copy &label"), this, &ReceiveCoinsDialog::copyLabel); + copyMessageAction = contextMenu->addAction(tr("Copy &message"), this, &ReceiveCoinsDialog::copyMessage); + copyAmountAction = contextMenu->addAction(tr("Copy &amount"), this, &ReceiveCoinsDialog::copyAmount); connect(ui->recentRequestsView, 
&QWidget::customContextMenuRequested, this, &ReceiveCoinsDialog::showMenu); connect(ui->clearButton, &QPushButton::clicked, this, &ReceiveCoinsDialog::clear); diff --git a/src/qt/receiverequestdialog.cpp b/src/qt/receiverequestdialog.cpp index abe7de8f89..41f22e9c34 100644 --- a/src/qt/receiverequestdialog.cpp +++ b/src/qt/receiverequestdialog.cpp @@ -90,7 +90,7 @@ void ReceiveRequestDialog::setInfo(const SendCoinsRecipient &_info) ui->wallet_content->hide(); } - ui->btnVerify->setVisible(this->model->wallet().hasExternalSigner()); + ui->btnVerify->setVisible(model->wallet().hasExternalSigner()); connect(ui->btnVerify, &QPushButton::clicked, [this] { model->displayAddress(info.address.toStdString()); diff --git a/src/qt/rpcconsole.cpp b/src/qt/rpcconsole.cpp index 9579e6dc24..ff4bfb16f6 100644 --- a/src/qt/rpcconsole.cpp +++ b/src/qt/rpcconsole.cpp @@ -675,11 +675,11 @@ void RPCConsole::setClientModel(ClientModel *model, int bestblock_height, int64_ // create peer table context menu peersTableContextMenu = new QMenu(this); - peersTableContextMenu->addAction(tr("Disconnect"), this, &RPCConsole::disconnectSelectedNode); - peersTableContextMenu->addAction(ts.ban_for + " " + tr("1 hour"), [this] { banSelectedNode(60 * 60); }); - peersTableContextMenu->addAction(ts.ban_for + " " + tr("1 day"), [this] { banSelectedNode(60 * 60 * 24); }); - peersTableContextMenu->addAction(ts.ban_for + " " + tr("1 week"), [this] { banSelectedNode(60 * 60 * 24 * 7); }); - peersTableContextMenu->addAction(ts.ban_for + " " + tr("1 year"), [this] { banSelectedNode(60 * 60 * 24 * 365); }); + peersTableContextMenu->addAction(tr("&Disconnect"), this, &RPCConsole::disconnectSelectedNode); + peersTableContextMenu->addAction(ts.ban_for + " " + tr("1 &hour"), [this] { banSelectedNode(60 * 60); }); + peersTableContextMenu->addAction(ts.ban_for + " " + tr("1 d&ay"), [this] { banSelectedNode(60 * 60 * 24); }); + peersTableContextMenu->addAction(ts.ban_for + " " + tr("1 &week"), [this] { banSelectedNode(60 * 60 * 24 * 7); }); + peersTableContextMenu->addAction(ts.ban_for + " " + tr("1 &year"), [this] { banSelectedNode(60 * 60 * 24 * 365); }); connect(ui->peerWidget, &QTableView::customContextMenuRequested, this, &RPCConsole::showPeersTableContextMenu); // peer table signal handling - update peer details when selecting new node @@ -701,7 +701,7 @@ void RPCConsole::setClientModel(ClientModel *model, int bestblock_height, int64_ // create ban table context menu banTableContextMenu = new QMenu(this); - banTableContextMenu->addAction(tr("Unban"), this, &RPCConsole::unbanSelectedNode); + banTableContextMenu->addAction(tr("&Unban"), this, &RPCConsole::unbanSelectedNode); connect(ui->banlistWidget, &QTableView::customContextMenuRequested, this, &RPCConsole::showBanTableContextMenu); // ban table signal handling - clear peer details when clicking a peer in the ban table diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp index e87a2b97bc..c9bf757dfc 100644 --- a/src/qt/sendcoinsdialog.cpp +++ b/src/qt/sendcoinsdialog.cpp @@ -200,12 +200,14 @@ void SendCoinsDialog::setModel(WalletModel *_model) ui->optInRBF->setCheckState(Qt::Checked); if (model->wallet().hasExternalSigner()) { + //: "device" usually means a hardware wallet ui->sendButton->setText(tr("Sign on device")); if (gArgs.GetArg("-signer", "") != "") { ui->sendButton->setEnabled(true); ui->sendButton->setToolTip(tr("Connect your hardware wallet first.")); } else { ui->sendButton->setEnabled(false); + //: "External signer" means using devices such as hardware 
wallets. ui->sendButton->setToolTip(tr("Set external signer script path in Options -> Wallet")); } } else if (model->wallet().privateKeysDisabled()) { @@ -414,23 +416,25 @@ void SendCoinsDialog::sendButtonClicked([[maybe_unused]] bool checked) bool complete = false; // Always fill without signing first. This prevents an external signer // from being called prematurely and is not expensive. - TransactionError err = model->wallet().fillPSBT(SIGHASH_ALL, false /* sign */, true /* bip32derivs */, psbtx, complete, nullptr); + TransactionError err = model->wallet().fillPSBT(SIGHASH_ALL, false /* sign */, true /* bip32derivs */, nullptr, psbtx, complete); assert(!complete); assert(err == TransactionError::OK); if (model->wallet().hasExternalSigner()) { try { - err = model->wallet().fillPSBT(SIGHASH_ALL, true /* sign */, true /* bip32derivs */, psbtx, complete, nullptr); + err = model->wallet().fillPSBT(SIGHASH_ALL, true /* sign */, true /* bip32derivs */, nullptr, psbtx, complete); } catch (const std::runtime_error& e) { QMessageBox::critical(nullptr, tr("Sign failed"), e.what()); send_failure = true; return; } if (err == TransactionError::EXTERNAL_SIGNER_NOT_FOUND) { + //: "External signer" means using devices such as hardware wallets. QMessageBox::critical(nullptr, tr("External signer not found"), "External signer not found"); send_failure = true; return; } if (err == TransactionError::EXTERNAL_SIGNER_FAILED) { + //: "External signer" means using devices such as hardware wallets. QMessageBox::critical(nullptr, tr("External signer failure"), "External signer failure"); send_failure = true; return; diff --git a/src/qt/test/apptests.cpp b/src/qt/test/apptests.cpp index cb3dbd2267..9c31cd50df 100644 --- a/src/qt/test/apptests.cpp +++ b/src/qt/test/apptests.cpp @@ -85,11 +85,6 @@ void AppTests::appTests() // Reset global state to avoid interfering with later tests. LogInstance().DisconnectTestLogger(); AbortShutdown(); - { - LOCK(cs_main); - UnloadBlockIndex(/* mempool */ nullptr, g_chainman); - g_chainman.Reset(); - } } //! Entry point for BitcoinGUI tests. 
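The Qt hunks above all move to the reordered interfaces::Wallet::fillPSBT(sighash, sign, bip32derivs, n_signed, psbt, complete) call, and SendCoinsDialog deliberately fills the PSBT without signing before an external signer is involved. A rough sketch of that two-step flow, with a hypothetical FillThenSign helper and the surrounding WalletModel wiring assumed rather than shown:

#include <interfaces/wallet.h>
#include <psbt.h>
#include <util/error.h>

// Sketch: fill a drafted PSBT in two passes, mirroring the SendCoinsDialog flow above.
// Pass 1 never signs, so an external signer is not contacted prematurely; pass 2 signs
// only when an external signer is actually configured.
TransactionError FillThenSign(interfaces::Wallet& wallet, PartiallySignedTransaction& psbtx, bool& complete)
{
    size_t n_signed = 0;
    TransactionError err = wallet.fillPSBT(SIGHASH_ALL, /*sign=*/false, /*bip32derivs=*/true,
                                           /*n_signed=*/nullptr, psbtx, complete);
    if (err != TransactionError::OK || !wallet.hasExternalSigner()) return err;
    return wallet.fillPSBT(SIGHASH_ALL, /*sign=*/true, /*bip32derivs=*/true,
                           &n_signed, psbtx, complete);
}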
diff --git a/src/qt/test/wallettests.cpp b/src/qt/test/wallettests.cpp index ea35f80cf5..3e1a0e0fa9 100644 --- a/src/qt/test/wallettests.cpp +++ b/src/qt/test/wallettests.cpp @@ -146,14 +146,14 @@ void TestGUI(interfaces::Node& node) LOCK2(wallet->cs_wallet, spk_man->cs_KeyStore); wallet->SetAddressBook(GetDestinationForKey(test.coinbaseKey.GetPubKey(), wallet->m_default_address_type), "", "receive"); spk_man->AddKeyPubKey(test.coinbaseKey, test.coinbaseKey.GetPubKey()); - wallet->SetLastBlockProcessed(105, ::ChainActive().Tip()->GetBlockHash()); + wallet->SetLastBlockProcessed(105, node.context()->chainman->ActiveChain().Tip()->GetBlockHash()); } { WalletRescanReserver reserver(*wallet); reserver.reserve(); CWallet::ScanResult result = wallet->ScanForWalletTransactions(Params().GetConsensus().hashGenesisBlock, 0 /* block height */, {} /* max height */, reserver, true /* fUpdate */); QCOMPARE(result.status, CWallet::ScanResult::SUCCESS); - QCOMPARE(result.last_scanned_block, ::ChainActive().Tip()->GetBlockHash()); + QCOMPARE(result.last_scanned_block, node.context()->chainman->ActiveChain().Tip()->GetBlockHash()); QVERIFY(result.last_failed_block.IsNull()); } wallet->SetBroadcastTransactions(true); diff --git a/src/qt/transactionview.cpp b/src/qt/transactionview.cpp index 1e8e012dcf..4b1a546c7c 100644 --- a/src/qt/transactionview.cpp +++ b/src/qt/transactionview.cpp @@ -163,19 +163,19 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa contextMenu = new QMenu(this); contextMenu->setObjectName("contextMenu"); - copyAddressAction = contextMenu->addAction(tr("Copy address"), this, &TransactionView::copyAddress); - copyLabelAction = contextMenu->addAction(tr("Copy label"), this, &TransactionView::copyLabel); - contextMenu->addAction(tr("Copy amount"), this, &TransactionView::copyAmount); - contextMenu->addAction(tr("Copy transaction ID"), this, &TransactionView::copyTxID); - contextMenu->addAction(tr("Copy raw transaction"), this, &TransactionView::copyTxHex); - contextMenu->addAction(tr("Copy full transaction details"), this, &TransactionView::copyTxPlainText); - contextMenu->addAction(tr("Show transaction details"), this, &TransactionView::showDetails); + copyAddressAction = contextMenu->addAction(tr("&Copy address"), this, &TransactionView::copyAddress); + copyLabelAction = contextMenu->addAction(tr("Copy &label"), this, &TransactionView::copyLabel); + contextMenu->addAction(tr("Copy &amount"), this, &TransactionView::copyAmount); + contextMenu->addAction(tr("Copy transaction &ID"), this, &TransactionView::copyTxID); + contextMenu->addAction(tr("Copy &raw transaction"), this, &TransactionView::copyTxHex); + contextMenu->addAction(tr("Copy full transaction &details"), this, &TransactionView::copyTxPlainText); + contextMenu->addAction(tr("&Show transaction details"), this, &TransactionView::showDetails); contextMenu->addSeparator(); - bumpFeeAction = contextMenu->addAction(tr("Increase transaction fee")); + bumpFeeAction = contextMenu->addAction(tr("Increase transaction &fee")); GUIUtil::ExceptionSafeConnect(bumpFeeAction, &QAction::triggered, this, &TransactionView::bumpFee); bumpFeeAction->setObjectName("bumpFeeAction"); - abandonAction = contextMenu->addAction(tr("Abandon transaction"), this, &TransactionView::abandonTx); - contextMenu->addAction(tr("Edit address label"), this, &TransactionView::editLabel); + abandonAction = contextMenu->addAction(tr("A&bandon transaction"), this, &TransactionView::abandonTx); + contextMenu->addAction(tr("&Edit address 
label"), this, &TransactionView::editLabel); connect(dateWidget, qOverload<int>(&QComboBox::activated), this, &TransactionView::chooseDate); connect(typeWidget, qOverload<int>(&QComboBox::activated), this, &TransactionView::chooseType); diff --git a/src/qt/walletcontroller.cpp b/src/qt/walletcontroller.cpp index 7e5790fd87..3cceb5ca5a 100644 --- a/src/qt/walletcontroller.cpp +++ b/src/qt/walletcontroller.cpp @@ -11,6 +11,7 @@ #include <qt/guiutil.h> #include <qt/walletmodel.h> +#include <external_signer.h> #include <interfaces/handler.h> #include <interfaces/node.h> #include <util/string.h> @@ -295,7 +296,6 @@ void CreateWalletActivity::create() { m_create_wallet_dialog = new CreateWalletDialog(m_parent_widget); -#ifdef ENABLE_EXTERNAL_SIGNER std::vector<ExternalSigner> signers; try { signers = node().externalSigners(); @@ -303,7 +303,6 @@ void CreateWalletActivity::create() QMessageBox::critical(nullptr, tr("Can't list signers"), e.what()); } m_create_wallet_dialog->setSigners(signers); -#endif m_create_wallet_dialog->setWindowModality(Qt::ApplicationModal); m_create_wallet_dialog->show(); diff --git a/src/qt/walletframe.cpp b/src/qt/walletframe.cpp index 0f2ea99685..a1f357e0db 100644 --- a/src/qt/walletframe.cpp +++ b/src/qt/walletframe.cpp @@ -242,8 +242,3 @@ WalletModel* WalletFrame::currentWalletModel() const WalletView* wallet_view = currentWalletView(); return wallet_view ? wallet_view->getWalletModel() : nullptr; } - -void WalletFrame::outOfSyncWarningClicked() -{ - Q_EMIT requestedSyncWarningInfo(); -} diff --git a/src/qt/walletframe.h b/src/qt/walletframe.h index 844ed121a0..4f77bd716f 100644 --- a/src/qt/walletframe.h +++ b/src/qt/walletframe.h @@ -47,9 +47,6 @@ public: QSize sizeHint() const override { return m_size_hint; } Q_SIGNALS: - /** Notify that the user has requested more information about the out-of-sync warning */ - void requestedSyncWarningInfo(); - void createWalletButtonClicked(); private: @@ -98,8 +95,6 @@ public Q_SLOTS: void usedSendingAddresses(); /** Show used receiving addresses */ void usedReceivingAddresses(); - /** Pass on signal over requested out-of-sync-warning information */ - void outOfSyncWarningClicked(); }; #endif // BITCOIN_QT_WALLETFRAME_H diff --git a/src/qt/walletmodel.cpp b/src/qt/walletmodel.cpp index e32b7c2807..967dd588b4 100644 --- a/src/qt/walletmodel.cpp +++ b/src/qt/walletmodel.cpp @@ -525,7 +525,7 @@ bool WalletModel::bumpFee(uint256 hash, uint256& new_hash) if (create_psbt) { PartiallySignedTransaction psbtx(mtx); bool complete = false; - const TransactionError err = wallet().fillPSBT(SIGHASH_ALL, false /* sign */, true /* bip32derivs */, psbtx, complete, nullptr); + const TransactionError err = wallet().fillPSBT(SIGHASH_ALL, false /* sign */, true /* bip32derivs */, nullptr, psbtx, complete); if (err != TransactionError::OK || complete) { QMessageBox::critical(nullptr, tr("Fee bump error"), tr("Can't draft transaction.")); return false; diff --git a/src/qt/walletview.cpp b/src/qt/walletview.cpp index cc9e1502f0..3b8cf4c7ed 100644 --- a/src/qt/walletview.cpp +++ b/src/qt/walletview.cpp @@ -73,7 +73,7 @@ WalletView::WalletView(const PlatformStyle *_platformStyle, QWidget *parent): // Clicking on a transaction on the overview pre-selects the transaction on the transaction history page connect(overviewPage, &OverviewPage::transactionClicked, transactionView, qOverload<const QModelIndex&>(&TransactionView::focusTransaction)); - connect(overviewPage, &OverviewPage::outOfSyncWarningClicked, this, &WalletView::requestedSyncWarningInfo); + 
connect(overviewPage, &OverviewPage::outOfSyncWarningClicked, this, &WalletView::outOfSyncWarningClicked); connect(sendCoinsPage, &SendCoinsDialog::coinsSent, this, &WalletView::coinsSent); // Highlight transaction after send @@ -347,8 +347,3 @@ void WalletView::showProgress(const QString &title, int nProgress) } } } - -void WalletView::requestedSyncWarningInfo() -{ - Q_EMIT outOfSyncWarningClicked(); -} diff --git a/src/qt/walletview.h b/src/qt/walletview.h index 68f8a5e95b..fedf06b710 100644 --- a/src/qt/walletview.h +++ b/src/qt/walletview.h @@ -111,9 +111,6 @@ public Q_SLOTS: /** Show progress dialog e.g. for rescan */ void showProgress(const QString &title, int nProgress); - /** User has requested more information about the out of sync state */ - void requestedSyncWarningInfo(); - Q_SIGNALS: void setPrivacy(bool privacy); void transactionClicked(); diff --git a/src/rest.cpp b/src/rest.cpp index 747c7aea19..d599f381e3 100644 --- a/src/rest.cpp +++ b/src/rest.cpp @@ -125,7 +125,7 @@ static ChainstateManager* GetChainman(const std::any& context, HTTPRequest* req) __FILE__, __LINE__, __func__, PACKAGE_BUGREPORT)); return nullptr; } - return node_context->chainman; + return node_context->chainman.get(); } static RetFormat ParseDataFormat(std::string& param, const std::string& strReq) diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp index 83c1975d38..f996aa9c76 100644 --- a/src/rpc/blockchain.cpp +++ b/src/rpc/blockchain.cpp @@ -84,7 +84,6 @@ ChainstateManager& EnsureChainman(const NodeContext& node) if (!node.chainman) { throw JSONRPCError(RPC_INTERNAL_ERROR, "Node chainman not found"); } - WITH_LOCK(::cs_main, CHECK_NONFATAL(std::addressof(g_chainman) == std::addressof(*node.chainman))); return *node.chainman; } @@ -2259,6 +2258,7 @@ public: if (g_scan_in_progress.exchange(true)) { return false; } + CHECK_NONFATAL(g_scan_progress == 0); m_could_reserve = true; return true; } @@ -2266,6 +2266,7 @@ public: ~CoinsViewScanReserver() { if (m_could_reserve) { g_scan_in_progress = false; + g_scan_progress = 0; } } }; @@ -2382,7 +2383,6 @@ static RPCHelpMan scantxoutset() std::vector<CTxOut> input_txos; std::map<COutPoint, Coin> coins; g_should_abort_scan = false; - g_scan_progress = 0; int64_t count = 0; std::unique_ptr<CCoinsViewCursor> pcursor; CBlockIndex* tip; @@ -2392,7 +2392,7 @@ static RPCHelpMan scantxoutset() LOCK(cs_main); CChainState& active_chainstate = chainman.ActiveChainstate(); active_chainstate.ForceFlushStateToDisk(); - pcursor = std::unique_ptr<CCoinsViewCursor>(active_chainstate.CoinsDB().Cursor()); + pcursor = active_chainstate.CoinsDB().Cursor(); CHECK_NONFATAL(pcursor); tip = active_chainstate.m_chain.Tip(); CHECK_NONFATAL(tip); @@ -2591,7 +2591,7 @@ UniValue CreateUTXOSnapshot(NodeContext& node, CChainState& chainstate, CAutoFil throw JSONRPCError(RPC_INTERNAL_ERROR, "Unable to read UTXO set"); } - pcursor = std::unique_ptr<CCoinsViewCursor>(chainstate.CoinsDB().Cursor()); + pcursor = chainstate.CoinsDB().Cursor(); tip = chainstate.m_blockman.LookupBlockIndex(stats.hashBlock); CHECK_NONFATAL(tip); } diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp index 6826e6fd07..327f961196 100644 --- a/src/rpc/mining.cpp +++ b/src/rpc/mining.cpp @@ -114,7 +114,6 @@ static bool GenerateBlock(ChainstateManager& chainman, CBlock& block, uint64_t& { LOCK(cs_main); - CHECK_NONFATAL(std::addressof(::ChainActive()) == std::addressof(chainman.ActiveChain())); IncrementExtraNonce(&block, chainman.ActiveChain().Tip(), extra_nonce); } @@ -147,7 +146,6 @@ static UniValue 
generateBlocks(ChainstateManager& chainman, const CTxMemPool& me { // Don't keep cs_main locked LOCK(cs_main); - CHECK_NONFATAL(std::addressof(::ChainActive()) == std::addressof(chainman.ActiveChain())); nHeight = chainman.ActiveChain().Height(); nHeightEnd = nHeight+nGenerate; } diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp index ab239fe79c..5178ce60e8 100644 --- a/src/rpc/misc.cpp +++ b/src/rpc/misc.cpp @@ -131,6 +131,9 @@ static RPCHelpMan createmultisig() if (!ParseOutputType(request.params[2].get_str(), output_type)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("Unknown address type '%s'", request.params[2].get_str())); } + if (output_type == OutputType::BECH32M) { + throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "createmultisig cannot create bech32m multisig addresses"); + } } // Construct using pay-to-script-hash: diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp index 339d711ac9..ccb3123714 100644 --- a/src/rpc/rawtransaction.cpp +++ b/src/rpc/rawtransaction.cpp @@ -753,7 +753,8 @@ static RPCHelpMan signrawtransactionwithkey() }, }, }, - {"sighashtype", RPCArg::Type::STR, RPCArg::Default{"ALL"}, "The signature hash type. Must be one of:\n" + {"sighashtype", RPCArg::Type::STR, RPCArg::Default{"DEFAULT"}, "The signature hash type. Must be one of:\n" + " \"DEFAULT\"\n" " \"ALL\"\n" " \"NONE\"\n" " \"SINGLE\"\n" @@ -889,7 +890,7 @@ static RPCHelpMan testmempoolaccept() "\nReturns result of mempool acceptance tests indicating if raw transaction(s) (serialized, hex-encoded) would be accepted by mempool.\n" "\nIf multiple transactions are passed in, parents must come before children and package policies apply: the transactions cannot conflict with any mempool transactions or each other.\n" "\nIf one transaction fails, other transactions may not be fully validated (the 'allowed' key will be blank).\n" - "\nThe maximum number of transactions allowed is 25 (MAX_PACKAGE_COUNT)\n" + "\nThe maximum number of transactions allowed is " + ToString(MAX_PACKAGE_COUNT) + ".\n" "\nThis checks if transactions violate the consensus or policy rules.\n" "\nSee sendrawtransaction call.\n", { @@ -905,7 +906,7 @@ static RPCHelpMan testmempoolaccept() RPCResult{ RPCResult::Type::ARR, "", "The result of the mempool acceptance test for each raw transaction in the input array.\n" "Returns results for each transaction in the same order they were passed in.\n" - "It is possible for transactions to not be fully validated ('allowed' unset) if an earlier transaction failed.\n", + "It is possible for transactions to not be fully validated ('allowed' unset) if another transaction failed.\n", { {RPCResult::Type::OBJ, "", "", { @@ -939,7 +940,6 @@ static RPCHelpMan testmempoolaccept() UniValue::VARR, UniValueType(), // VNUM or VSTR, checked inside AmountFromValue() }); - const UniValue raw_transactions = request.params[0].get_array(); if (raw_transactions.size() < 1 || raw_transactions.size() > MAX_PACKAGE_COUNT) { throw JSONRPCError(RPC_INVALID_PARAMETER, @@ -951,6 +951,7 @@ static RPCHelpMan testmempoolaccept() CFeeRate(AmountFromValue(request.params[1])); std::vector<CTransactionRef> txns; + txns.reserve(raw_transactions.size()); for (const auto& rawtx : raw_transactions.getValues()) { CMutableTransaction mtx; if (!DecodeHexTx(mtx, rawtx.get_str())) { @@ -971,8 +972,8 @@ static RPCHelpMan testmempoolaccept() }(); UniValue rpc_result(UniValue::VARR); - // We will check transaction fees we iterate through txns in order. 
If any transaction fee - // exceeds maxfeerate, we will keave the rest of the validation results blank, because it + // We will check transaction fees while we iterate through txns in order. If any transaction fee + // exceeds maxfeerate, we will leave the rest of the validation results blank, because it // doesn't make sense to return a validation result for a transaction if its ancestor(s) would // not be submitted. bool exit_early{false}; @@ -1655,6 +1656,7 @@ static RPCHelpMan utxoupdatepsbt() } // Fill the inputs + const PrecomputedTransactionData txdata = PrecomputePSBTData(psbtx); for (unsigned int i = 0; i < psbtx.tx->vin.size(); ++i) { PSBTInput& input = psbtx.inputs.at(i); @@ -1671,7 +1673,7 @@ static RPCHelpMan utxoupdatepsbt() // Update script/keypath information using descriptor data. // Note that SignPSBTInput does a lot more than just constructing ECDSA signatures // we don't actually care about those here, in fact. - SignPSBTInput(public_provider, psbtx, i, /* sighash_type */ 1); + SignPSBTInput(public_provider, psbtx, i, &txdata, /* sighash_type */ 1); } // Update script/keypath information using descriptor data. diff --git a/src/script/descriptor.cpp b/src/script/descriptor.cpp index 51cf8a7d62..fdbd2d7fc7 100644 --- a/src/script/descriptor.cpp +++ b/src/script/descriptor.cpp @@ -244,7 +244,7 @@ class ConstPubkeyProvider final : public PubkeyProvider bool m_xonly; public: - ConstPubkeyProvider(uint32_t exp_index, const CPubKey& pubkey, bool xonly = false) : PubkeyProvider(exp_index), m_pubkey(pubkey), m_xonly(xonly) {} + ConstPubkeyProvider(uint32_t exp_index, const CPubKey& pubkey, bool xonly) : PubkeyProvider(exp_index), m_pubkey(pubkey), m_xonly(xonly) {} bool GetPubKey(int pos, const SigningProvider& arg, CPubKey& key, KeyOriginInfo& info, const DescriptorCache* read_cache = nullptr, DescriptorCache* write_cache = nullptr) override { key = m_pubkey; @@ -640,20 +640,6 @@ public: std::optional<OutputType> GetOutputType() const override { return std::nullopt; } }; -static std::optional<OutputType> OutputTypeFromDestination(const CTxDestination& dest) { - if (std::holds_alternative<PKHash>(dest) || - std::holds_alternative<ScriptHash>(dest)) { - return OutputType::LEGACY; - } - if (std::holds_alternative<WitnessV0KeyHash>(dest) || - std::holds_alternative<WitnessV0ScriptHash>(dest) || - std::holds_alternative<WitnessV1Taproot>(dest) || - std::holds_alternative<WitnessUnknown>(dest)) { - return OutputType::BECH32; - } - return std::nullopt; -} - /** A parsed addr(A) descriptor. 
*/ class AddressDescriptor final : public DescriptorImpl { @@ -843,7 +829,9 @@ protected: XOnlyPubKey xpk(keys[0]); if (!xpk.IsFullyValid()) return {}; builder.Finalize(xpk); - return Vector(GetScriptForDestination(builder.GetOutput())); + WitnessV1Taproot output = builder.GetOutput(); + out.tr_spenddata[output].Merge(builder.GetSpendData()); + return Vector(GetScriptForDestination(output)); } bool ToStringSubScriptHelper(const SigningProvider* arg, std::string& ret, bool priv, bool normalized) const override { @@ -872,7 +860,7 @@ public: { assert(m_subdescriptor_args.size() == m_depths.size()); } - std::optional<OutputType> GetOutputType() const override { return OutputType::BECH32; } + std::optional<OutputType> GetOutputType() const override { return OutputType::BECH32M; } bool IsSingleType() const final { return true; } }; @@ -929,7 +917,7 @@ std::unique_ptr<PubkeyProvider> ParsePubkeyInner(uint32_t key_exp_index, const S CPubKey pubkey(data); if (pubkey.IsFullyValid()) { if (permit_uncompressed || pubkey.IsCompressed()) { - return std::make_unique<ConstPubkeyProvider>(key_exp_index, pubkey); + return std::make_unique<ConstPubkeyProvider>(key_exp_index, pubkey, false); } else { error = "Uncompressed keys are not allowed"; return nullptr; @@ -950,7 +938,7 @@ std::unique_ptr<PubkeyProvider> ParsePubkeyInner(uint32_t key_exp_index, const S if (permit_uncompressed || key.IsCompressed()) { CPubKey pubkey = key.GetPubKey(); out.keys.emplace(pubkey.GetID(), key); - return std::make_unique<ConstPubkeyProvider>(key_exp_index, pubkey); + return std::make_unique<ConstPubkeyProvider>(key_exp_index, pubkey, ctx == ParseScriptContext::P2TR); } else { error = "Uncompressed keys are not allowed"; return nullptr; @@ -1219,26 +1207,50 @@ std::unique_ptr<DescriptorImpl> ParseScript(uint32_t& key_exp_index, Span<const std::unique_ptr<PubkeyProvider> InferPubkey(const CPubKey& pubkey, ParseScriptContext, const SigningProvider& provider) { - std::unique_ptr<PubkeyProvider> key_provider = std::make_unique<ConstPubkeyProvider>(0, pubkey); + std::unique_ptr<PubkeyProvider> key_provider = std::make_unique<ConstPubkeyProvider>(0, pubkey, false); + KeyOriginInfo info; + if (provider.GetKeyOrigin(pubkey.GetID(), info)) { + return std::make_unique<OriginPubkeyProvider>(0, std::move(info), std::move(key_provider)); + } + return key_provider; +} + +std::unique_ptr<PubkeyProvider> InferXOnlyPubkey(const XOnlyPubKey& xkey, ParseScriptContext ctx, const SigningProvider& provider) +{ + unsigned char full_key[CPubKey::COMPRESSED_SIZE] = {0x02}; + std::copy(xkey.begin(), xkey.end(), full_key + 1); + CPubKey pubkey(full_key); + std::unique_ptr<PubkeyProvider> key_provider = std::make_unique<ConstPubkeyProvider>(0, pubkey, true); KeyOriginInfo info; if (provider.GetKeyOrigin(pubkey.GetID(), info)) { return std::make_unique<OriginPubkeyProvider>(0, std::move(info), std::move(key_provider)); + } else { + full_key[0] = 0x03; + pubkey = CPubKey(full_key); + if (provider.GetKeyOrigin(pubkey.GetID(), info)) { + return std::make_unique<OriginPubkeyProvider>(0, std::move(info), std::move(key_provider)); + } } return key_provider; } std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptContext ctx, const SigningProvider& provider) { + if (ctx == ParseScriptContext::P2TR && script.size() == 34 && script[0] == 32 && script[33] == OP_CHECKSIG) { + XOnlyPubKey key{Span<const unsigned char>{script.data() + 1, script.data() + 33}}; + return std::make_unique<PKDescriptor>(InferXOnlyPubkey(key, ctx, provider)); + } + 
std::vector<std::vector<unsigned char>> data; TxoutType txntype = Solver(script, data); - if (txntype == TxoutType::PUBKEY) { + if (txntype == TxoutType::PUBKEY && (ctx == ParseScriptContext::TOP || ctx == ParseScriptContext::P2SH || ctx == ParseScriptContext::P2WSH)) { CPubKey pubkey(data[0]); if (pubkey.IsValid()) { return std::make_unique<PKDescriptor>(InferPubkey(pubkey, ctx, provider)); } } - if (txntype == TxoutType::PUBKEYHASH) { + if (txntype == TxoutType::PUBKEYHASH && (ctx == ParseScriptContext::TOP || ctx == ParseScriptContext::P2SH || ctx == ParseScriptContext::P2WSH)) { uint160 hash(data[0]); CKeyID keyid(hash); CPubKey pubkey; @@ -1246,7 +1258,7 @@ std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptCo return std::make_unique<PKHDescriptor>(InferPubkey(pubkey, ctx, provider)); } } - if (txntype == TxoutType::WITNESS_V0_KEYHASH && ctx != ParseScriptContext::P2WSH) { + if (txntype == TxoutType::WITNESS_V0_KEYHASH && (ctx == ParseScriptContext::TOP || ctx == ParseScriptContext::P2SH)) { uint160 hash(data[0]); CKeyID keyid(hash); CPubKey pubkey; @@ -1254,7 +1266,7 @@ std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptCo return std::make_unique<WPKHDescriptor>(InferPubkey(pubkey, ctx, provider)); } } - if (txntype == TxoutType::MULTISIG) { + if (txntype == TxoutType::MULTISIG && (ctx == ParseScriptContext::TOP || ctx == ParseScriptContext::P2SH || ctx == ParseScriptContext::P2WSH)) { std::vector<std::unique_ptr<PubkeyProvider>> providers; for (size_t i = 1; i + 1 < data.size(); ++i) { CPubKey pubkey(data[i]); @@ -1271,7 +1283,7 @@ std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptCo if (sub) return std::make_unique<SHDescriptor>(std::move(sub)); } } - if (txntype == TxoutType::WITNESS_V0_SCRIPTHASH && ctx != ParseScriptContext::P2WSH) { + if (txntype == TxoutType::WITNESS_V0_SCRIPTHASH && (ctx == ParseScriptContext::TOP || ctx == ParseScriptContext::P2SH)) { CScriptID scriptid; CRIPEMD160().Write(data[0].data(), data[0].size()).Finalize(scriptid.begin()); CScript subscript; @@ -1280,6 +1292,40 @@ std::unique_ptr<DescriptorImpl> InferScript(const CScript& script, ParseScriptCo if (sub) return std::make_unique<WSHDescriptor>(std::move(sub)); } } + if (txntype == TxoutType::WITNESS_V1_TAPROOT && ctx == ParseScriptContext::TOP) { + // Extract x-only pubkey from output. + XOnlyPubKey pubkey; + std::copy(data[0].begin(), data[0].end(), pubkey.begin()); + // Request spending data. + TaprootSpendData tap; + if (provider.GetTaprootSpendData(pubkey, tap)) { + // If found, convert it back to tree form. + auto tree = InferTaprootTree(tap, pubkey); + if (tree) { + // If that works, try to infer subdescriptors for all leaves. 
+ bool ok = true; + std::vector<std::unique_ptr<DescriptorImpl>> subscripts; //!< list of script subexpressions + std::vector<int> depths; //!< depth in the tree of each subexpression (same length subscripts) + for (const auto& [depth, script, leaf_ver] : *tree) { + std::unique_ptr<DescriptorImpl> subdesc; + if (leaf_ver == TAPROOT_LEAF_TAPSCRIPT) { + subdesc = InferScript(script, ParseScriptContext::P2TR, provider); + } + if (!subdesc) { + ok = false; + break; + } else { + subscripts.push_back(std::move(subdesc)); + depths.push_back(depth); + } + } + if (ok) { + auto key = InferXOnlyPubkey(tap.internal_key, ParseScriptContext::P2TR, provider); + return std::make_unique<TRDescriptor>(std::move(key), std::move(subscripts), std::move(depths)); + } + } + } + } CTxDestination dest; if (ExtractDestination(script, dest)) { diff --git a/src/script/interpreter.cpp b/src/script/interpreter.cpp index 3c3c3ac1a8..8cebbc5d10 100644 --- a/src/script/interpreter.cpp +++ b/src/script/interpreter.cpp @@ -1420,7 +1420,7 @@ uint256 GetSpentScriptsSHA256(const std::vector<CTxOut>& outputs_spent) } // namespace template <class T> -void PrecomputedTransactionData::Init(const T& txTo, std::vector<CTxOut>&& spent_outputs) +void PrecomputedTransactionData::Init(const T& txTo, std::vector<CTxOut>&& spent_outputs, bool force) { assert(!m_spent_outputs_ready); @@ -1431,9 +1431,9 @@ void PrecomputedTransactionData::Init(const T& txTo, std::vector<CTxOut>&& spent } // Determine which precomputation-impacting features this transaction uses. - bool uses_bip143_segwit = false; - bool uses_bip341_taproot = false; - for (size_t inpos = 0; inpos < txTo.vin.size(); ++inpos) { + bool uses_bip143_segwit = force; + bool uses_bip341_taproot = force; + for (size_t inpos = 0; inpos < txTo.vin.size() && !(uses_bip143_segwit && uses_bip341_taproot); ++inpos) { if (!txTo.vin[inpos].scriptWitness.IsNull()) { if (m_spent_outputs_ready && m_spent_outputs[inpos].scriptPubKey.size() == 2 + WITNESS_V1_TAPROOT_SIZE && m_spent_outputs[inpos].scriptPubKey[0] == OP_1) { @@ -1478,8 +1478,8 @@ PrecomputedTransactionData::PrecomputedTransactionData(const T& txTo) } // explicit instantiation -template void PrecomputedTransactionData::Init(const CTransaction& txTo, std::vector<CTxOut>&& spent_outputs); -template void PrecomputedTransactionData::Init(const CMutableTransaction& txTo, std::vector<CTxOut>&& spent_outputs); +template void PrecomputedTransactionData::Init(const CTransaction& txTo, std::vector<CTxOut>&& spent_outputs, bool force); +template void PrecomputedTransactionData::Init(const CMutableTransaction& txTo, std::vector<CTxOut>&& spent_outputs, bool force); template PrecomputedTransactionData::PrecomputedTransactionData(const CTransaction& txTo); template PrecomputedTransactionData::PrecomputedTransactionData(const CMutableTransaction& txTo); @@ -1711,7 +1711,7 @@ bool GenericTransactionSignatureChecker<T>::CheckSchnorrSignature(Span<const uns if (hashtype == SIGHASH_DEFAULT) return set_error(serror, SCRIPT_ERR_SCHNORR_SIG_HASHTYPE); } uint256 sighash; - assert(this->txdata); + if (!this->txdata) return HandleMissingData(m_mdb); if (!SignatureHashSchnorr(sighash, execdata, *txTo, nIn, hashtype, sigversion, *this->txdata, m_mdb)) { return set_error(serror, SCRIPT_ERR_SCHNORR_SIG_HASHTYPE); } @@ -1847,16 +1847,14 @@ static bool ExecuteWitnessScript(const Span<const valtype>& stack_span, const CS return true; } -static bool VerifyTaprootCommitment(const std::vector<unsigned char>& control, const std::vector<unsigned char>& program, const 
CScript& script, uint256& tapleaf_hash) +uint256 ComputeTapleafHash(uint8_t leaf_version, const CScript& script) +{ + return (CHashWriter(HASHER_TAPLEAF) << leaf_version << script).GetSHA256(); +} + +uint256 ComputeTaprootMerkleRoot(Span<const unsigned char> control, const uint256& tapleaf_hash) { const int path_len = (control.size() - TAPROOT_CONTROL_BASE_SIZE) / TAPROOT_CONTROL_NODE_SIZE; - //! The internal pubkey (x-only, so no Y coordinate parity). - const XOnlyPubKey p{uint256(std::vector<unsigned char>(control.begin() + 1, control.begin() + TAPROOT_CONTROL_BASE_SIZE))}; - //! The output pubkey (taken from the scriptPubKey). - const XOnlyPubKey q{uint256(program)}; - // Compute the tapleaf hash. - tapleaf_hash = (CHashWriter(HASHER_TAPLEAF) << uint8_t(control[0] & TAPROOT_LEAF_MASK) << script).GetSHA256(); - // Compute the Merkle root from the leaf and the provided path. uint256 k = tapleaf_hash; for (int i = 0; i < path_len; ++i) { CHashWriter ss_branch{HASHER_TAPBRANCH}; @@ -1868,8 +1866,21 @@ static bool VerifyTaprootCommitment(const std::vector<unsigned char>& control, c } k = ss_branch.GetSHA256(); } + return k; +} + +static bool VerifyTaprootCommitment(const std::vector<unsigned char>& control, const std::vector<unsigned char>& program, const uint256& tapleaf_hash) +{ + assert(control.size() >= TAPROOT_CONTROL_BASE_SIZE); + assert(program.size() >= uint256::size()); + //! The internal pubkey (x-only, so no Y coordinate parity). + const XOnlyPubKey p{uint256(std::vector<unsigned char>(control.begin() + 1, control.begin() + TAPROOT_CONTROL_BASE_SIZE))}; + //! The output pubkey (taken from the scriptPubKey). + const XOnlyPubKey q{uint256(program)}; + // Compute the Merkle root from the leaf and the provided path. + const uint256 merkle_root = ComputeTaprootMerkleRoot(control, tapleaf_hash); // Verify that the output pubkey matches the tweaked internal pubkey, after correcting for parity. 
- return q.CheckTapTweak(p, k, control[0] & 1); + return q.CheckTapTweak(p, merkle_root, control[0] & 1); } static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion, const std::vector<unsigned char>& program, unsigned int flags, const BaseSignatureChecker& checker, ScriptError* serror, bool is_p2sh) @@ -1929,7 +1940,8 @@ static bool VerifyWitnessProgram(const CScriptWitness& witness, int witversion, if (control.size() < TAPROOT_CONTROL_BASE_SIZE || control.size() > TAPROOT_CONTROL_MAX_SIZE || ((control.size() - TAPROOT_CONTROL_BASE_SIZE) % TAPROOT_CONTROL_NODE_SIZE) != 0) { return set_error(serror, SCRIPT_ERR_TAPROOT_WRONG_CONTROL_SIZE); } - if (!VerifyTaprootCommitment(control, program, exec_script, execdata.m_tapleaf_hash)) { + execdata.m_tapleaf_hash = ComputeTapleafHash(control[0] & TAPROOT_LEAF_MASK, exec_script); + if (!VerifyTaprootCommitment(control, program, execdata.m_tapleaf_hash)) { return set_error(serror, SCRIPT_ERR_WITNESS_PROGRAM_MISMATCH); } execdata.m_tapleaf_hash_init = true; diff --git a/src/script/interpreter.h b/src/script/interpreter.h index fa4ee83e04..034c937b99 100644 --- a/src/script/interpreter.h +++ b/src/script/interpreter.h @@ -168,7 +168,7 @@ struct PrecomputedTransactionData PrecomputedTransactionData() = default; template <class T> - void Init(const T& tx, std::vector<CTxOut>&& spent_outputs); + void Init(const T& tx, std::vector<CTxOut>&& spent_outputs, bool force = false); template <class T> explicit PrecomputedTransactionData(const T& tx); @@ -260,6 +260,9 @@ enum class MissingDataBehavior FAIL, //!< Just act as if the signature was invalid }; +template<typename T> +bool SignatureHashSchnorr(uint256& hash_out, const ScriptExecutionData& execdata, const T& tx_to, uint32_t in_pos, uint8_t hash_type, SigVersion sigversion, const PrecomputedTransactionData& cache, MissingDataBehavior mdb); + template <class T> class GenericTransactionSignatureChecker : public BaseSignatureChecker { @@ -314,6 +317,12 @@ public: } }; +/** Compute the BIP341 tapleaf hash from leaf version & script. */ +uint256 ComputeTapleafHash(uint8_t leaf_version, const CScript& script); +/** Compute the BIP341 taproot script tree Merkle root from control block and leaf hash. + * Requires control block to have valid length (33 + k*32, with k in {0,1,..,128}). 
*/ +uint256 ComputeTaprootMerkleRoot(Span<const unsigned char> control, const uint256& tapleaf_hash); + bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& script, unsigned int flags, const BaseSignatureChecker& checker, SigVersion sigversion, ScriptExecutionData& execdata, ScriptError* error = nullptr); bool EvalScript(std::vector<std::vector<unsigned char> >& stack, const CScript& script, unsigned int flags, const BaseSignatureChecker& checker, SigVersion sigversion, ScriptError* error = nullptr); bool VerifyScript(const CScript& scriptSig, const CScript& scriptPubKey, const CScriptWitness* witness, unsigned int flags, const BaseSignatureChecker& checker, ScriptError* serror = nullptr); diff --git a/src/script/sign.cpp b/src/script/sign.cpp index da0092f9e3..65276f641f 100644 --- a/src/script/sign.cpp +++ b/src/script/sign.cpp @@ -11,13 +11,28 @@ #include <script/signingprovider.h> #include <script/standard.h> #include <uint256.h> +#include <util/vector.h> typedef std::vector<unsigned char> valtype; -MutableTransactionSignatureCreator::MutableTransactionSignatureCreator(const CMutableTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn) : txTo(txToIn), nIn(nInIn), nHashType(nHashTypeIn), amount(amountIn), checker(txTo, nIn, amountIn, MissingDataBehavior::FAIL) {} +MutableTransactionSignatureCreator::MutableTransactionSignatureCreator(const CMutableTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn) + : txTo(txToIn), nIn(nInIn), nHashType(nHashTypeIn), amount(amountIn), checker(txTo, nIn, amountIn, MissingDataBehavior::FAIL), + m_txdata(nullptr) +{ +} + +MutableTransactionSignatureCreator::MutableTransactionSignatureCreator(const CMutableTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, const PrecomputedTransactionData* txdata, int nHashTypeIn) + : txTo(txToIn), nIn(nInIn), nHashType(nHashTypeIn), amount(amountIn), + checker(txdata ? MutableTransactionSignatureChecker(txTo, nIn, amount, *txdata, MissingDataBehavior::FAIL) : + MutableTransactionSignatureChecker(txTo, nIn, amount, MissingDataBehavior::FAIL)), + m_txdata(txdata) +{ +} bool MutableTransactionSignatureCreator::CreateSig(const SigningProvider& provider, std::vector<unsigned char>& vchSig, const CKeyID& address, const CScript& scriptCode, SigVersion sigversion) const { + assert(sigversion == SigVersion::BASE || sigversion == SigVersion::WITNESS_V0); + CKey key; if (!provider.GetKey(address, key)) return false; @@ -26,13 +41,61 @@ bool MutableTransactionSignatureCreator::CreateSig(const SigningProvider& provid if (sigversion == SigVersion::WITNESS_V0 && !key.IsCompressed()) return false; - // Signing for witness scripts needs the amount. - if (sigversion == SigVersion::WITNESS_V0 && amount < 0) return false; + // Signing without known amount does not work in witness scripts. + if (sigversion == SigVersion::WITNESS_V0 && !MoneyRange(amount)) return false; + + // BASE/WITNESS_V0 signatures don't support explicit SIGHASH_DEFAULT, use SIGHASH_ALL instead. + const int hashtype = nHashType == SIGHASH_DEFAULT ? 
SIGHASH_ALL : nHashType; - uint256 hash = SignatureHash(scriptCode, *txTo, nIn, nHashType, amount, sigversion); + uint256 hash = SignatureHash(scriptCode, *txTo, nIn, hashtype, amount, sigversion, m_txdata); if (!key.Sign(hash, vchSig)) return false; - vchSig.push_back((unsigned char)nHashType); + vchSig.push_back((unsigned char)hashtype); + return true; +} + +bool MutableTransactionSignatureCreator::CreateSchnorrSig(const SigningProvider& provider, std::vector<unsigned char>& sig, const XOnlyPubKey& pubkey, const uint256* leaf_hash, const uint256* merkle_root, SigVersion sigversion) const +{ + assert(sigversion == SigVersion::TAPROOT || sigversion == SigVersion::TAPSCRIPT); + + CKey key; + { + // For now, use the old full pubkey-based key derivation logic. As it indexed by + // Hash160(full pubkey), we need to try both a version prefixed with 0x02, and one + // with 0x03. + unsigned char b[33] = {0x02}; + std::copy(pubkey.begin(), pubkey.end(), b + 1); + CPubKey fullpubkey; + fullpubkey.Set(b, b + 33); + CKeyID keyid = fullpubkey.GetID(); + if (!provider.GetKey(keyid, key)) { + b[0] = 0x03; + fullpubkey.Set(b, b + 33); + CKeyID keyid = fullpubkey.GetID(); + if (!provider.GetKey(keyid, key)) return false; + } + } + + // BIP341/BIP342 signing needs lots of precomputed transaction data. While some + // (non-SIGHASH_DEFAULT) sighash modes exist that can work with just some subset + // of data present, for now, only support signing when everything is provided. + if (!m_txdata || !m_txdata->m_bip341_taproot_ready || !m_txdata->m_spent_outputs_ready) return false; + + ScriptExecutionData execdata; + execdata.m_annex_init = true; + execdata.m_annex_present = false; // Only support annex-less signing for now. + if (sigversion == SigVersion::TAPSCRIPT) { + execdata.m_codeseparator_pos_init = true; + execdata.m_codeseparator_pos = 0xFFFFFFFF; // Only support non-OP_CODESEPARATOR BIP342 signing for now. + if (!leaf_hash) return false; // BIP342 signing needs leaf hash. + execdata.m_tapleaf_hash_init = true; + execdata.m_tapleaf_hash = *leaf_hash; + } + uint256 hash; + if (!SignatureHashSchnorr(hash, execdata, *txTo, nIn, nHashType, sigversion, *m_txdata, MissingDataBehavior::FAIL)) return false; + sig.resize(64); + if (!key.SignSchnorr(hash, sig, merkle_root, nullptr)) return false; + if (nHashType) sig.push_back(nHashType); return true; } @@ -92,6 +155,86 @@ static bool CreateSig(const BaseSignatureCreator& creator, SignatureData& sigdat return false; } +static bool CreateTaprootScriptSig(const BaseSignatureCreator& creator, SignatureData& sigdata, const SigningProvider& provider, std::vector<unsigned char>& sig_out, const XOnlyPubKey& pubkey, const uint256& leaf_hash, SigVersion sigversion) +{ + auto lookup_key = std::make_pair(pubkey, leaf_hash); + auto it = sigdata.taproot_script_sigs.find(lookup_key); + if (it != sigdata.taproot_script_sigs.end()) { + sig_out = it->second; + } + if (creator.CreateSchnorrSig(provider, sig_out, pubkey, &leaf_hash, nullptr, sigversion)) { + sigdata.taproot_script_sigs[lookup_key] = sig_out; + return true; + } + return false; +} + +static bool SignTaprootScript(const SigningProvider& provider, const BaseSignatureCreator& creator, SignatureData& sigdata, int leaf_version, const CScript& script, std::vector<valtype>& result) +{ + // Only BIP342 tapscript signing is supported for now. 
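As a small aside, a sketch of the sighash-byte conventions the two creator paths above follow, simplified to plain byte vectors; the 0x00/0x01 values are the SIGHASH_DEFAULT/SIGHASH_ALL constants from script/interpreter.h, and these functions are illustrative rather than part of the change.

#include <cstdint>
#include <vector>

// ECDSA (BASE / WITNESS_V0): SIGHASH_DEFAULT (0) is not a valid encoded hash type, so it is
// signed and serialized as SIGHASH_ALL, and the hash-type byte is always appended.
std::vector<uint8_t> FinishEcdsaSig(std::vector<uint8_t> der_sig, uint8_t hash_type)
{
    if (hash_type == 0x00 /* SIGHASH_DEFAULT */) hash_type = 0x01 /* SIGHASH_ALL */;
    der_sig.push_back(hash_type);
    return der_sig;
}

// Schnorr (TAPROOT / TAPSCRIPT): a bare 64-byte signature implies SIGHASH_DEFAULT; any other
// hash type is appended as a 65th byte, matching the `if (nHashType) sig.push_back(nHashType)` above.
std::vector<uint8_t> FinishSchnorrSig(std::vector<uint8_t> sig64, uint8_t hash_type)
{
    if (hash_type != 0x00 /* SIGHASH_DEFAULT */) sig64.push_back(hash_type);
    return sig64;
}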
+ if (leaf_version != TAPROOT_LEAF_TAPSCRIPT) return false; + SigVersion sigversion = SigVersion::TAPSCRIPT; + + uint256 leaf_hash = (CHashWriter(HASHER_TAPLEAF) << uint8_t(leaf_version) << script).GetSHA256(); + + // <xonly pubkey> OP_CHECKSIG + if (script.size() == 34 && script[33] == OP_CHECKSIG && script[0] == 0x20) { + XOnlyPubKey pubkey(MakeSpan(script).subspan(1, 32)); + std::vector<unsigned char> sig; + if (CreateTaprootScriptSig(creator, sigdata, provider, sig, pubkey, leaf_hash, sigversion)) { + result = Vector(std::move(sig)); + return true; + } + } + + return false; +} + +static bool SignTaproot(const SigningProvider& provider, const BaseSignatureCreator& creator, const WitnessV1Taproot& output, SignatureData& sigdata, std::vector<valtype>& result) +{ + TaprootSpendData spenddata; + + // Gather information about this output. + if (provider.GetTaprootSpendData(output, spenddata)) { + sigdata.tr_spenddata.Merge(spenddata); + } + + // Try key path spending. + { + std::vector<unsigned char> sig; + if (sigdata.taproot_key_path_sig.size() == 0) { + if (creator.CreateSchnorrSig(provider, sig, spenddata.internal_key, nullptr, &spenddata.merkle_root, SigVersion::TAPROOT)) { + sigdata.taproot_key_path_sig = sig; + } + } + if (sigdata.taproot_key_path_sig.size()) { + result = Vector(sigdata.taproot_key_path_sig); + return true; + } + } + + // Try script path spending. + std::vector<std::vector<unsigned char>> smallest_result_stack; + for (const auto& [key, control_blocks] : sigdata.tr_spenddata.scripts) { + const auto& [script, leaf_ver] = key; + std::vector<std::vector<unsigned char>> result_stack; + if (SignTaprootScript(provider, creator, sigdata, leaf_ver, script, result_stack)) { + result_stack.emplace_back(std::begin(script), std::end(script)); // Push the script + result_stack.push_back(*control_blocks.begin()); // Push the smallest control block + if (smallest_result_stack.size() == 0 || + GetSerializeSize(result_stack, PROTOCOL_VERSION) < GetSerializeSize(smallest_result_stack, PROTOCOL_VERSION)) { + smallest_result_stack = std::move(result_stack); + } + } + } + if (smallest_result_stack.size() != 0) { + result = std::move(smallest_result_stack); + return true; + } + + return false; +} + /** * Sign scriptPubKey using signature made with creator. 
* Signatures are returned in scriptSigRet (or returns false if scriptPubKey can't be signed), @@ -113,7 +256,6 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator case TxoutType::NONSTANDARD: case TxoutType::NULL_DATA: case TxoutType::WITNESS_UNKNOWN: - case TxoutType::WITNESS_V1_TAPROOT: return false; case TxoutType::PUBKEY: if (!CreateSig(creator, sigdata, provider, sig, CPubKey(vSolutions[0]), scriptPubKey, sigversion)) return false; @@ -175,6 +317,9 @@ static bool SignStep(const SigningProvider& provider, const BaseSignatureCreator // Could not find witnessScript, add to missing sigdata.missing_witness_script = uint256(vSolutions[0]); return false; + + case TxoutType::WITNESS_V1_TAPROOT: + return SignTaproot(provider, creator, WitnessV1Taproot(XOnlyPubKey{vSolutions[0]}), sigdata, ret); } // no default case, so the compiler can warn about missing cases assert(false); } @@ -205,7 +350,6 @@ bool ProduceSignature(const SigningProvider& provider, const BaseSignatureCreato bool solved = SignStep(provider, creator, fromPubKey, result, whichType, SigVersion::BASE, sigdata); bool P2SH = false; CScript subscript; - sigdata.scriptWitness.stack.clear(); if (solved && whichType == TxoutType::SCRIPTHASH) { @@ -238,10 +382,17 @@ bool ProduceSignature(const SigningProvider& provider, const BaseSignatureCreato sigdata.scriptWitness.stack = result; sigdata.witness = true; result.clear(); + } else if (whichType == TxoutType::WITNESS_V1_TAPROOT && !P2SH) { + sigdata.witness = true; + if (solved) { + sigdata.scriptWitness.stack = std::move(result); + } + result.clear(); } else if (solved && whichType == TxoutType::WITNESS_UNKNOWN) { sigdata.witness = true; } + if (!sigdata.witness) sigdata.scriptWitness.stack.clear(); if (P2SH) { result.push_back(std::vector<unsigned char>(subscript.begin(), subscript.end())); } @@ -402,6 +553,7 @@ class DummySignatureChecker final : public BaseSignatureChecker public: DummySignatureChecker() {} bool CheckECDSASignature(const std::vector<unsigned char>& scriptSig, const std::vector<unsigned char>& vchPubKey, const CScript& scriptCode, SigVersion sigversion) const override { return true; } + bool CheckSchnorrSignature(Span<const unsigned char> sig, Span<const unsigned char> pubkey, SigVersion sigversion, const ScriptExecutionData& execdata, ScriptError* serror) const override { return true; } }; const DummySignatureChecker DUMMY_CHECKER; @@ -427,6 +579,11 @@ public: vchSig[6 + m_r_len + m_s_len] = SIGHASH_ALL; return true; } + bool CreateSchnorrSig(const SigningProvider& provider, std::vector<unsigned char>& sig, const XOnlyPubKey& pubkey, const uint256* leaf_hash, const uint256* tweak, SigVersion sigversion) const override + { + sig.assign(64, '\000'); + return true; + } }; } @@ -476,6 +633,26 @@ bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore, // Use CTransaction for the constant parts of the // transaction to avoid rehashing. 
const CTransaction txConst(mtx); + + PrecomputedTransactionData txdata; + std::vector<CTxOut> spent_outputs; + spent_outputs.resize(mtx.vin.size()); + bool have_all_spent_outputs = true; + for (unsigned int i = 0; i < mtx.vin.size(); i++) { + CTxIn& txin = mtx.vin[i]; + auto coin = coins.find(txin.prevout); + if (coin == coins.end() || coin->second.IsSpent()) { + have_all_spent_outputs = false; + } else { + spent_outputs[i] = CTxOut(coin->second.out.nValue, coin->second.out.scriptPubKey); + } + } + if (have_all_spent_outputs) { + txdata.Init(txConst, std::move(spent_outputs), true); + } else { + txdata.Init(txConst, {}, true); + } + // Sign what we can: for (unsigned int i = 0; i < mtx.vin.size(); i++) { CTxIn& txin = mtx.vin[i]; @@ -490,7 +667,7 @@ bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore, SignatureData sigdata = DataFromTransaction(mtx, i, coin->second.out); // Only sign SIGHASH_SINGLE if there's a corresponding output: if (!fHashSingle || (i < mtx.vout.size())) { - ProduceSignature(*keystore, MutableTransactionSignatureCreator(&mtx, i, amount, nHashType), prevPubKey, sigdata); + ProduceSignature(*keystore, MutableTransactionSignatureCreator(&mtx, i, amount, &txdata, nHashType), prevPubKey, sigdata); } UpdateInput(txin, sigdata); @@ -502,7 +679,7 @@ bool SignTransaction(CMutableTransaction& mtx, const SigningProvider* keystore, } ScriptError serror = SCRIPT_ERR_OK; - if (!VerifyScript(txin.scriptSig, prevPubKey, &txin.scriptWitness, STANDARD_SCRIPT_VERIFY_FLAGS, TransactionSignatureChecker(&txConst, i, amount, MissingDataBehavior::FAIL), &serror)) { + if (!VerifyScript(txin.scriptSig, prevPubKey, &txin.scriptWitness, STANDARD_SCRIPT_VERIFY_FLAGS, TransactionSignatureChecker(&txConst, i, amount, txdata, MissingDataBehavior::FAIL), &serror)) { if (serror == SCRIPT_ERR_INVALID_STACK_OPERATION) { // Unable to sign input and verification failed (possible attempt to partially sign). input_errors[i] = "Unable to sign input, invalid stack size (possibly missing key)"; diff --git a/src/script/sign.h b/src/script/sign.h index a1cfe1574d..b4e7318892 100644 --- a/src/script/sign.h +++ b/src/script/sign.h @@ -11,13 +11,13 @@ #include <pubkey.h> #include <script/interpreter.h> #include <script/keyorigin.h> +#include <script/standard.h> #include <span.h> #include <streams.h> class CKey; class CKeyID; class CScript; -class CScriptID; class CTransaction; class SigningProvider; @@ -31,6 +31,7 @@ public: /** Create a singular (non-script) signature. */ virtual bool CreateSig(const SigningProvider& provider, std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const =0; + virtual bool CreateSchnorrSig(const SigningProvider& provider, std::vector<unsigned char>& sig, const XOnlyPubKey& pubkey, const uint256* leaf_hash, const uint256* merkle_root, SigVersion sigversion) const =0; }; /** A signature creator for transactions. 
*/ @@ -40,11 +41,14 @@ class MutableTransactionSignatureCreator : public BaseSignatureCreator { int nHashType; CAmount amount; const MutableTransactionSignatureChecker checker; + const PrecomputedTransactionData* m_txdata; public: MutableTransactionSignatureCreator(const CMutableTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, int nHashTypeIn = SIGHASH_ALL); + MutableTransactionSignatureCreator(const CMutableTransaction* txToIn, unsigned int nInIn, const CAmount& amountIn, const PrecomputedTransactionData* txdata, int nHashTypeIn = SIGHASH_ALL); const BaseSignatureChecker& Checker() const override { return checker; } bool CreateSig(const SigningProvider& provider, std::vector<unsigned char>& vchSig, const CKeyID& keyid, const CScript& scriptCode, SigVersion sigversion) const override; + bool CreateSchnorrSig(const SigningProvider& provider, std::vector<unsigned char>& sig, const XOnlyPubKey& pubkey, const uint256* leaf_hash, const uint256* merkle_root, SigVersion sigversion) const override; }; /** A signature creator that just produces 71-byte empty signatures. */ @@ -64,8 +68,11 @@ struct SignatureData { CScript redeem_script; ///< The redeemScript (if any) for the input CScript witness_script; ///< The witnessScript (if any) for the input. witnessScripts are used in P2WSH outputs. CScriptWitness scriptWitness; ///< The scriptWitness of an input. Contains complete signatures or the traditional partial signatures format. scriptWitness is part of a transaction input per BIP 144. + TaprootSpendData tr_spenddata; ///< Taproot spending data. std::map<CKeyID, SigPair> signatures; ///< BIP 174 style partial signatures for the input. May contain all signatures necessary for producing a final scriptSig or scriptWitness. std::map<CKeyID, std::pair<CPubKey, KeyOriginInfo>> misc_pubkeys; + std::vector<unsigned char> taproot_key_path_sig; /// Schnorr signature for key path spending + std::map<std::pair<XOnlyPubKey, uint256>, std::vector<unsigned char>> taproot_script_sigs; ///< (Partial) schnorr signatures, indexed by XOnlyPubKey and leaf_hash. 
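The taproot members above hold material for two distinct witness shapes; a sketch (plain byte vectors, illustrative only) of what the signing code ultimately places in scriptWitness for each spend type, assuming annex-less spends as supported by this change:

#include <vector>

using valtype = std::vector<unsigned char>;

// Key-path spend: a single Schnorr signature, 64 bytes (65 with an explicit sighash byte).
std::vector<valtype> KeyPathWitness(const valtype& taproot_key_path_sig)
{
    return {taproot_key_path_sig};
}

// Script-path spend: the leaf script's own inputs, then the leaf script, then the control block
// proving the script's commitment in the output key (the smallest known control block is chosen).
std::vector<valtype> ScriptPathWitness(std::vector<valtype> script_inputs,
                                       const valtype& leaf_script,
                                       const valtype& control_block)
{
    script_inputs.emplace_back(leaf_script);
    script_inputs.emplace_back(control_block);
    return script_inputs;
}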
std::vector<CKeyID> missing_pubkeys; ///< KeyIDs of pubkeys which could not be found std::vector<CKeyID> missing_sigs; ///< KeyIDs of pubkeys for signatures which could not be found uint160 missing_redeem_script; ///< ScriptID of the missing redeemScript (if any) diff --git a/src/script/signingprovider.cpp b/src/script/signingprovider.cpp index 9781ec32af..b80fbe22ce 100644 --- a/src/script/signingprovider.cpp +++ b/src/script/signingprovider.cpp @@ -44,6 +44,11 @@ bool HidingSigningProvider::GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& inf return m_provider->GetKeyOrigin(keyid, info); } +bool HidingSigningProvider::GetTaprootSpendData(const XOnlyPubKey& output_key, TaprootSpendData& spenddata) const +{ + return m_provider->GetTaprootSpendData(output_key, spenddata); +} + bool FlatSigningProvider::GetCScript(const CScriptID& scriptid, CScript& script) const { return LookupHelper(scripts, scriptid, script); } bool FlatSigningProvider::GetPubKey(const CKeyID& keyid, CPubKey& pubkey) const { return LookupHelper(pubkeys, keyid, pubkey); } bool FlatSigningProvider::GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const @@ -54,6 +59,10 @@ bool FlatSigningProvider::GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) return ret; } bool FlatSigningProvider::GetKey(const CKeyID& keyid, CKey& key) const { return LookupHelper(keys, keyid, key); } +bool FlatSigningProvider::GetTaprootSpendData(const XOnlyPubKey& output_key, TaprootSpendData& spenddata) const +{ + return LookupHelper(tr_spenddata, output_key, spenddata); +} FlatSigningProvider Merge(const FlatSigningProvider& a, const FlatSigningProvider& b) { @@ -66,6 +75,10 @@ FlatSigningProvider Merge(const FlatSigningProvider& a, const FlatSigningProvide ret.keys.insert(b.keys.begin(), b.keys.end()); ret.origins = a.origins; ret.origins.insert(b.origins.begin(), b.origins.end()); + ret.tr_spenddata = a.tr_spenddata; + for (const auto& [output_key, spenddata] : b.tr_spenddata) { + ret.tr_spenddata[output_key].Merge(spenddata); + } return ret; } diff --git a/src/script/signingprovider.h b/src/script/signingprovider.h index 76f31d2f6f..939ae10622 100644 --- a/src/script/signingprovider.h +++ b/src/script/signingprovider.h @@ -25,6 +25,7 @@ public: virtual bool GetKey(const CKeyID &address, CKey& key) const { return false; } virtual bool HaveKey(const CKeyID &address) const { return false; } virtual bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const { return false; } + virtual bool GetTaprootSpendData(const XOnlyPubKey& output_key, TaprootSpendData& spenddata) const { return false; } }; extern const SigningProvider& DUMMY_SIGNING_PROVIDER; @@ -42,6 +43,7 @@ public: bool GetPubKey(const CKeyID& keyid, CPubKey& pubkey) const override; bool GetKey(const CKeyID& keyid, CKey& key) const override; bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const override; + bool GetTaprootSpendData(const XOnlyPubKey& output_key, TaprootSpendData& spenddata) const override; }; struct FlatSigningProvider final : public SigningProvider @@ -50,11 +52,13 @@ struct FlatSigningProvider final : public SigningProvider std::map<CKeyID, CPubKey> pubkeys; std::map<CKeyID, std::pair<CPubKey, KeyOriginInfo>> origins; std::map<CKeyID, CKey> keys; + std::map<XOnlyPubKey, TaprootSpendData> tr_spenddata; /** Map from output key to spend data. 
*/ bool GetCScript(const CScriptID& scriptid, CScript& script) const override; bool GetPubKey(const CKeyID& keyid, CPubKey& pubkey) const override; bool GetKeyOrigin(const CKeyID& keyid, KeyOriginInfo& info) const override; bool GetKey(const CKeyID& keyid, CKey& key) const override; + bool GetTaprootSpendData(const XOnlyPubKey& output_key, TaprootSpendData& spenddata) const override; }; FlatSigningProvider Merge(const FlatSigningProvider& a, const FlatSigningProvider& b); diff --git a/src/script/standard.cpp b/src/script/standard.cpp index a4b11cc0a9..b8349bb9ab 100644 --- a/src/script/standard.cpp +++ b/src/script/standard.cpp @@ -377,6 +377,16 @@ bool IsValidDestination(const CTxDestination& dest) { /*static*/ TaprootBuilder::NodeInfo TaprootBuilder::Combine(NodeInfo&& a, NodeInfo&& b) { NodeInfo ret; + /* Iterate over all tracked leaves in a, add b's hash to their Merkle branch, and move them to ret. */ + for (auto& leaf : a.leaves) { + leaf.merkle_branch.push_back(b.hash); + ret.leaves.emplace_back(std::move(leaf)); + } + /* Iterate over all tracked leaves in b, add a's hash to their Merkle branch, and move them to ret. */ + for (auto& leaf : b.leaves) { + leaf.merkle_branch.push_back(a.hash); + ret.leaves.emplace_back(std::move(leaf)); + } /* Lexicographically sort a and b's hash, and compute parent hash. */ if (a.hash < b.hash) { ret.hash = (CHashWriter(HASHER_TAPBRANCH) << a.hash << b.hash).GetSHA256(); @@ -386,6 +396,27 @@ bool IsValidDestination(const CTxDestination& dest) { return ret; } +void TaprootSpendData::Merge(TaprootSpendData other) +{ + // TODO: figure out how to better deal with conflicting information + // being merged. + if (internal_key.IsNull() && !other.internal_key.IsNull()) { + internal_key = other.internal_key; + } + if (merkle_root.IsNull() && !other.merkle_root.IsNull()) { + merkle_root = other.merkle_root; + } + for (auto& [key, control_blocks] : other.scripts) { + // Once P0083R3 is supported by all our targeted platforms, + // this loop body can be replaced with: + // scripts[key].merge(std::move(control_blocks)); + auto& target = scripts[key]; + for (auto& control_block: control_blocks) { + target.insert(std::move(control_block)); + } + } +} + void TaprootBuilder::Insert(TaprootBuilder::NodeInfo&& node, int depth) { assert(depth >= 0 && (size_t)depth <= TAPROOT_CONTROL_MAX_NODE_COUNT); @@ -435,13 +466,14 @@ void TaprootBuilder::Insert(TaprootBuilder::NodeInfo&& node, int depth) return branch.size() == 0 || (branch.size() == 1 && branch[0]); } -TaprootBuilder& TaprootBuilder::Add(int depth, const CScript& script, int leaf_version) +TaprootBuilder& TaprootBuilder::Add(int depth, const CScript& script, int leaf_version, bool track) { assert((leaf_version & ~TAPROOT_LEAF_MASK) == 0); if (!IsValid()) return *this; - /* Construct NodeInfo object with leaf hash. */ + /* Construct NodeInfo object with leaf hash and (if track is true) also leaf information. */ NodeInfo node; node.hash = (CHashWriter{HASHER_TAPLEAF} << uint8_t(leaf_version) << script).GetSHA256(); + if (track) node.leaves.emplace_back(LeafInfo{script, leaf_version, {}}); /* Insert into the branch. */ Insert(std::move(node), depth); return *this; @@ -464,8 +496,168 @@ TaprootBuilder& TaprootBuilder::Finalize(const XOnlyPubKey& internal_key) m_internal_key = internal_key; auto ret = m_internal_key.CreateTapTweak(m_branch.size() == 0 ? 
nullptr : &m_branch[0]->hash); assert(ret.has_value()); - std::tie(m_output_key, std::ignore) = *ret; + std::tie(m_output_key, m_parity) = *ret; return *this; } WitnessV1Taproot TaprootBuilder::GetOutput() { return WitnessV1Taproot{m_output_key}; } + +TaprootSpendData TaprootBuilder::GetSpendData() const +{ + TaprootSpendData spd; + spd.merkle_root = m_branch.size() == 0 ? uint256() : m_branch[0]->hash; + spd.internal_key = m_internal_key; + if (m_branch.size()) { + // If any script paths exist, they have been combined into the root m_branch[0] + // by now. Compute the control block for each of its tracked leaves, and put them in + // spd.scripts. + for (const auto& leaf : m_branch[0]->leaves) { + std::vector<unsigned char> control_block; + control_block.resize(TAPROOT_CONTROL_BASE_SIZE + TAPROOT_CONTROL_NODE_SIZE * leaf.merkle_branch.size()); + control_block[0] = leaf.leaf_version | (m_parity ? 1 : 0); + std::copy(m_internal_key.begin(), m_internal_key.end(), control_block.begin() + 1); + if (leaf.merkle_branch.size()) { + std::copy(leaf.merkle_branch[0].begin(), + leaf.merkle_branch[0].begin() + TAPROOT_CONTROL_NODE_SIZE * leaf.merkle_branch.size(), + control_block.begin() + TAPROOT_CONTROL_BASE_SIZE); + } + spd.scripts[{leaf.script, leaf.leaf_version}].insert(std::move(control_block)); + } + } + return spd; +} + +std::optional<std::vector<std::tuple<int, CScript, int>>> InferTaprootTree(const TaprootSpendData& spenddata, const XOnlyPubKey& output) +{ + // Verify that the output matches the assumed Merkle root and internal key. + auto tweak = spenddata.internal_key.CreateTapTweak(spenddata.merkle_root.IsNull() ? nullptr : &spenddata.merkle_root); + if (!tweak || tweak->first != output) return std::nullopt; + // If the Merkle root is 0, the tree is empty, and we're done. + std::vector<std::tuple<int, CScript, int>> ret; + if (spenddata.merkle_root.IsNull()) return ret; + + /** Data structure to represent the nodes of the tree we're going to build. */ + struct TreeNode { + /** Hash of this node, if known; 0 otherwise. */ + uint256 hash; + /** The left and right subtrees (note that their order is irrelevant). */ + std::unique_ptr<TreeNode> sub[2]; + /** If this is known to be a leaf node, a pointer to the (script, leaf_ver) pair. + * nullptr otherwise. */ + const std::pair<CScript, int>* leaf = nullptr; + /** Whether or not this node has been explored (is known to be a leaf, or known to have children). */ + bool explored = false; + /** Whether or not this node is an inner node (unknown until explored = true). */ + bool inner; + /** Whether or not we have produced output for this subtree. */ + bool done = false; + }; + + // Build tree from the provided branches. + TreeNode root; + root.hash = spenddata.merkle_root; + for (const auto& [key, control_blocks] : spenddata.scripts) { + const auto& [script, leaf_ver] = key; + for (const auto& control : control_blocks) { + // Skip script records with nonsensical leaf version. + if (leaf_ver < 0 || leaf_ver >= 0x100 || leaf_ver & 1) continue; + // Skip script records with invalid control block sizes. + if (control.size() < TAPROOT_CONTROL_BASE_SIZE || control.size() > TAPROOT_CONTROL_MAX_SIZE || + ((control.size() - TAPROOT_CONTROL_BASE_SIZE) % TAPROOT_CONTROL_NODE_SIZE) != 0) continue; + // Skip script records that don't match the control block. + if ((control[0] & TAPROOT_LEAF_MASK) != leaf_ver) continue; + // Skip script records that don't match the provided Merkle root. 
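For orientation, a sketch of the control-block layout that GetSpendData() assembles above and that the inference code walks; the constants are the TAPROOT_CONTROL_* values from script/interpreter.h (33-byte base, 32 bytes per node), the caller is assumed to have already validated the block's length, and the struct is purely illustrative.

#include <script/interpreter.h>  // TAPROOT_CONTROL_BASE_SIZE, TAPROOT_CONTROL_NODE_SIZE, TAPROOT_LEAF_MASK
#include <vector>

// Layout: [leaf_version | parity bit] [32-byte x-only internal key] [32-byte branch hashes, leaf to root]
struct ControlBlockView {
    int leaf_version;                                      // control[0] with the parity bit masked off
    bool output_key_parity;                                // control[0] & 1
    std::vector<unsigned char> internal_key;               // bytes 1..32
    std::vector<std::vector<unsigned char>> merkle_branch; // hashing partners, leaf to root
};

ControlBlockView ParseControlBlock(const std::vector<unsigned char>& control)
{
    ControlBlockView view;
    view.leaf_version = control[0] & TAPROOT_LEAF_MASK;
    view.output_key_parity = control[0] & 1;
    view.internal_key.assign(control.begin() + 1, control.begin() + TAPROOT_CONTROL_BASE_SIZE);
    for (size_t pos = TAPROOT_CONTROL_BASE_SIZE; pos + TAPROOT_CONTROL_NODE_SIZE <= control.size(); pos += TAPROOT_CONTROL_NODE_SIZE) {
        view.merkle_branch.emplace_back(control.begin() + pos, control.begin() + pos + TAPROOT_CONTROL_NODE_SIZE);
    }
    return view;
}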
+ const uint256 leaf_hash = ComputeTapleafHash(leaf_ver, script); + const uint256 merkle_root = ComputeTaprootMerkleRoot(control, leaf_hash); + if (merkle_root != spenddata.merkle_root) continue; + + TreeNode* node = &root; + size_t levels = (control.size() - TAPROOT_CONTROL_BASE_SIZE) / TAPROOT_CONTROL_NODE_SIZE; + for (size_t depth = 0; depth < levels; ++depth) { + // Can't descend into a node which we already know is a leaf. + if (node->explored && !node->inner) return std::nullopt; + + // Extract partner hash from Merkle branch in control block. + uint256 hash; + std::copy(control.begin() + TAPROOT_CONTROL_BASE_SIZE + (levels - 1 - depth) * TAPROOT_CONTROL_NODE_SIZE, + control.begin() + TAPROOT_CONTROL_BASE_SIZE + (levels - depth) * TAPROOT_CONTROL_NODE_SIZE, + hash.begin()); + + if (node->sub[0]) { + // Descend into the existing left or right branch. + bool desc = false; + for (int i = 0; i < 2; ++i) { + if (node->sub[i]->hash == hash || (node->sub[i]->hash.IsNull() && node->sub[1-i]->hash != hash)) { + node->sub[i]->hash = hash; + node = &*node->sub[1-i]; + desc = true; + break; + } + } + if (!desc) return std::nullopt; // This probably requires a hash collision to hit. + } else { + // We're in an unexplored node. Create subtrees and descend. + node->explored = true; + node->inner = true; + node->sub[0] = std::make_unique<TreeNode>(); + node->sub[1] = std::make_unique<TreeNode>(); + node->sub[1]->hash = hash; + node = &*node->sub[0]; + } + } + // Cannot turn a known inner node into a leaf. + if (node->sub[0]) return std::nullopt; + node->explored = true; + node->inner = false; + node->leaf = &key; + node->hash = leaf_hash; + } + } + + // Recursive processing to turn the tree into flattened output. Use an explicit stack here to avoid + // overflowing the call stack (the tree may be 128 levels deep). + std::vector<TreeNode*> stack{&root}; + while (!stack.empty()) { + TreeNode& node = *stack.back(); + if (!node.explored) { + // Unexplored node, which means the tree is incomplete. + return std::nullopt; + } else if (!node.inner) { + // Leaf node; produce output. + ret.emplace_back(stack.size() - 1, node.leaf->first, node.leaf->second); + node.done = true; + stack.pop_back(); + } else if (node.sub[0]->done && !node.sub[1]->done && !node.sub[1]->explored && !node.sub[1]->hash.IsNull() && + (CHashWriter{HASHER_TAPBRANCH} << node.sub[1]->hash << node.sub[1]->hash).GetSHA256() == node.hash) { + // Whenever there are nodes with two identical subtrees under it, we run into a problem: + // the control blocks for the leaves underneath those will be identical as well, and thus + // they will all be matched to the same path in the tree. The result is that at the location + // where the duplicate occurred, the left child will contain a normal tree that can be explored + // and processed, but the right one will remain unexplored. + // + // This situation can be detected, by encountering an inner node with unexplored right subtree + // with known hash, and H_TapBranch(hash, hash) is equal to the parent node (this node)'s hash. + // + // To deal with this, simply process the left tree a second time (set its done flag to false; + // noting that the done flag of its children have already been set to false after processing + // those). To avoid ending up in an infinite loop, set the done flag of the right (unexplored) + // subtree to true. + node.sub[0]->done = false; + node.sub[1]->done = true; + } else if (node.sub[0]->done && node.sub[1]->done) { + // An internal node which we're finished with. 
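To make the duplicated-subtree case described above concrete, a sketch using the TaprootBuilder and InferTaprootTree interfaces declared in script/standard.h by this diff; script_a/script_b are hypothetical placeholder leaf scripts and internal_key is assumed to be a valid x-only public key.

#include <cassert>
#include <pubkey.h>
#include <script/interpreter.h>  // TAPROOT_LEAF_TAPSCRIPT
#include <script/script.h>
#include <script/standard.h>

// Tree shape (depths passed to Add() are 2,2,2,2):
//
//          root
//         /    \
//        N      N        both children are identical, so H_TapBranch(N, N) == root,
//       / \    / \       which is exactly the condition the processing loop detects
//      A   B  A   B      before replaying the explored left copy.
//
void SketchDuplicateSubtree(const XOnlyPubKey& internal_key)
{
    const CScript script_a = CScript() << OP_1;  // placeholder leaf scripts
    const CScript script_b = CScript() << OP_2;
    TaprootBuilder builder;
    builder.Add(2, script_a, TAPROOT_LEAF_TAPSCRIPT);  // left copy of A
    builder.Add(2, script_b, TAPROOT_LEAF_TAPSCRIPT);  // left copy of B
    builder.Add(2, script_a, TAPROOT_LEAF_TAPSCRIPT);  // right copy of A
    builder.Add(2, script_b, TAPROOT_LEAF_TAPSCRIPT);  // right copy of B
    builder.Finalize(internal_key);
    // The two copies of each leaf produce identical control blocks, which collapse to a single
    // set entry, so inference must reconstruct the unexplored right subtree via the check above.
    const TaprootSpendData spenddata = builder.GetSpendData();
    const auto tree = InferTaprootTree(spenddata, builder.GetOutput());
    assert(tree && tree->size() == 4);  // four (depth == 2, script, leaf_ver) entries
}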
+ node.sub[0]->done = false; + node.sub[1]->done = false; + node.done = true; + stack.pop_back(); + } else if (!node.sub[0]->done) { + // An internal node whose left branch hasn't been processed yet. Do so first. + stack.push_back(&*node.sub[0]); + } else if (!node.sub[1]->done) { + // An internal node whose right branch hasn't been processed yet. Do so first. + stack.push_back(&*node.sub[1]); + } + } + + return ret; +} diff --git a/src/script/standard.h b/src/script/standard.h index d7ea5cef27..ac4e2f3276 100644 --- a/src/script/standard.h +++ b/src/script/standard.h @@ -11,6 +11,7 @@ #include <uint256.h> #include <util/hash_type.h> +#include <map> #include <string> #include <variant> @@ -209,15 +210,50 @@ CScript GetScriptForRawPubKey(const CPubKey& pubkey); /** Generate a multisig script. */ CScript GetScriptForMultisig(int nRequired, const std::vector<CPubKey>& keys); +struct ShortestVectorFirstComparator +{ + bool operator()(const std::vector<unsigned char>& a, const std::vector<unsigned char>& b) const + { + if (a.size() < b.size()) return true; + if (a.size() > b.size()) return false; + return a < b; + } +}; + +struct TaprootSpendData +{ + /** The BIP341 internal key. */ + XOnlyPubKey internal_key; + /** The Merkle root of the script tree (0 if no scripts). */ + uint256 merkle_root; + /** Map from (script, leaf_version) to (sets of) control blocks. + * The control blocks are sorted by size, so that the signing logic can + * easily prefer the cheapest one. */ + std::map<std::pair<CScript, int>, std::set<std::vector<unsigned char>, ShortestVectorFirstComparator>> scripts; + /** Merge other TaprootSpendData (for the same scriptPubKey) into this. */ + void Merge(TaprootSpendData other); +}; + /** Utility class to construct Taproot outputs from internal key and script tree. */ class TaprootBuilder { private: + /** Information about a tracked leaf in the Merkle tree. */ + struct LeafInfo + { + CScript script; //!< The script. + int leaf_version; //!< The leaf version for that script. + std::vector<uint256> merkle_branch; //!< The hashing partners above this leaf. + }; + /** Information associated with a node in the Merkle tree. */ struct NodeInfo { /** Merkle hash of this node. */ uint256 hash; + /** Tracked leaves underneath this node (either from the node itself, or its children). + * The merkle_branch field for each is the partners to get to *this* node. */ + std::vector<LeafInfo> leaves; }; /** Whether the builder is in a valid state so far. */ bool m_valid = true; @@ -260,7 +296,8 @@ private: std::vector<std::optional<NodeInfo>> m_branch; XOnlyPubKey m_internal_key; //!< The internal key, set when finalizing. - XOnlyPubKey m_output_key; //!< The output key, computed when finalizing. */ + XOnlyPubKey m_output_key; //!< The output key, computed when finalizing. + bool m_parity; //!< The tweak parity, computed when finalizing. /** Combine information about a parent Merkle tree node from its child nodes. */ static NodeInfo Combine(NodeInfo&& a, NodeInfo&& b); @@ -269,8 +306,9 @@ private: public: /** Add a new script at a certain depth in the tree. Add() operations must be called - * in depth-first traversal order of binary tree. */ - TaprootBuilder& Add(int depth, const CScript& script, int leaf_version); + * in depth-first traversal order of binary tree. If track is true, it will be included in + * the GetSpendData() output. 
*/ + TaprootBuilder& Add(int depth, const CScript& script, int leaf_version, bool track = true); /** Like Add(), but for a Merkle node with a given hash to the tree. */ TaprootBuilder& AddOmitted(int depth, const uint256& hash); /** Finalize the construction. Can only be called when IsComplete() is true. @@ -285,6 +323,16 @@ public: WitnessV1Taproot GetOutput(); /** Check if a list of depths is legal (will lead to IsComplete()). */ static bool ValidDepths(const std::vector<int>& depths); + /** Compute spending data (after Finalize()). */ + TaprootSpendData GetSpendData() const; }; +/** Given a TaprootSpendData and the output key, reconstruct its script tree. + * + * If the output doesn't match the spenddata, or if the data in spenddata is incomplete, + * std::nullopt is returned. Otherwise, a vector of (depth, script, leaf_ver) tuples is + * returned, corresponding to a depth-first traversal of the script tree. + */ +std::optional<std::vector<std::tuple<int, CScript, int>>> InferTaprootTree(const TaprootSpendData& spenddata, const XOnlyPubKey& output); + #endif // BITCOIN_SCRIPT_STANDARD_H diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp index 49b40924e0..eb5c37b34d 100644 --- a/src/test/addrman_tests.cpp +++ b/src/test/addrman_tests.cpp @@ -74,9 +74,9 @@ public: // Simulates connection failure so that we can test eviction of offline nodes void SimConnFail(const CService& addr) { - LOCK(cs); int64_t nLastSuccess = 1; - Good_(addr, true, nLastSuccess); // Set last good connection in the deep past. + // Set last good connection in the deep past. + Good(addr, true, nLastSuccess); bool count_failure = false; int64_t nLastTry = GetAdjustedTime()-61; diff --git a/src/test/blockfilter_index_tests.cpp b/src/test/blockfilter_index_tests.cpp index 2f532ef598..2eb653e9ec 100644 --- a/src/test/blockfilter_index_tests.cpp +++ b/src/test/blockfilter_index_tests.cpp @@ -62,7 +62,7 @@ CBlock BuildChainTestingSetup::CreateBlock(const CBlockIndex* prev, const CScript& scriptPubKey) { const CChainParams& chainparams = Params(); - std::unique_ptr<CBlockTemplate> pblocktemplate = BlockAssembler(::ChainstateActive(), *m_node.mempool, chainparams).CreateNewBlock(scriptPubKey); + std::unique_ptr<CBlockTemplate> pblocktemplate = BlockAssembler(m_node.chainman->ActiveChainstate(), *m_node.mempool, chainparams).CreateNewBlock(scriptPubKey); CBlock& block = pblocktemplate->block; block.hashPrevBlock = prev->GetBlockHash(); block.nTime = prev->nTime + 1; @@ -117,9 +117,9 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) std::vector<BlockFilter> filters; std::vector<uint256> filter_hashes; - for (const CBlockIndex* block_index = ::ChainActive().Genesis(); + for (const CBlockIndex* block_index = m_node.chainman->ActiveChain().Genesis(); block_index != nullptr; - block_index = ::ChainActive().Next(block_index)) { + block_index = m_node.chainman->ActiveChain().Next(block_index)) { BOOST_CHECK(!filter_index.LookupFilter(block_index, filter)); BOOST_CHECK(!filter_index.LookupFilterHeader(block_index, filter_header)); BOOST_CHECK(!filter_index.LookupFilterRange(block_index->nHeight, block_index, filters)); @@ -131,7 +131,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) // BlockUntilSyncedToCurrentChain should return false before index is started. 
BOOST_CHECK(!filter_index.BlockUntilSyncedToCurrentChain()); - BOOST_REQUIRE(filter_index.Start(::ChainstateActive())); + BOOST_REQUIRE(filter_index.Start(m_node.chainman->ActiveChainstate())); // Allow filter index to catch up with the block index. constexpr int64_t timeout_ms = 10 * 1000; @@ -145,9 +145,9 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) { LOCK(cs_main); const CBlockIndex* block_index; - for (block_index = ::ChainActive().Genesis(); + for (block_index = m_node.chainman->ActiveChain().Genesis(); block_index != nullptr; - block_index = ::ChainActive().Next(block_index)) { + block_index = m_node.chainman->ActiveChain().Next(block_index)) { CheckFilterLookups(filter_index, block_index, last_header); } } @@ -156,7 +156,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) const CBlockIndex* tip; { LOCK(cs_main); - tip = ::ChainActive().Tip(); + tip = m_node.chainman->ActiveChain().Tip(); } CKey coinbase_key_A, coinbase_key_B; coinbase_key_A.MakeNewKey(true); @@ -178,7 +178,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) const CBlockIndex* block_index; { LOCK(cs_main); - block_index = g_chainman.m_blockman.LookupBlockIndex(block->GetHash()); + block_index = m_node.chainman->m_blockman.LookupBlockIndex(block->GetHash()); } BOOST_CHECK(filter_index.BlockUntilSyncedToCurrentChain()); @@ -196,7 +196,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) const CBlockIndex* block_index; { LOCK(cs_main); - block_index = g_chainman.m_blockman.LookupBlockIndex(block->GetHash()); + block_index = m_node.chainman->m_blockman.LookupBlockIndex(block->GetHash()); } BOOST_CHECK(filter_index.BlockUntilSyncedToCurrentChain()); @@ -210,7 +210,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) const CBlockIndex* block_index; { LOCK(cs_main); - block_index = g_chainman.m_blockman.LookupBlockIndex(block->GetHash()); + block_index = m_node.chainman->m_blockman.LookupBlockIndex(block->GetHash()); } BOOST_CHECK(filter_index.BlockUntilSyncedToCurrentChain()); @@ -231,14 +231,14 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) { LOCK(cs_main); - block_index = g_chainman.m_blockman.LookupBlockIndex(chainA[i]->GetHash()); + block_index = m_node.chainman->m_blockman.LookupBlockIndex(chainA[i]->GetHash()); } BOOST_CHECK(filter_index.BlockUntilSyncedToCurrentChain()); CheckFilterLookups(filter_index, block_index, chainA_last_header); { LOCK(cs_main); - block_index = g_chainman.m_blockman.LookupBlockIndex(chainB[i]->GetHash()); + block_index = m_node.chainman->m_blockman.LookupBlockIndex(chainB[i]->GetHash()); } BOOST_CHECK(filter_index.BlockUntilSyncedToCurrentChain()); CheckFilterLookups(filter_index, block_index, chainB_last_header); @@ -250,7 +250,7 @@ BOOST_FIXTURE_TEST_CASE(blockfilter_index_initial_sync, BuildChainTestingSetup) { LOCK(cs_main); - tip = ::ChainActive().Tip(); + tip = m_node.chainman->ActiveChain().Tip(); } BOOST_CHECK(filter_index.LookupFilterRange(0, tip, filters)); BOOST_CHECK(filter_index.LookupFilterHashRange(0, tip, filter_hashes)); diff --git a/src/test/coinstatsindex_tests.cpp b/src/test/coinstatsindex_tests.cpp index 106fcd2a33..597d7a7340 100644 --- a/src/test/coinstatsindex_tests.cpp +++ b/src/test/coinstatsindex_tests.cpp @@ -22,7 +22,7 @@ BOOST_FIXTURE_TEST_CASE(coinstatsindex_initial_sync, TestChain100Setup) const CBlockIndex* block_index; { LOCK(cs_main); - block_index = 
ChainActive().Tip(); + block_index = m_node.chainman->ActiveChain().Tip(); } // CoinStatsIndex should not be found before it is started. @@ -32,7 +32,7 @@ BOOST_FIXTURE_TEST_CASE(coinstatsindex_initial_sync, TestChain100Setup) // is started. BOOST_CHECK(!coin_stats_index.BlockUntilSyncedToCurrentChain()); - BOOST_REQUIRE(coin_stats_index.Start(::ChainstateActive())); + BOOST_REQUIRE(coin_stats_index.Start(m_node.chainman->ActiveChainstate())); // Allow the CoinStatsIndex to catch up with the block index that is syncing // in a background thread. @@ -46,7 +46,7 @@ BOOST_FIXTURE_TEST_CASE(coinstatsindex_initial_sync, TestChain100Setup) const CBlockIndex* genesis_block_index; { LOCK(cs_main); - genesis_block_index = ChainActive().Genesis(); + genesis_block_index = m_node.chainman->ActiveChain().Genesis(); } BOOST_CHECK(coin_stats_index.LookUpStats(genesis_block_index, coin_stats)); @@ -64,7 +64,7 @@ BOOST_FIXTURE_TEST_CASE(coinstatsindex_initial_sync, TestChain100Setup) const CBlockIndex* new_block_index; { LOCK(cs_main); - new_block_index = ChainActive().Tip(); + new_block_index = m_node.chainman->ActiveChain().Tip(); } coin_stats_index.LookUpStats(new_block_index, new_coin_stats); diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp index a56ce51acb..5668ead1fb 100644 --- a/src/test/denialofservice_tests.cpp +++ b/src/test/denialofservice_tests.cpp @@ -14,36 +14,19 @@ #include <script/signingprovider.h> #include <script/standard.h> #include <serialize.h> +#include <test/util/net.h> +#include <test/util/setup_common.h> #include <txorphanage.h> #include <util/string.h> #include <util/system.h> #include <util/time.h> #include <validation.h> -#include <test/util/setup_common.h> - #include <array> #include <stdint.h> #include <boost/test/unit_test.hpp> -struct CConnmanTest : public CConnman { - using CConnman::CConnman; - void AddNode(CNode& node) - { - LOCK(cs_vNodes); - vNodes.push_back(&node); - } - void ClearNodes() - { - LOCK(cs_vNodes); - for (CNode* node : vNodes) { - delete node; - } - vNodes.clear(); - } -}; - static CService ip(uint32_t i) { struct in_addr s; @@ -83,8 +66,8 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) // This test requires that we have a chain with non-zero work. 
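The recurring mechanical change in these test hunks is reaching chain state through the test's NodeContext instead of the ::ChainActive()/::ChainstateActive()/g_chainman globals. A minimal sketch of that pattern, assuming the 2021-era headers these tests use:

#include <chain.h>         // CBlockIndex
#include <node/context.h>  // NodeContext
#include <sync.h>          // LOCK
#include <validation.h>    // ChainstateManager, cs_main

const CBlockIndex* SketchTipLookup(const NodeContext& node)
{
    LOCK(cs_main);
    // Chainstate and chain are owned by the node's ChainstateManager:
    //   node.chainman->ActiveChainstate()   (was ::ChainstateActive())
    //   node.chainman->ActiveChain()        (was ::ChainActive())
    const CBlockIndex* tip = node.chainman->ActiveChain().Tip();
    if (tip == nullptr) return nullptr;
    // Block-index lookups go through the owned BlockManager instead of g_chainman:
    return node.chainman->m_blockman.LookupBlockIndex(tip->GetBlockHash());
}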
{ LOCK(cs_main); - BOOST_CHECK(::ChainActive().Tip() != nullptr); - BOOST_CHECK(::ChainActive().Tip()->nChainWork > 0); + BOOST_CHECK(m_node.chainman->ActiveChain().Tip() != nullptr); + BOOST_CHECK(m_node.chainman->ActiveChain().Tip()->nChainWork > 0); } // Test starts here @@ -120,7 +103,7 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction) peerLogic->FinalizeNode(dummyNode1); } -static void AddRandomOutboundPeer(std::vector<CNode *> &vNodes, PeerManager &peerLogic, CConnmanTest* connman) +static void AddRandomOutboundPeer(std::vector<CNode*>& vNodes, PeerManager& peerLogic, ConnmanTestMsg& connman) { CAddress addr(ip(g_insecure_rand_ctx.randbits(32)), NODE_NONE); vNodes.emplace_back(new CNode(id++, ServiceFlags(NODE_NETWORK | NODE_WITNESS), INVALID_SOCKET, addr, /* nKeyedNetGroupIn */ 0, /* nLocalHostNonceIn */ 0, CAddress(), /* pszDest */ "", ConnectionType::OUTBOUND_FULL_RELAY, /* inbound_onion */ false)); @@ -130,13 +113,13 @@ static void AddRandomOutboundPeer(std::vector<CNode *> &vNodes, PeerManager &pee peerLogic.InitializeNode(&node); node.fSuccessfullyConnected = true; - connman->AddNode(node); + connman.AddTestNode(node); } BOOST_AUTO_TEST_CASE(stale_tip_peer_management) { const CChainParams& chainparams = Params(); - auto connman = std::make_unique<CConnmanTest>(0x1337, 0x1337, *m_node.addrman); + auto connman = std::make_unique<ConnmanTestMsg>(0x1337, 0x1337, *m_node.addrman); auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, nullptr, *m_node.scheduler, *m_node.chainman, *m_node.mempool, false); @@ -150,8 +133,8 @@ BOOST_AUTO_TEST_CASE(stale_tip_peer_management) std::vector<CNode *> vNodes; // Mock some outbound peers - for (int i=0; i<max_outbound_full_relay; ++i) { - AddRandomOutboundPeer(vNodes, *peerLogic, connman.get()); + for (int i = 0; i < max_outbound_full_relay; ++i) { + AddRandomOutboundPeer(vNodes, *peerLogic, *connman); } peerLogic->CheckForStaleTipAndEvictPeers(); @@ -176,7 +159,7 @@ BOOST_AUTO_TEST_CASE(stale_tip_peer_management) // If we add one more peer, something should get marked for eviction // on the next check (since we're mocking the time to be in the future, the // required time connected check should be satisfied). 
- AddRandomOutboundPeer(vNodes, *peerLogic, connman.get()); + AddRandomOutboundPeer(vNodes, *peerLogic, *connman); peerLogic->CheckForStaleTipAndEvictPeers(); for (int i = 0; i < max_outbound_full_relay; ++i) { @@ -202,14 +185,14 @@ BOOST_AUTO_TEST_CASE(stale_tip_peer_management) peerLogic->FinalizeNode(*node); } - connman->ClearNodes(); + connman->ClearTestNodes(); } BOOST_AUTO_TEST_CASE(peer_discouragement) { const CChainParams& chainparams = Params(); - auto banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME); - auto connman = std::make_unique<CConnmanTest>(0x1337, 0x1337, *m_node.addrman); + auto banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME); + auto connman = std::make_unique<ConnmanTestMsg>(0x1337, 0x1337, *m_node.addrman); auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool, false); @@ -233,7 +216,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) nodes[0]->SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(nodes[0]); nodes[0]->fSuccessfullyConnected = true; - connman->AddNode(*nodes[0]); + connman->AddTestNode(*nodes[0]); peerLogic->Misbehaving(nodes[0]->GetId(), DISCOURAGEMENT_THRESHOLD, /* message */ ""); // Should be discouraged { LOCK(nodes[0]->cs_sendProcessing); @@ -249,7 +232,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) nodes[1]->SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(nodes[1]); nodes[1]->fSuccessfullyConnected = true; - connman->AddNode(*nodes[1]); + connman->AddTestNode(*nodes[1]); peerLogic->Misbehaving(nodes[1]->GetId(), DISCOURAGEMENT_THRESHOLD - 1, /* message */ ""); { LOCK(nodes[1]->cs_sendProcessing); @@ -280,7 +263,7 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) nodes[2]->SetCommonVersion(PROTOCOL_VERSION); peerLogic->InitializeNode(nodes[2]); nodes[2]->fSuccessfullyConnected = true; - connman->AddNode(*nodes[2]); + connman->AddTestNode(*nodes[2]); peerLogic->Misbehaving(nodes[2]->GetId(), DISCOURAGEMENT_THRESHOLD, /* message */ ""); { LOCK(nodes[2]->cs_sendProcessing); @@ -296,13 +279,13 @@ BOOST_AUTO_TEST_CASE(peer_discouragement) for (CNode* node : nodes) { peerLogic->FinalizeNode(*node); } - connman->ClearNodes(); + connman->ClearTestNodes(); } BOOST_AUTO_TEST_CASE(DoS_bantime) { const CChainParams& chainparams = Params(); - auto banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME); + auto banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME); auto connman = std::make_unique<CConnman>(0x1337, 0x1337, *m_node.addrman); auto peerLogic = PeerManager::make(chainparams, *connman, *m_node.addrman, banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool, false); diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp index 98ae32a8d0..db0b461873 100644 --- a/src/test/fuzz/addrman.cpp +++ b/src/test/fuzz/addrman.cpp @@ -57,15 +57,6 @@ FUZZ_TARGET_INIT(addrman, initialize_addrman) (void)addr_man.SelectTriedCollision(); }, [&] { - (void)addr_man.Select(fuzzed_data_provider.ConsumeBool()); - }, - [&] { - (void)addr_man.GetAddr( - /* max_addresses */ fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096), - /* max_pct */ fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096), - /* network */ std::nullopt); - }, - [&] { const std::optional<CAddress> opt_address = 
ConsumeDeserializable<CAddress>(fuzzed_data_provider); const std::optional<CNetAddr> opt_net_addr = ConsumeDeserializable<CNetAddr>(fuzzed_data_provider); if (opt_address && opt_net_addr) { @@ -109,12 +100,15 @@ FUZZ_TARGET_INIT(addrman, initialize_addrman) if (opt_service) { addr_man.SetServices(*opt_service, ConsumeWeakEnum(fuzzed_data_provider, ALL_SERVICE_FLAGS)); } - }, - [&] { - (void)addr_man.Check(); }); } - (void)addr_man.size(); + const CAddrMan& const_addr_man{addr_man}; + (void)/*const_*/addr_man.GetAddr( + /* max_addresses */ fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096), + /* max_pct */ fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 4096), + /* network */ std::nullopt); + (void)/*const_*/addr_man.Select(fuzzed_data_provider.ConsumeBool()); + (void)const_addr_man.size(); CDataStream data_stream(SER_NETWORK, PROTOCOL_VERSION); - data_stream << addr_man; + data_stream << const_addr_man; } diff --git a/src/test/fuzz/banman.cpp b/src/test/fuzz/banman.cpp index 759a70a857..182aabc79b 100644 --- a/src/test/fuzz/banman.cpp +++ b/src/test/fuzz/banman.cpp @@ -9,8 +9,10 @@ #include <test/fuzz/fuzz.h> #include <test/fuzz/util.h> #include <test/util/setup_common.h> +#include <util/readwritefile.h> #include <util/system.h> +#include <cassert> #include <cstdint> #include <limits> #include <string> @@ -30,6 +32,13 @@ void initialize_banman() static const auto testing_setup = MakeNoLogFileContext<>(); } +static bool operator==(const CBanEntry& lhs, const CBanEntry& rhs) +{ + return lhs.nVersion == rhs.nVersion && + lhs.nCreateTime == rhs.nCreateTime && + lhs.nBanUntil == rhs.nBanUntil; +} + FUZZ_TARGET_INIT(banman, initialize_banman) { // The complexity is O(N^2), where N is the input size, because each call @@ -38,10 +47,23 @@ FUZZ_TARGET_INIT(banman, initialize_banman) int limit_max_ops{300}; FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; SetMockTime(ConsumeTime(fuzzed_data_provider)); - const fs::path banlist_file = gArgs.GetDataDirNet() / "fuzzed_banlist.dat"; - fs::remove(banlist_file); + fs::path banlist_file = gArgs.GetDataDirNet() / "fuzzed_banlist"; + + const bool start_with_corrupted_banlist{fuzzed_data_provider.ConsumeBool()}; + bool force_read_and_write_to_err{false}; + if (start_with_corrupted_banlist) { + const std::string sfx{fuzzed_data_provider.ConsumeBool() ? 
".dat" : ".json"}; + assert(WriteBinaryFile(banlist_file.string() + sfx, + fuzzed_data_provider.ConsumeRandomLengthString())); + } else { + force_read_and_write_to_err = fuzzed_data_provider.ConsumeBool(); + if (force_read_and_write_to_err) { + banlist_file = fs::path{"path"} / "to" / "inaccessible" / "fuzzed_banlist"; + } + } + { - BanMan ban_man{banlist_file, nullptr, ConsumeBanTimeOffset(fuzzed_data_provider)}; + BanMan ban_man{banlist_file, /* client_interface */ nullptr, /* default_ban_time */ ConsumeBanTimeOffset(fuzzed_data_provider)}; while (--limit_max_ops >= 0 && fuzzed_data_provider.ConsumeBool()) { CallOneOf( fuzzed_data_provider, @@ -79,6 +101,17 @@ FUZZ_TARGET_INIT(banman, initialize_banman) ban_man.Discourage(ConsumeNetAddr(fuzzed_data_provider)); }); } + if (!force_read_and_write_to_err) { + ban_man.DumpBanlist(); + SetMockTime(ConsumeTime(fuzzed_data_provider)); + banmap_t banmap; + ban_man.GetBanned(banmap); + BanMan ban_man_read{banlist_file, /* client_interface */ nullptr, /* default_ban_time */ 0}; + banmap_t banmap_read; + ban_man_read.GetBanned(banmap_read); + assert(banmap == banmap_read); + } } - fs::remove(banlist_file); + fs::remove(banlist_file.string() + ".dat"); + fs::remove(banlist_file.string() + ".json"); } diff --git a/src/test/fuzz/base_encode_decode.cpp b/src/test/fuzz/base_encode_decode.cpp index 4470e13a61..2b4f15115b 100644 --- a/src/test/fuzz/base_encode_decode.cpp +++ b/src/test/fuzz/base_encode_decode.cpp @@ -14,7 +14,12 @@ #include <string> #include <vector> -FUZZ_TARGET(base_encode_decode) +void initialize_base_encode_decode() +{ + static const ECCVerifyHandle verify_handle; +} + +FUZZ_TARGET_INIT(base_encode_decode, initialize_base_encode_decode) { const std::string random_encoded_string(buffer.begin(), buffer.end()); diff --git a/src/test/fuzz/coins_view.cpp b/src/test/fuzz/coins_view.cpp index b509ee0b26..f452696689 100644 --- a/src/test/fuzz/coins_view.cpp +++ b/src/test/fuzz/coins_view.cpp @@ -27,6 +27,7 @@ #include <vector> namespace { +const TestingSetup* g_setup; const Coin EMPTY_COIN{}; bool operator==(const Coin& a, const Coin& b) @@ -39,6 +40,7 @@ bool operator==(const Coin& a, const Coin& b) void initialize_coins_view() { static const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(); + g_setup = testing_setup.get(); } FUZZ_TARGET_INIT(coins_view, initialize_coins_view) @@ -181,8 +183,8 @@ FUZZ_TARGET_INIT(coins_view, initialize_coins_view) } { - const CCoinsViewCursor* coins_view_cursor = backend_coins_view.Cursor(); - assert(coins_view_cursor == nullptr); + std::unique_ptr<CCoinsViewCursor> coins_view_cursor = backend_coins_view.Cursor(); + assert(!coins_view_cursor); (void)backend_coins_view.EstimateSize(); (void)backend_coins_view.GetBestBlock(); (void)backend_coins_view.GetHeadBlocks(); @@ -268,7 +270,7 @@ FUZZ_TARGET_INIT(coins_view, initialize_coins_view) CCoinsStats stats{CoinStatsHashType::HASH_SERIALIZED}; bool expected_code_path = false; try { - (void)GetUTXOStats(&coins_view_cache, WITH_LOCK(::cs_main, return std::ref(g_chainman.m_blockman)), stats); + (void)GetUTXOStats(&coins_view_cache, WITH_LOCK(::cs_main, return std::ref(g_setup->m_node.chainman->m_blockman)), stats); } catch (const std::logic_error&) { expected_code_path = true; } diff --git a/src/test/fuzz/crypto.cpp b/src/test/fuzz/crypto.cpp index eeeac18968..f83747e424 100644 --- a/src/test/fuzz/crypto.cpp +++ b/src/test/fuzz/crypto.cpp @@ -19,6 +19,10 @@ FUZZ_TARGET(crypto) { + // Hashing is expensive with sanitizers enabled, so limit the number of 
+ // calls + int limit_max_ops{30}; + FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; std::vector<uint8_t> data = ConsumeRandomLengthByteVector(fuzzed_data_provider); if (data.empty()) { @@ -36,7 +40,7 @@ FUZZ_TARGET(crypto) SHA3_256 sha3; CSipHasher sip_hasher{fuzzed_data_provider.ConsumeIntegral<uint64_t>(), fuzzed_data_provider.ConsumeIntegral<uint64_t>()}; - while (fuzzed_data_provider.ConsumeBool()) { + while (--limit_max_ops >= 0 && fuzzed_data_provider.ConsumeBool()) { CallOneOf( fuzzed_data_provider, [&] { diff --git a/src/test/fuzz/deserialize.cpp b/src/test/fuzz/deserialize.cpp index 1290c78712..721e4360d0 100644 --- a/src/test/fuzz/deserialize.cpp +++ b/src/test/fuzz/deserialize.cpp @@ -53,9 +53,9 @@ struct invalid_fuzzing_input_exception : public std::exception { }; template <typename T> -CDataStream Serialize(const T& obj, const int version = INIT_PROTO_VERSION) +CDataStream Serialize(const T& obj, const int version = INIT_PROTO_VERSION, const int ser_type = SER_NETWORK) { - CDataStream ds(SER_NETWORK, version); + CDataStream ds(ser_type, version); ds << obj; return ds; } @@ -69,9 +69,9 @@ T Deserialize(CDataStream ds) } template <typename T> -void DeserializeFromFuzzingInput(FuzzBufferType buffer, T& obj, const std::optional<int> protocol_version = std::nullopt) +void DeserializeFromFuzzingInput(FuzzBufferType buffer, T& obj, const std::optional<int> protocol_version = std::nullopt, const int ser_type = SER_NETWORK) { - CDataStream ds(buffer, SER_NETWORK, INIT_PROTO_VERSION); + CDataStream ds(buffer, ser_type, INIT_PROTO_VERSION); if (protocol_version) { ds.SetVersion(*protocol_version); } else { @@ -92,9 +92,9 @@ void DeserializeFromFuzzingInput(FuzzBufferType buffer, T& obj, const std::optio } template <typename T> -void AssertEqualAfterSerializeDeserialize(const T& obj, const int version = INIT_PROTO_VERSION) +void AssertEqualAfterSerializeDeserialize(const T& obj, const int version = INIT_PROTO_VERSION, const int ser_type = SER_NETWORK) { - assert(Deserialize<T>(Serialize(obj, version)) == obj); + assert(Deserialize<T>(Serialize(obj, version, ser_type)) == obj); } } // namespace @@ -136,8 +136,7 @@ FUZZ_TARGET_DESERIALIZE(partial_merkle_tree_deserialize, { FUZZ_TARGET_DESERIALIZE(pub_key_deserialize, { CPubKey pub_key; DeserializeFromFuzzingInput(buffer, pub_key); - // TODO: The following equivalence should hold for CPubKey? Fix. 
- // AssertEqualAfterSerializeDeserialize(pub_key); + AssertEqualAfterSerializeDeserialize(pub_key); }) FUZZ_TARGET_DESERIALIZE(script_deserialize, { CScript script; @@ -251,9 +250,37 @@ FUZZ_TARGET_DESERIALIZE(messageheader_deserialize, { DeserializeFromFuzzingInput(buffer, mh); (void)mh.IsCommandValid(); }) -FUZZ_TARGET_DESERIALIZE(address_deserialize, { +FUZZ_TARGET_DESERIALIZE(address_deserialize_v1_notime, { CAddress a; - DeserializeFromFuzzingInput(buffer, a); + DeserializeFromFuzzingInput(buffer, a, INIT_PROTO_VERSION); + // A CAddress without nTime (as is expected under INIT_PROTO_VERSION) will roundtrip + // in all 5 formats (with/without nTime, v1/v2, network/disk) + AssertEqualAfterSerializeDeserialize(a, INIT_PROTO_VERSION); + AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION); + AssertEqualAfterSerializeDeserialize(a, 0, SER_DISK); + AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION | ADDRV2_FORMAT); + AssertEqualAfterSerializeDeserialize(a, ADDRV2_FORMAT, SER_DISK); +}) +FUZZ_TARGET_DESERIALIZE(address_deserialize_v1_withtime, { + CAddress a; + DeserializeFromFuzzingInput(buffer, a, PROTOCOL_VERSION); + // A CAddress in V1 mode will roundtrip in all 4 formats that have nTime. + AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION); + AssertEqualAfterSerializeDeserialize(a, 0, SER_DISK); + AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION | ADDRV2_FORMAT); + AssertEqualAfterSerializeDeserialize(a, ADDRV2_FORMAT, SER_DISK); +}) +FUZZ_TARGET_DESERIALIZE(address_deserialize_v2, { + CAddress a; + DeserializeFromFuzzingInput(buffer, a, PROTOCOL_VERSION | ADDRV2_FORMAT); + // A CAddress in V2 mode will roundtrip in both V2 formats, and also in the V1 formats + // with time if it's V1 compatible. + if (a.IsAddrV1Compatible()) { + AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION); + AssertEqualAfterSerializeDeserialize(a, 0, SER_DISK); + } + AssertEqualAfterSerializeDeserialize(a, PROTOCOL_VERSION | ADDRV2_FORMAT); + AssertEqualAfterSerializeDeserialize(a, ADDRV2_FORMAT, SER_DISK); }) FUZZ_TARGET_DESERIALIZE(inv_deserialize, { CInv i; diff --git a/src/test/fuzz/load_external_block_file.cpp b/src/test/fuzz/load_external_block_file.cpp index dbd0c76d42..a7770c90e8 100644 --- a/src/test/fuzz/load_external_block_file.cpp +++ b/src/test/fuzz/load_external_block_file.cpp @@ -13,9 +13,14 @@ #include <cstdint> #include <vector> +namespace { +const TestingSetup* g_setup; +} // namespace + void initialize_load_external_block_file() { static const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(); + g_setup = testing_setup.get(); } FUZZ_TARGET_INIT(load_external_block_file, initialize_load_external_block_file) @@ -27,5 +32,5 @@ FUZZ_TARGET_INIT(load_external_block_file, initialize_load_external_block_file) return; } FlatFilePos flat_file_pos; - ::ChainstateActive().LoadExternalBlockFile(Params(), fuzzed_block_file, fuzzed_data_provider.ConsumeBool() ? &flat_file_pos : nullptr); + g_setup->m_node.chainman->ActiveChainstate().LoadExternalBlockFile(Params(), fuzzed_block_file, fuzzed_data_provider.ConsumeBool() ? 
&flat_file_pos : nullptr); } diff --git a/src/test/fuzz/node_eviction.cpp b/src/test/fuzz/node_eviction.cpp index 70ffc6bf37..a3f71426fa 100644 --- a/src/test/fuzz/node_eviction.cpp +++ b/src/test/fuzz/node_eviction.cpp @@ -31,7 +31,7 @@ FUZZ_TARGET(node_eviction) /* nKeyedNetGroup */ fuzzed_data_provider.ConsumeIntegral<uint64_t>(), /* prefer_evict */ fuzzed_data_provider.ConsumeBool(), /* m_is_local */ fuzzed_data_provider.ConsumeBool(), - /* m_is_onion */ fuzzed_data_provider.ConsumeBool(), + /* m_network */ fuzzed_data_provider.PickValueInArray(ALL_NETWORKS), }); } // Make a copy since eviction_candidates may be in some valid but otherwise diff --git a/src/test/fuzz/process_message.cpp b/src/test/fuzz/process_message.cpp index 7b99193ad0..c4e4d4c785 100644 --- a/src/test/fuzz/process_message.cpp +++ b/src/test/fuzz/process_message.cpp @@ -58,7 +58,19 @@ void initialize_process_message() static const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(); g_setup = testing_setup.get(); + + // Temporary debug for https://bugs.chromium.org/p/oss-fuzz/issues/detail?id=35027 + { + LOCK(::cs_main); + assert(CheckDiskSpace(gArgs.GetDataDirNet())); + assert(CheckDiskSpace(gArgs.GetDataDirNet(), 48 * 2 * 2 * g_setup->m_node.chainman->ActiveChainstate().CoinsTip().GetCacheSize())); + } for (int i = 0; i < 2 * COINBASE_MATURITY; i++) { + { + LOCK(::cs_main); + assert(CheckDiskSpace(gArgs.GetDataDirNet())); + assert(CheckDiskSpace(gArgs.GetDataDirNet(), 48 * 2 * 2 * g_setup->m_node.chainman->ActiveChainstate().CoinsTip().GetCacheSize())); + } MineBlock(g_setup->m_node, CScript() << OP_TRUE); } SyncWithValidationInterfaceQueue(); diff --git a/src/test/fuzz/validation_load_mempool.cpp b/src/test/fuzz/validation_load_mempool.cpp index e1a21b6c53..c2aaf486c5 100644 --- a/src/test/fuzz/validation_load_mempool.cpp +++ b/src/test/fuzz/validation_load_mempool.cpp @@ -14,9 +14,14 @@ #include <cstdint> #include <vector> +namespace { +const TestingSetup* g_setup; +} // namespace + void initialize_validation_load_mempool() { static const auto testing_setup = MakeNoLogFileContext<const TestingSetup>(); + g_setup = testing_setup.get(); } FUZZ_TARGET_INIT(validation_load_mempool, initialize_validation_load_mempool) @@ -29,6 +34,6 @@ FUZZ_TARGET_INIT(validation_load_mempool, initialize_validation_load_mempool) auto fuzzed_fopen = [&](const fs::path&, const char*) { return fuzzed_file_provider.open(); }; - (void)LoadMempool(pool, ::ChainstateActive(), fuzzed_fopen); + (void)LoadMempool(pool, g_setup->m_node.chainman->ActiveChainstate(), fuzzed_fopen); (void)DumpMempool(pool, fuzzed_fopen, true); } diff --git a/src/test/interfaces_tests.cpp b/src/test/interfaces_tests.cpp index 73463b071e..42a7c7798c 100644 --- a/src/test/interfaces_tests.cpp +++ b/src/test/interfaces_tests.cpp @@ -98,7 +98,7 @@ BOOST_AUTO_TEST_CASE(findCommonAncestor) auto* orig_tip = active.Tip(); for (int i = 0; i < 10; ++i) { BlockValidationState state; - ChainstateActive().InvalidateBlock(state, Params(), active.Tip()); + m_node.chainman->ActiveChainstate().InvalidateBlock(state, Params(), active.Tip()); } BOOST_CHECK_EQUAL(active.Height(), orig_tip->nHeight - 10); coinbaseKey.MakeNewKey(true); diff --git a/src/test/key_tests.cpp b/src/test/key_tests.cpp index cb66d5164e..b915982d98 100644 --- a/src/test/key_tests.cpp +++ b/src/test/key_tests.cpp @@ -300,6 +300,48 @@ BOOST_AUTO_TEST_CASE(bip340_test_vectors) auto sig = ParseHex(test.first[2]); BOOST_CHECK_EQUAL(XOnlyPubKey(pubkey).VerifySchnorr(uint256(msg), sig), test.second); } + + 
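Editor's note: the new key_tests vectors below exercise signing as well as verification. The following is a rough sketch of the sign-then-verify roundtrip they check, assuming the in-tree key.h/pubkey.h API (CKey::SignSchnorr, XOnlyPubKey::VerifySchnorr) and ECC initialization as provided by the test fixture; the function name is invented for illustration.

// Sketch only: BIP340 sign-then-verify roundtrip, inside the unit-test
// environment (ECC_Start()/ECCVerifyHandle set up by BasicTestingSetup).
void SchnorrRoundTripSketch()
{
    CKey key;
    key.MakeNewKey(/* fCompressed */ true);
    const XOnlyPubKey pubkey(key.GetPubKey());

    const uint256 msg = InsecureRand256(); // message hash to sign
    const uint256 aux = InsecureRand256(); // BIP340 auxiliary randomness
    unsigned char sig[64];

    // Sign with no taproot tweak (nullptr merkle_root) and fresh aux randomness.
    const bool ok = key.SignSchnorr(msg, sig, /* merkle_root */ nullptr, &aux);
    assert(ok);
    // The untweaked x-only pubkey must accept the signature.
    assert(pubkey.VerifySchnorr(msg, sig));
}
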
static const std::vector<std::array<std::string, 5>> SIGN_VECTORS = { + {{"0000000000000000000000000000000000000000000000000000000000000003", "F9308A019258C31049344F85F89D5229B531C845836F99B08601F113BCE036F9", "0000000000000000000000000000000000000000000000000000000000000000", "0000000000000000000000000000000000000000000000000000000000000000", "E907831F80848D1069A5371B402410364BDF1C5F8307B0084C55F1CE2DCA821525F66A4A85EA8B71E482A74F382D2CE5EBEEE8FDB2172F477DF4900D310536C0"}}, + {{"B7E151628AED2A6ABF7158809CF4F3C762E7160F38B4DA56A784D9045190CFEF", "DFF1D77F2A671C5F36183726DB2341BE58FEAE1DA2DECED843240F7B502BA659", "0000000000000000000000000000000000000000000000000000000000000001", "243F6A8885A308D313198A2E03707344A4093822299F31D0082EFA98EC4E6C89", "6896BD60EEAE296DB48A229FF71DFE071BDE413E6D43F917DC8DCF8C78DE33418906D11AC976ABCCB20B091292BFF4EA897EFCB639EA871CFA95F6DE339E4B0A"}}, + {{"C90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B14E5C9", "DD308AFEC5777E13121FA72B9CC1B7CC0139715309B086C960E18FD969774EB8", "C87AA53824B4D7AE2EB035A2B5BBBCCC080E76CDC6D1692C4B0B62D798E6D906", "7E2D58D8B3BCDF1ABADEC7829054F90DDA9805AAB56C77333024B9D0A508B75C", "5831AAEED7B44BB74E5EAB94BA9D4294C49BCF2A60728D8B4C200F50DD313C1BAB745879A5AD954A72C45A91C3A51D3C7ADEA98D82F8481E0E1E03674A6F3FB7"}}, + {{"0B432B2677937381AEF05BB02A66ECD012773062CF3FA2549E44F58ED2401710", "25D1DFF95105F5253C4022F628A996AD3A0D95FBF21D468A1B33F8C160D8F517", "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", "FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF", "7EB0509757E246F19449885651611CB965ECC1A187DD51B64FDA1EDC9637D5EC97582B9CB13DB3933705B32BA982AF5AF25FD78881EBB32771FC5922EFC66EA3"}}, + }; + + for (const auto& [sec_hex, pub_hex, aux_hex, msg_hex, sig_hex] : SIGN_VECTORS) { + auto sec = ParseHex(sec_hex); + auto pub = ParseHex(pub_hex); + uint256 aux256(ParseHex(aux_hex)); + uint256 msg256(ParseHex(msg_hex)); + auto sig = ParseHex(sig_hex); + unsigned char sig64[64]; + + // Run the untweaked test vectors above, comparing with exact expected signature. + CKey key; + key.Set(sec.begin(), sec.end(), true); + XOnlyPubKey pubkey(key.GetPubKey()); + BOOST_CHECK(std::equal(pubkey.begin(), pubkey.end(), pub.begin(), pub.end())); + bool ok = key.SignSchnorr(msg256, sig64, nullptr, &aux256); + BOOST_CHECK(ok); + BOOST_CHECK(std::vector<unsigned char>(sig64, sig64 + 64) == sig); + // Verify those signatures for good measure. + BOOST_CHECK(pubkey.VerifySchnorr(msg256, sig64)); + + // Do 10 iterations where we sign with a random Merkle root to tweak, + // and compare against the resulting tweaked keys, with random aux. + // In iteration i=0 we tweak with empty Merkle tree. + for (int i = 0; i < 10; ++i) { + uint256 merkle_root; + if (i) merkle_root = InsecureRand256(); + auto tweaked = pubkey.CreateTapTweak(i ? 
&merkle_root : nullptr); + BOOST_CHECK(tweaked); + XOnlyPubKey tweaked_key = tweaked->first; + aux256 = InsecureRand256(); + bool ok = key.SignSchnorr(msg256, sig64, &merkle_root, &aux256); + BOOST_CHECK(ok); + BOOST_CHECK(tweaked_key.VerifySchnorr(msg256, sig64)); + } + } } BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/miner_tests.cpp b/src/test/miner_tests.cpp index c47d0eae1e..e20c5e4e8f 100644 --- a/src/test/miner_tests.cpp +++ b/src/test/miner_tests.cpp @@ -28,8 +28,8 @@ struct MinerTestingSetup : public TestingSetup { void TestPackageSelection(const CChainParams& chainparams, const CScript& scriptPubKey, const std::vector<CTransactionRef>& txFirst) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_node.mempool->cs); bool TestSequenceLocks(const CTransaction& tx, int flags) EXCLUSIVE_LOCKS_REQUIRED(::cs_main, m_node.mempool->cs) { - CCoinsViewMemPool viewMempool(&m_node.chainman->ActiveChainstate().CoinsTip(), *m_node.mempool); - return CheckSequenceLocks(m_node.chainman->ActiveChain().Tip(), viewMempool, tx, flags); + CCoinsViewMemPool view_mempool(&m_node.chainman->ActiveChainstate().CoinsTip(), *m_node.mempool); + return CheckSequenceLocks(m_node.chainman->ActiveChain().Tip(), view_mempool, tx, flags); } BlockAssembler AssemblerForTest(const CChainParams& params); }; @@ -45,7 +45,7 @@ BlockAssembler MinerTestingSetup::AssemblerForTest(const CChainParams& params) options.nBlockMaxWeight = MAX_BLOCK_WEIGHT; options.blockMinFeeRate = blockMinFeeRate; - return BlockAssembler(::ChainstateActive(), *m_node.mempool, params, options); + return BlockAssembler(m_node.chainman->ActiveChainstate(), *m_node.mempool, params, options); } constexpr static struct { @@ -82,11 +82,11 @@ constexpr static struct { {2, 0xbbbeb305}, {2, 0xfe1c810a}, }; -static CBlockIndex CreateBlockIndex(int nHeight) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +static CBlockIndex CreateBlockIndex(int nHeight, CBlockIndex* active_chain_tip) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { CBlockIndex index; index.nHeight = nHeight; - index.pprev = ::ChainActive().Tip(); + index.pprev = active_chain_tip; return index; } @@ -228,17 +228,17 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) { LOCK(cs_main); pblock->nVersion = 1; - pblock->nTime = ::ChainActive().Tip()->GetMedianTimePast()+1; + pblock->nTime = m_node.chainman->ActiveChain().Tip()->GetMedianTimePast()+1; CMutableTransaction txCoinbase(*pblock->vtx[0]); txCoinbase.nVersion = 1; txCoinbase.vin[0].scriptSig = CScript(); txCoinbase.vin[0].scriptSig.push_back(bi.extranonce); - txCoinbase.vin[0].scriptSig.push_back(::ChainActive().Height()); + txCoinbase.vin[0].scriptSig.push_back(m_node.chainman->ActiveChain().Height()); txCoinbase.vout.resize(1); // Ignore the (optional) segwit commitment added by CreateNewBlock (as the hardcoded nonces don't account for this) txCoinbase.vout[0].scriptPubKey = CScript(); pblock->vtx[0] = MakeTransactionRef(std::move(txCoinbase)); if (txFirst.size() == 0) - baseheight = ::ChainActive().Height(); + baseheight = m_node.chainman->ActiveChain().Height(); if (txFirst.size() < 4) txFirst.push_back(pblock->vtx[0]); pblock->hashMerkleRoot = BlockMerkleRoot(*pblock); @@ -364,29 +364,29 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) m_node.mempool->clear(); // subsidy changing - int nHeight = ::ChainActive().Height(); + int nHeight = m_node.chainman->ActiveChain().Height(); // Create an actual 209999-long block chain (without valid blocks). 
- while (::ChainActive().Tip()->nHeight < 209999) { - CBlockIndex* prev = ::ChainActive().Tip(); + while (m_node.chainman->ActiveChain().Tip()->nHeight < 209999) { + CBlockIndex* prev = m_node.chainman->ActiveChain().Tip(); CBlockIndex* next = new CBlockIndex(); next->phashBlock = new uint256(InsecureRand256()); - ::ChainstateActive().CoinsTip().SetBestBlock(next->GetBlockHash()); + m_node.chainman->ActiveChainstate().CoinsTip().SetBestBlock(next->GetBlockHash()); next->pprev = prev; next->nHeight = prev->nHeight + 1; next->BuildSkip(); - ::ChainActive().SetTip(next); + m_node.chainman->ActiveChain().SetTip(next); } BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey)); // Extend to a 210000-long block chain. - while (::ChainActive().Tip()->nHeight < 210000) { - CBlockIndex* prev = ::ChainActive().Tip(); + while (m_node.chainman->ActiveChain().Tip()->nHeight < 210000) { + CBlockIndex* prev = m_node.chainman->ActiveChain().Tip(); CBlockIndex* next = new CBlockIndex(); next->phashBlock = new uint256(InsecureRand256()); - ::ChainstateActive().CoinsTip().SetBestBlock(next->GetBlockHash()); + m_node.chainman->ActiveChainstate().CoinsTip().SetBestBlock(next->GetBlockHash()); next->pprev = prev; next->nHeight = prev->nHeight + 1; next->BuildSkip(); - ::ChainActive().SetTip(next); + m_node.chainman->ActiveChain().SetTip(next); } BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey)); @@ -409,16 +409,16 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) m_node.mempool->clear(); // Delete the dummy blocks again. - while (::ChainActive().Tip()->nHeight > nHeight) { - CBlockIndex* del = ::ChainActive().Tip(); - ::ChainActive().SetTip(del->pprev); - ::ChainstateActive().CoinsTip().SetBestBlock(del->pprev->GetBlockHash()); + while (m_node.chainman->ActiveChain().Tip()->nHeight > nHeight) { + CBlockIndex* del = m_node.chainman->ActiveChain().Tip(); + m_node.chainman->ActiveChain().SetTip(del->pprev); + m_node.chainman->ActiveChainstate().CoinsTip().SetBestBlock(del->pprev->GetBlockHash()); delete del->phashBlock; delete del; } // non-final txs in mempool - SetMockTime(::ChainActive().Tip()->GetMedianTimePast()+1); + SetMockTime(m_node.chainman->ActiveChain().Tip()->GetMedianTimePast()+1); int flags = LOCKTIME_VERIFY_SEQUENCE|LOCKTIME_MEDIAN_TIME_PAST; // height map std::vector<int> prevheights; @@ -430,7 +430,7 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) tx.vin[0].prevout.hash = txFirst[0]->GetHash(); // only 1 transaction tx.vin[0].prevout.n = 0; tx.vin[0].scriptSig = CScript() << OP_1; - tx.vin[0].nSequence = ::ChainActive().Tip()->nHeight + 1; // txFirst[0] is the 2nd block + tx.vin[0].nSequence = m_node.chainman->ActiveChain().Tip()->nHeight + 1; // txFirst[0] is the 2nd block prevheights[0] = baseheight + 1; tx.vout.resize(1); tx.vout[0].nValue = BLOCKSUBSIDY-HIGHFEE; @@ -438,53 +438,62 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) tx.nLockTime = 0; hash = tx.GetHash(); m_node.mempool->addUnchecked(entry.Fee(HIGHFEE).Time(GetTime()).SpendsCoinbase(true).FromTx(tx)); - BOOST_CHECK(CheckFinalTx(::ChainActive().Tip(), CTransaction(tx), flags)); // Locktime passes + BOOST_CHECK(CheckFinalTx(m_node.chainman->ActiveChain().Tip(), CTransaction(tx), flags)); // Locktime passes BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks fail - BOOST_CHECK(SequenceLocks(CTransaction(tx), flags, prevheights, CreateBlockIndex(::ChainActive().Tip()->nHeight + 2))); // Sequence locks pass on 2nd block + + { + CBlockIndex* 
active_chain_tip = m_node.chainman->ActiveChain().Tip(); + BOOST_CHECK(SequenceLocks(CTransaction(tx), flags, prevheights, CreateBlockIndex(active_chain_tip->nHeight + 2, active_chain_tip))); // Sequence locks pass on 2nd block + } // relative time locked tx.vin[0].prevout.hash = txFirst[1]->GetHash(); - tx.vin[0].nSequence = CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG | (((::ChainActive().Tip()->GetMedianTimePast()+1-::ChainActive()[1]->GetMedianTimePast()) >> CTxIn::SEQUENCE_LOCKTIME_GRANULARITY) + 1); // txFirst[1] is the 3rd block + tx.vin[0].nSequence = CTxIn::SEQUENCE_LOCKTIME_TYPE_FLAG | (((m_node.chainman->ActiveChain().Tip()->GetMedianTimePast()+1-m_node.chainman->ActiveChain()[1]->GetMedianTimePast()) >> CTxIn::SEQUENCE_LOCKTIME_GRANULARITY) + 1); // txFirst[1] is the 3rd block prevheights[0] = baseheight + 2; hash = tx.GetHash(); m_node.mempool->addUnchecked(entry.Time(GetTime()).FromTx(tx)); - BOOST_CHECK(CheckFinalTx(::ChainActive().Tip(), CTransaction(tx), flags)); // Locktime passes + BOOST_CHECK(CheckFinalTx(m_node.chainman->ActiveChain().Tip(), CTransaction(tx), flags)); // Locktime passes BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks fail for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) - ::ChainActive().Tip()->GetAncestor(::ChainActive().Tip()->nHeight - i)->nTime += 512; //Trick the MedianTimePast - BOOST_CHECK(SequenceLocks(CTransaction(tx), flags, prevheights, CreateBlockIndex(::ChainActive().Tip()->nHeight + 1))); // Sequence locks pass 512 seconds later + m_node.chainman->ActiveChain().Tip()->GetAncestor(m_node.chainman->ActiveChain().Tip()->nHeight - i)->nTime += 512; //Trick the MedianTimePast + + { + CBlockIndex* active_chain_tip = m_node.chainman->ActiveChain().Tip(); + BOOST_CHECK(SequenceLocks(CTransaction(tx), flags, prevheights, CreateBlockIndex(active_chain_tip->nHeight + 1, active_chain_tip))); // Sequence locks pass 512 seconds later + } + for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) - ::ChainActive().Tip()->GetAncestor(::ChainActive().Tip()->nHeight - i)->nTime -= 512; //undo tricked MTP + m_node.chainman->ActiveChain().Tip()->GetAncestor(m_node.chainman->ActiveChain().Tip()->nHeight - i)->nTime -= 512; //undo tricked MTP // absolute height locked tx.vin[0].prevout.hash = txFirst[2]->GetHash(); tx.vin[0].nSequence = CTxIn::SEQUENCE_FINAL - 1; prevheights[0] = baseheight + 3; - tx.nLockTime = ::ChainActive().Tip()->nHeight + 1; + tx.nLockTime = m_node.chainman->ActiveChain().Tip()->nHeight + 1; hash = tx.GetHash(); m_node.mempool->addUnchecked(entry.Time(GetTime()).FromTx(tx)); - BOOST_CHECK(!CheckFinalTx(::ChainActive().Tip(), CTransaction(tx), flags)); // Locktime fails + BOOST_CHECK(!CheckFinalTx(m_node.chainman->ActiveChain().Tip(), CTransaction(tx), flags)); // Locktime fails BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks pass - BOOST_CHECK(IsFinalTx(CTransaction(tx), ::ChainActive().Tip()->nHeight + 2, ::ChainActive().Tip()->GetMedianTimePast())); // Locktime passes on 2nd block + BOOST_CHECK(IsFinalTx(CTransaction(tx), m_node.chainman->ActiveChain().Tip()->nHeight + 2, m_node.chainman->ActiveChain().Tip()->GetMedianTimePast())); // Locktime passes on 2nd block // absolute time locked tx.vin[0].prevout.hash = txFirst[3]->GetHash(); - tx.nLockTime = ::ChainActive().Tip()->GetMedianTimePast(); + tx.nLockTime = m_node.chainman->ActiveChain().Tip()->GetMedianTimePast(); prevheights.resize(1); prevheights[0] = baseheight + 4; hash = tx.GetHash(); 
m_node.mempool->addUnchecked(entry.Time(GetTime()).FromTx(tx)); - BOOST_CHECK(!CheckFinalTx(::ChainActive().Tip(), CTransaction(tx), flags)); // Locktime fails + BOOST_CHECK(!CheckFinalTx(m_node.chainman->ActiveChain().Tip(), CTransaction(tx), flags)); // Locktime fails BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks pass - BOOST_CHECK(IsFinalTx(CTransaction(tx), ::ChainActive().Tip()->nHeight + 2, ::ChainActive().Tip()->GetMedianTimePast() + 1)); // Locktime passes 1 second later + BOOST_CHECK(IsFinalTx(CTransaction(tx), m_node.chainman->ActiveChain().Tip()->nHeight + 2, m_node.chainman->ActiveChain().Tip()->GetMedianTimePast() + 1)); // Locktime passes 1 second later // mempool-dependent transactions (not added) tx.vin[0].prevout.hash = hash; - prevheights[0] = ::ChainActive().Tip()->nHeight + 1; + prevheights[0] = m_node.chainman->ActiveChain().Tip()->nHeight + 1; tx.nLockTime = 0; tx.vin[0].nSequence = 0; - BOOST_CHECK(CheckFinalTx(::ChainActive().Tip(), CTransaction(tx), flags)); // Locktime passes + BOOST_CHECK(CheckFinalTx(m_node.chainman->ActiveChain().Tip(), CTransaction(tx), flags)); // Locktime passes BOOST_CHECK(TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks pass tx.vin[0].nSequence = 1; BOOST_CHECK(!TestSequenceLocks(CTransaction(tx), flags)); // Sequence locks fail @@ -502,14 +511,14 @@ BOOST_AUTO_TEST_CASE(CreateNewBlock_validity) BOOST_CHECK_EQUAL(pblocktemplate->block.vtx.size(), 3U); // However if we advance height by 1 and time by 512, all of them should be mined for (int i = 0; i < CBlockIndex::nMedianTimeSpan; i++) - ::ChainActive().Tip()->GetAncestor(::ChainActive().Tip()->nHeight - i)->nTime += 512; //Trick the MedianTimePast - ::ChainActive().Tip()->nHeight++; - SetMockTime(::ChainActive().Tip()->GetMedianTimePast() + 1); + m_node.chainman->ActiveChain().Tip()->GetAncestor(m_node.chainman->ActiveChain().Tip()->nHeight - i)->nTime += 512; //Trick the MedianTimePast + m_node.chainman->ActiveChain().Tip()->nHeight++; + SetMockTime(m_node.chainman->ActiveChain().Tip()->GetMedianTimePast() + 1); BOOST_CHECK(pblocktemplate = AssemblerForTest(chainparams).CreateNewBlock(scriptPubKey)); BOOST_CHECK_EQUAL(pblocktemplate->block.vtx.size(), 5U); - ::ChainActive().Tip()->nHeight--; + m_node.chainman->ActiveChain().Tip()->nHeight--; SetMockTime(0); m_node.mempool->clear(); diff --git a/src/test/net_peer_eviction_tests.cpp b/src/test/net_peer_eviction_tests.cpp index 31d391bf7d..4bfd487b86 100644 --- a/src/test/net_peer_eviction_tests.cpp +++ b/src/test/net_peer_eviction_tests.cpp @@ -2,7 +2,9 @@ // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. 
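Editor's note on the relative-time-lock expression in the miner test hunks above: it packs a count of 512-second units into nSequence per BIP68. Below is a small standalone sketch of that encoding, using the consensus values (type flag at bit 22, granularity shift of 9); the constant and helper names are illustrative, not the CTxIn identifiers.

#include <cassert>
#include <cstdint>

// BIP68 relative lock-time encoding (values as used via CTxIn in the tests above).
constexpr uint32_t SEQ_LOCKTIME_TYPE_FLAG = 1u << 22; // set: lock is time-based, not height-based
constexpr int SEQ_LOCKTIME_GRANULARITY = 9;           // time locks count 512-second units

// Encode "at least `seconds` must pass after the input's block" into nSequence.
uint32_t EncodeRelativeTimeLock(uint32_t seconds)
{
    return SEQ_LOCKTIME_TYPE_FLAG | (seconds >> SEQ_LOCKTIME_GRANULARITY);
}

int main()
{
    // 1024 seconds -> 2 units of 512 seconds.
    assert(EncodeRelativeTimeLock(1024) == (SEQ_LOCKTIME_TYPE_FLAG | 2));
    // The miner test uses (MTP_delta >> GRANULARITY) + 1 so the lock stays in the
    // future until the median time past advances by another 512 seconds.
    return 0;
}
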
+#include <netaddress.h> #include <net.h> +#include <test/util/net.h> #include <test/util/setup_common.h> #include <boost/test/unit_test.hpp> @@ -15,11 +17,6 @@ BOOST_FIXTURE_TEST_SUITE(net_peer_eviction_tests, BasicTestingSetup) -namespace { -constexpr int NODE_EVICTION_TEST_ROUNDS{10}; -constexpr int NODE_EVICTION_TEST_UP_TO_N_NODES{200}; -} // namespace - std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(const int n_candidates, FastRandomContext& random_context) { std::vector<NodeEvictionCandidate> candidates; @@ -36,7 +33,7 @@ std::vector<NodeEvictionCandidate> GetRandomNodeEvictionCandidates(const int n_c /* nKeyedNetGroup */ random_context.randrange(100), /* prefer_evict */ random_context.randbool(), /* m_is_local */ random_context.randbool(), - /* m_is_onion */ random_context.randbool(), + /* m_network */ ALL_NETWORKS[random_context.randrange(ALL_NETWORKS.size())], }); } return candidates; @@ -94,7 +91,8 @@ BOOST_AUTO_TEST_CASE(peer_protection_test) BOOST_CHECK(IsProtected( num_peers, [](NodeEvictionCandidate& c) { c.nTimeConnected = c.id; - c.m_is_onion = c.m_is_local = false; + c.m_is_local = false; + c.m_network = NET_IPV4; }, /* protected_peer_ids */ {0, 1, 2, 3, 4, 5}, /* unprotected_peer_ids */ {6, 7, 8, 9, 10, 11}, @@ -104,129 +102,359 @@ BOOST_AUTO_TEST_CASE(peer_protection_test) BOOST_CHECK(IsProtected( num_peers, [num_peers](NodeEvictionCandidate& c) { c.nTimeConnected = num_peers - c.id; - c.m_is_onion = c.m_is_local = false; + c.m_is_local = false; + c.m_network = NET_IPV6; }, /* protected_peer_ids */ {6, 7, 8, 9, 10, 11}, /* unprotected_peer_ids */ {0, 1, 2, 3, 4, 5}, random_context)); - // Test protection of onion and localhost peers... + // Test protection of onion, localhost, and I2P peers... // Expect 1/4 onion peers to be protected from eviction, - // independently of other characteristics. + // if no localhost or I2P peers. BOOST_CHECK(IsProtected( num_peers, [](NodeEvictionCandidate& c) { - c.m_is_onion = (c.id == 3 || c.id == 8 || c.id == 9); + c.m_is_local = false; + c.m_network = (c.id == 3 || c.id == 8 || c.id == 9) ? NET_ONION : NET_IPV4; }, /* protected_peer_ids */ {3, 8, 9}, /* unprotected_peer_ids */ {}, random_context)); - // Expect 1/4 onion peers and 1/4 of the others to be protected - // from eviction, sorted by longest uptime (lowest nTimeConnected). + // Expect 1/4 onion peers and 1/4 of the other peers to be protected, + // sorted by longest uptime (lowest nTimeConnected), if no localhost or I2P peers. BOOST_CHECK(IsProtected( num_peers, [](NodeEvictionCandidate& c) { c.nTimeConnected = c.id; c.m_is_local = false; - c.m_is_onion = (c.id == 3 || c.id > 7); + c.m_network = (c.id == 3 || c.id > 7) ? NET_ONION : NET_IPV6; }, /* protected_peer_ids */ {0, 1, 2, 3, 8, 9}, /* unprotected_peer_ids */ {4, 5, 6, 7, 10, 11}, random_context)); // Expect 1/4 localhost peers to be protected from eviction, - // if no onion peers. + // if no onion or I2P peers. BOOST_CHECK(IsProtected( num_peers, [](NodeEvictionCandidate& c) { - c.m_is_onion = false; c.m_is_local = (c.id == 1 || c.id == 9 || c.id == 11); + c.m_network = NET_IPV4; }, /* protected_peer_ids */ {1, 9, 11}, /* unprotected_peer_ids */ {}, random_context)); // Expect 1/4 localhost peers and 1/4 of the other peers to be protected, - // sorted by longest uptime (lowest nTimeConnected), if no onion peers. + // sorted by longest uptime (lowest nTimeConnected), if no onion or I2P peers. 
BOOST_CHECK(IsProtected( num_peers, [](NodeEvictionCandidate& c) { c.nTimeConnected = c.id; - c.m_is_onion = false; c.m_is_local = (c.id > 6); + c.m_network = NET_IPV6; }, /* protected_peer_ids */ {0, 1, 2, 7, 8, 9}, /* unprotected_peer_ids */ {3, 4, 5, 6, 10, 11}, random_context)); - // Combined test: expect 1/4 onion and 2 localhost peers to be protected - // from eviction, sorted by longest uptime. + // Expect 1/4 I2P peers to be protected from eviction, + // if no onion or localhost peers. + BOOST_CHECK(IsProtected( + num_peers, [](NodeEvictionCandidate& c) { + c.m_is_local = false; + c.m_network = (c.id == 2 || c.id == 7 || c.id == 10) ? NET_I2P : NET_IPV4; + }, + /* protected_peer_ids */ {2, 7, 10}, + /* unprotected_peer_ids */ {}, + random_context)); + + // Expect 1/4 I2P peers and 1/4 of the other peers to be protected, + // sorted by longest uptime (lowest nTimeConnected), if no onion or localhost peers. BOOST_CHECK(IsProtected( num_peers, [](NodeEvictionCandidate& c) { c.nTimeConnected = c.id; - c.m_is_onion = (c.id == 0 || c.id == 5 || c.id == 10); - c.m_is_local = (c.id == 1 || c.id == 9 || c.id == 11); + c.m_is_local = false; + c.m_network = (c.id == 4 || c.id > 8) ? NET_I2P : NET_IPV6; }, - /* protected_peer_ids */ {0, 1, 2, 5, 9, 10}, - /* unprotected_peer_ids */ {3, 4, 6, 7, 8, 11}, + /* protected_peer_ids */ {0, 1, 2, 4, 9, 10}, + /* unprotected_peer_ids */ {3, 5, 6, 7, 8, 11}, random_context)); - // Combined test: expect having only 1 onion to allow allocating the - // remaining 2 of the 1/4 to localhost peers, sorted by longest uptime. + // Tests with 2 networks... + + // Combined test: expect having 1 localhost and 1 onion peer out of 4 to + // protect 1 localhost, 0 onion and 1 other peer, sorted by longest uptime; + // stable sort breaks tie with array order of localhost first. BOOST_CHECK(IsProtected( - num_peers + 4, [](NodeEvictionCandidate& c) { + 4, [](NodeEvictionCandidate& c) { c.nTimeConnected = c.id; - c.m_is_onion = (c.id == 15); - c.m_is_local = (c.id > 6 && c.id < 11); + c.m_is_local = (c.id == 4); + c.m_network = (c.id == 3) ? NET_ONION : NET_IPV4; }, - /* protected_peer_ids */ {0, 1, 2, 3, 7, 8, 9, 15}, - /* unprotected_peer_ids */ {4, 5, 6, 10, 11, 12, 13, 14}, + /* protected_peer_ids */ {0, 4}, + /* unprotected_peer_ids */ {1, 2}, + random_context)); + + // Combined test: expect having 1 localhost and 1 onion peer out of 7 to + // protect 1 localhost, 0 onion, and 2 other peers (3 total), sorted by + // uptime; stable sort breaks tie with array order of localhost first. + BOOST_CHECK(IsProtected( + 7, [](NodeEvictionCandidate& c) { + c.nTimeConnected = c.id; + c.m_is_local = (c.id == 6); + c.m_network = (c.id == 5) ? NET_ONION : NET_IPV4; + }, + /* protected_peer_ids */ {0, 1, 6}, + /* unprotected_peer_ids */ {2, 3, 4, 5}, + random_context)); + + // Combined test: expect having 1 localhost and 1 onion peer out of 8 to + // protect protect 1 localhost, 1 onion and 2 other peers (4 total), sorted + // by uptime; stable sort breaks tie with array order of localhost first. + BOOST_CHECK(IsProtected( + 8, [](NodeEvictionCandidate& c) { + c.nTimeConnected = c.id; + c.m_is_local = (c.id == 6); + c.m_network = (c.id == 5) ? NET_ONION : NET_IPV4; + }, + /* protected_peer_ids */ {0, 1, 5, 6}, + /* unprotected_peer_ids */ {2, 3, 4, 7}, random_context)); - // Combined test: expect 2 onions (< 1/4) to allow allocating the minimum 2 - // localhost peers, sorted by longest uptime. 
+ // Combined test: expect having 3 localhost and 3 onion peers out of 12 to + // protect 2 localhost and 1 onion, plus 3 other peers, sorted by longest + // uptime; stable sort breaks ties with the array order of localhost first. BOOST_CHECK(IsProtected( num_peers, [](NodeEvictionCandidate& c) { c.nTimeConnected = c.id; - c.m_is_onion = (c.id == 7 || c.id == 9); - c.m_is_local = (c.id == 6 || c.id == 11); + c.m_is_local = (c.id == 6 || c.id == 9 || c.id == 11); + c.m_network = (c.id == 7 || c.id == 8 || c.id == 10) ? NET_ONION : NET_IPV6; }, - /* protected_peer_ids */ {0, 1, 6, 7, 9, 11}, - /* unprotected_peer_ids */ {2, 3, 4, 5, 8, 10}, + /* protected_peer_ids */ {0, 1, 2, 6, 7, 9}, + /* unprotected_peer_ids */ {3, 4, 5, 8, 10, 11}, random_context)); - // Combined test: when > 1/4, expect max 1/4 onion and 2 localhost peers - // to be protected from eviction, sorted by longest uptime. + // Combined test: expect having 4 localhost and 1 onion peer out of 12 to + // protect 2 localhost and 1 onion, plus 3 other peers, sorted by longest uptime. BOOST_CHECK(IsProtected( num_peers, [](NodeEvictionCandidate& c) { c.nTimeConnected = c.id; - c.m_is_onion = (c.id > 3 && c.id < 8); - c.m_is_local = (c.id > 7); + c.m_is_local = (c.id > 4 && c.id < 9); + c.m_network = (c.id == 10) ? NET_ONION : NET_IPV4; }, - /* protected_peer_ids */ {0, 4, 5, 6, 8, 9}, - /* unprotected_peer_ids */ {1, 2, 3, 7, 10, 11}, + /* protected_peer_ids */ {0, 1, 2, 5, 6, 10}, + /* unprotected_peer_ids */ {3, 4, 7, 8, 9, 11}, random_context)); - // Combined test: idem > 1/4 with only 8 peers: expect 2 onion and 2 - // localhost peers (1/4 + 2) to be protected, sorted by longest uptime. + // Combined test: expect having 4 localhost and 2 onion peers out of 16 to + // protect 2 localhost and 2 onions, plus 4 other peers, sorted by longest uptime. BOOST_CHECK(IsProtected( - 8, [](NodeEvictionCandidate& c) { + 16, [](NodeEvictionCandidate& c) { + c.nTimeConnected = c.id; + c.m_is_local = (c.id == 6 || c.id == 9 || c.id == 11 || c.id == 12); + c.m_network = (c.id == 8 || c.id == 10) ? NET_ONION : NET_IPV6; + }, + /* protected_peer_ids */ {0, 1, 2, 3, 6, 8, 9, 10}, + /* unprotected_peer_ids */ {4, 5, 7, 11, 12, 13, 14, 15}, + random_context)); + + // Combined test: expect having 5 localhost and 1 onion peer out of 16 to + // protect 3 localhost (recovering the unused onion slot), 1 onion, and 4 + // others, sorted by longest uptime. + BOOST_CHECK(IsProtected( + 16, [](NodeEvictionCandidate& c) { + c.nTimeConnected = c.id; + c.m_is_local = (c.id > 10); + c.m_network = (c.id == 10) ? NET_ONION : NET_IPV4; + }, + /* protected_peer_ids */ {0, 1, 2, 3, 10, 11, 12, 13}, + /* unprotected_peer_ids */ {4, 5, 6, 7, 8, 9, 14, 15}, + random_context)); + + // Combined test: expect having 1 localhost and 4 onion peers out of 16 to + // protect 1 localhost and 3 onions (recovering the unused localhost slot), + // plus 4 others, sorted by longest uptime. + BOOST_CHECK(IsProtected( + 16, [](NodeEvictionCandidate& c) { + c.nTimeConnected = c.id; + c.m_is_local = (c.id == 15); + c.m_network = (c.id > 6 && c.id < 11) ? NET_ONION : NET_IPV6; + }, + /* protected_peer_ids */ {0, 1, 2, 3, 7, 8, 9, 15}, + /* unprotected_peer_ids */ {5, 6, 10, 11, 12, 13, 14}, + random_context)); + + // Combined test: expect having 2 onion and 4 I2P out of 12 peers to protect + // 2 onion (prioritized for having fewer candidates) and 1 I2P, plus 3 + // others, sorted by longest uptime. 
+ BOOST_CHECK(IsProtected( + num_peers, [](NodeEvictionCandidate& c) { c.nTimeConnected = c.id; - c.m_is_onion = (c.id > 1 && c.id < 5); - c.m_is_local = (c.id > 4); + c.m_is_local = false; + if (c.id == 8 || c.id == 10) { + c.m_network = NET_ONION; + } else if (c.id == 6 || c.id == 9 || c.id == 11 || c.id == 12) { + c.m_network = NET_I2P; + } else { + c.m_network = NET_IPV4; + } }, - /* protected_peer_ids */ {2, 3, 5, 6}, - /* unprotected_peer_ids */ {0, 1, 4, 7}, + /* protected_peer_ids */ {0, 1, 2, 6, 8, 10}, + /* unprotected_peer_ids */ {3, 4, 5, 7, 9, 11}, random_context)); - // Combined test: idem > 1/4 with only 6 peers: expect 1 onion peer and no - // localhost peers (1/4 + 0) to be protected, sorted by longest uptime. + // Tests with 3 networks... + + // Combined test: expect having 1 localhost, 1 I2P and 1 onion peer out of 4 + // to protect 1 I2P, 0 localhost, 0 onion and 1 other peer (2 total), sorted + // by longest uptime; stable sort breaks tie with array order of I2P first. BOOST_CHECK(IsProtected( - 6, [](NodeEvictionCandidate& c) { + 4, [](NodeEvictionCandidate& c) { c.nTimeConnected = c.id; - c.m_is_onion = (c.id == 4 || c.id == 5); c.m_is_local = (c.id == 3); + if (c.id == 4) { + c.m_network = NET_I2P; + } else if (c.id == 2) { + c.m_network = NET_ONION; + } else { + c.m_network = NET_IPV6; + } }, - /* protected_peer_ids */ {0, 1, 4}, - /* unprotected_peer_ids */ {2, 3, 5}, + /* protected_peer_ids */ {0, 4}, + /* unprotected_peer_ids */ {1, 2}, + random_context)); + + // Combined test: expect having 1 localhost, 1 I2P and 1 onion peer out of 7 + // to protect 1 I2P, 0 localhost, 0 onion and 2 other peers (3 total) sorted + // by longest uptime; stable sort breaks tie with array order of I2P first. + BOOST_CHECK(IsProtected( + 7, [](NodeEvictionCandidate& c) { + c.nTimeConnected = c.id; + c.m_is_local = (c.id == 4); + if (c.id == 6) { + c.m_network = NET_I2P; + } else if (c.id == 5) { + c.m_network = NET_ONION; + } else { + c.m_network = NET_IPV6; + } + }, + /* protected_peer_ids */ {0, 1, 6}, + /* unprotected_peer_ids */ {2, 3, 4, 5}, + random_context)); + + // Combined test: expect having 1 localhost, 1 I2P and 1 onion peer out of 8 + // to protect 1 I2P, 1 localhost, 0 onion and 2 other peers (4 total) sorted + // by uptime; stable sort breaks tie with array order of I2P then localhost. + BOOST_CHECK(IsProtected( + 8, [](NodeEvictionCandidate& c) { + c.nTimeConnected = c.id; + c.m_is_local = (c.id == 6); + if (c.id == 5) { + c.m_network = NET_I2P; + } else if (c.id == 4) { + c.m_network = NET_ONION; + } else { + c.m_network = NET_IPV6; + } + }, + /* protected_peer_ids */ {0, 1, 5, 6}, + /* unprotected_peer_ids */ {2, 3, 4, 7}, + random_context)); + + // Combined test: expect having 4 localhost, 2 I2P, and 2 onion peers out of + // 16 to protect 1 localhost, 2 I2P, and 1 onion (4/16 total), plus 4 others + // for 8 total, sorted by longest uptime. + BOOST_CHECK(IsProtected( + 16, [](NodeEvictionCandidate& c) { + c.nTimeConnected = c.id; + c.m_is_local = (c.id == 6 || c.id > 11); + if (c.id == 7 || c.id == 11) { + c.m_network = NET_I2P; + } else if (c.id == 9 || c.id == 10) { + c.m_network = NET_ONION; + } else { + c.m_network = NET_IPV4; + } + }, + /* protected_peer_ids */ {0, 1, 2, 3, 6, 7, 9, 11}, + /* unprotected_peer_ids */ {4, 5, 8, 10, 12, 13, 14, 15}, + random_context)); + + // Combined test: expect having 1 localhost, 8 I2P and 1 onion peer out of + // 24 to protect 1, 4, and 1 (6 total), plus 6 others for 12/24 total, + // sorted by longest uptime. 
+ BOOST_CHECK(IsProtected( + 24, [](NodeEvictionCandidate& c) { + c.nTimeConnected = c.id; + c.m_is_local = (c.id == 12); + if (c.id > 14 && c.id < 23) { // 4 protected instead of usual 2 + c.m_network = NET_I2P; + } else if (c.id == 23) { + c.m_network = NET_ONION; + } else { + c.m_network = NET_IPV6; + } + }, + /* protected_peer_ids */ {0, 1, 2, 3, 4, 5, 12, 15, 16, 17, 18, 23}, + /* unprotected_peer_ids */ {6, 7, 8, 9, 10, 11, 13, 14, 19, 20, 21, 22}, + random_context)); + + // Combined test: expect having 1 localhost, 3 I2P and 6 onion peers out of + // 24 to protect 1, 3, and 2 (6 total, I2P has fewer candidates and so gets the + // unused localhost slot), plus 6 others for 12/24 total, sorted by longest uptime. + BOOST_CHECK(IsProtected( + 24, [](NodeEvictionCandidate& c) { + c.nTimeConnected = c.id; + c.m_is_local = (c.id == 15); + if (c.id == 12 || c.id == 14 || c.id == 17) { + c.m_network = NET_I2P; + } else if (c.id > 17) { // 4 protected instead of usual 2 + c.m_network = NET_ONION; + } else { + c.m_network = NET_IPV4; + } + }, + /* protected_peer_ids */ {0, 1, 2, 3, 4, 5, 12, 14, 15, 17, 18, 19}, + /* unprotected_peer_ids */ {6, 7, 8, 9, 10, 11, 13, 16, 20, 21, 22, 23}, + random_context)); + + // Combined test: expect having 1 localhost, 7 I2P and 4 onion peers out of + // 24 to protect 1 localhost, 2 I2P, and 3 onions (6 total), plus 6 others + // for 12/24 total, sorted by longest uptime. + BOOST_CHECK(IsProtected( + 24, [](NodeEvictionCandidate& c) { + c.nTimeConnected = c.id; + c.m_is_local = (c.id == 13); + if (c.id > 16) { + c.m_network = NET_I2P; + } else if (c.id == 12 || c.id == 14 || c.id == 15 || c.id == 16) { + c.m_network = NET_ONION; + } else { + c.m_network = NET_IPV6; + } + }, + /* protected_peer_ids */ {0, 1, 2, 3, 4, 5, 12, 13, 14, 15, 17, 18}, + /* unprotected_peer_ids */ {6, 7, 8, 9, 10, 11, 16, 19, 20, 21, 22, 23}, + random_context)); + + // Combined test: expect having 8 localhost, 4 I2P, and 3 onion peers out of + // 24 to protect 2 of each (6 total), plus 6 others for 12/24 total, sorted + // by longest uptime. + BOOST_CHECK(IsProtected( + 24, [](NodeEvictionCandidate& c) { + c.nTimeConnected = c.id; + c.m_is_local = (c.id > 15); + if (c.id > 10 && c.id < 15) { + c.m_network = NET_I2P; + } else if (c.id > 6 && c.id < 10) { + c.m_network = NET_ONION; + } else { + c.m_network = NET_IPV4; + } + }, + /* protected_peer_ids */ {0, 1, 2, 3, 4, 5, 7, 8, 11, 12, 16, 17}, + /* unprotected_peer_ids */ {6, 9, 10, 13, 14, 15, 18, 19, 20, 21, 22, 23}, random_context)); } @@ -257,91 +485,89 @@ BOOST_AUTO_TEST_CASE(peer_eviction_test) { FastRandomContext random_context{true}; - for (int i = 0; i < NODE_EVICTION_TEST_ROUNDS; ++i) { - for (int number_of_nodes = 0; number_of_nodes < NODE_EVICTION_TEST_UP_TO_N_NODES; ++number_of_nodes) { - // Four nodes with the highest keyed netgroup values should be - // protected from eviction. - BOOST_CHECK(!IsEvicted( - number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { - candidate.nKeyedNetGroup = number_of_nodes - candidate.id; - }, - {0, 1, 2, 3}, random_context)); - - // Eight nodes with the lowest minimum ping time should be protected - // from eviction. - BOOST_CHECK(!IsEvicted( - number_of_nodes, [](NodeEvictionCandidate& candidate) { - candidate.m_min_ping_time = std::chrono::microseconds{candidate.id}; - }, - {0, 1, 2, 3, 4, 5, 6, 7}, random_context)); - - // Four nodes that most recently sent us novel transactions accepted - // into our mempool should be protected from eviction. 
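Editor's note on the protection tests above: their comments describe reserving up to 1/4 of the eviction candidates for localhost, onion and I2P peers, splitting those slots across the networks present and letting networks with fewer candidates hand back unused slots. The sketch below is only a simplified illustration of that allocation idea, not the eviction code in net.cpp; all names and the division scheme are invented for the sketch, and it reproduces just the 1-localhost/8-I2P/1-onion example from the comment above.

#include <algorithm>
#include <cassert>
#include <map>
#include <string>
#include <utility>
#include <vector>

// Illustrative only: split the reserved quarter of protection slots across the
// favored "networks", visiting smaller groups first so they are not starved and
// letting any share they cannot use roll over to the larger groups.
std::map<std::string, int> AllocateProtectionSlots(int total_candidates,
                                                   const std::map<std::string, int>& candidates_per_net)
{
    int slots = total_candidates / 4; // reserve up to 1/4 of all candidates
    std::vector<std::pair<std::string, int>> nets(candidates_per_net.begin(), candidates_per_net.end());
    std::sort(nets.begin(), nets.end(),
              [](const auto& a, const auto& b) { return a.second < b.second; });

    std::map<std::string, int> allocated;
    int remaining_nets = static_cast<int>(nets.size());
    for (const auto& [net, count] : nets) {
        const int share = (slots + remaining_nets - 1) / remaining_nets; // ceil split of what is left
        const int take = std::min(count, share);                         // a net cannot use more than it has
        allocated[net] = take;
        slots -= take; // unused share rolls over to the remaining nets
        --remaining_nets;
    }
    return allocated;
}

int main()
{
    // 24 candidates with 1 localhost, 8 I2P and 1 onion -> 6 reserved slots,
    // split as 1 localhost, 4 I2P and 1 onion, matching the test comment above.
    const auto got = AllocateProtectionSlots(24, {{"localhost", 1}, {"i2p", 8}, {"onion", 1}});
    assert(got.at("localhost") == 1 && got.at("i2p") == 4 && got.at("onion") == 1);
    return 0;
}
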
- BOOST_CHECK(!IsEvicted( - number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { - candidate.nLastTXTime = number_of_nodes - candidate.id; - }, - {0, 1, 2, 3}, random_context)); - - // Up to eight non-tx-relay peers that most recently sent us novel - // blocks should be protected from eviction. - BOOST_CHECK(!IsEvicted( - number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { - candidate.nLastBlockTime = number_of_nodes - candidate.id; - if (candidate.id <= 7) { - candidate.fRelayTxes = false; - candidate.fRelevantServices = true; - } - }, - {0, 1, 2, 3, 4, 5, 6, 7}, random_context)); - - // Four peers that most recently sent us novel blocks should be - // protected from eviction. - BOOST_CHECK(!IsEvicted( - number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { - candidate.nLastBlockTime = number_of_nodes - candidate.id; - }, - {0, 1, 2, 3}, random_context)); - - // Combination of the previous two tests. - BOOST_CHECK(!IsEvicted( - number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { - candidate.nLastBlockTime = number_of_nodes - candidate.id; - if (candidate.id <= 7) { - candidate.fRelayTxes = false; - candidate.fRelevantServices = true; - } - }, - {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, random_context)); - - // Combination of all tests above. - BOOST_CHECK(!IsEvicted( - number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { - candidate.nKeyedNetGroup = number_of_nodes - candidate.id; // 4 protected - candidate.m_min_ping_time = std::chrono::microseconds{candidate.id}; // 8 protected - candidate.nLastTXTime = number_of_nodes - candidate.id; // 4 protected - candidate.nLastBlockTime = number_of_nodes - candidate.id; // 4 protected - }, - {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}, random_context)); - - // An eviction is expected given >= 29 random eviction candidates. The eviction logic protects at most - // four peers by net group, eight by lowest ping time, four by last time of novel tx, up to eight non-tx-relay - // peers by last novel block time, and four more peers by last novel block time. - if (number_of_nodes >= 29) { - BOOST_CHECK(SelectNodeToEvict(GetRandomNodeEvictionCandidates(number_of_nodes, random_context))); - } - - // No eviction is expected given <= 20 random eviction candidates. The eviction logic protects at least - // four peers by net group, eight by lowest ping time, four by last time of novel tx and four peers by last - // novel block time. - if (number_of_nodes <= 20) { - BOOST_CHECK(!SelectNodeToEvict(GetRandomNodeEvictionCandidates(number_of_nodes, random_context))); - } + for (int number_of_nodes = 0; number_of_nodes < 200; ++number_of_nodes) { + // Four nodes with the highest keyed netgroup values should be + // protected from eviction. + BOOST_CHECK(!IsEvicted( + number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { + candidate.nKeyedNetGroup = number_of_nodes - candidate.id; + }, + {0, 1, 2, 3}, random_context)); + + // Eight nodes with the lowest minimum ping time should be protected + // from eviction. + BOOST_CHECK(!IsEvicted( + number_of_nodes, [](NodeEvictionCandidate& candidate) { + candidate.m_min_ping_time = std::chrono::microseconds{candidate.id}; + }, + {0, 1, 2, 3, 4, 5, 6, 7}, random_context)); + + // Four nodes that most recently sent us novel transactions accepted + // into our mempool should be protected from eviction. 
+ BOOST_CHECK(!IsEvicted( + number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { + candidate.nLastTXTime = number_of_nodes - candidate.id; + }, + {0, 1, 2, 3}, random_context)); + + // Up to eight non-tx-relay peers that most recently sent us novel + // blocks should be protected from eviction. + BOOST_CHECK(!IsEvicted( + number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { + candidate.nLastBlockTime = number_of_nodes - candidate.id; + if (candidate.id <= 7) { + candidate.fRelayTxes = false; + candidate.fRelevantServices = true; + } + }, + {0, 1, 2, 3, 4, 5, 6, 7}, random_context)); + + // Four peers that most recently sent us novel blocks should be + // protected from eviction. + BOOST_CHECK(!IsEvicted( + number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { + candidate.nLastBlockTime = number_of_nodes - candidate.id; + }, + {0, 1, 2, 3}, random_context)); + + // Combination of the previous two tests. + BOOST_CHECK(!IsEvicted( + number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { + candidate.nLastBlockTime = number_of_nodes - candidate.id; + if (candidate.id <= 7) { + candidate.fRelayTxes = false; + candidate.fRelevantServices = true; + } + }, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, random_context)); + + // Combination of all tests above. + BOOST_CHECK(!IsEvicted( + number_of_nodes, [number_of_nodes](NodeEvictionCandidate& candidate) { + candidate.nKeyedNetGroup = number_of_nodes - candidate.id; // 4 protected + candidate.m_min_ping_time = std::chrono::microseconds{candidate.id}; // 8 protected + candidate.nLastTXTime = number_of_nodes - candidate.id; // 4 protected + candidate.nLastBlockTime = number_of_nodes - candidate.id; // 4 protected + }, + {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}, random_context)); + + // An eviction is expected given >= 29 random eviction candidates. The eviction logic protects at most + // four peers by net group, eight by lowest ping time, four by last time of novel tx, up to eight non-tx-relay + // peers by last novel block time, and four more peers by last novel block time. + if (number_of_nodes >= 29) { + BOOST_CHECK(SelectNodeToEvict(GetRandomNodeEvictionCandidates(number_of_nodes, random_context))); + } - // Cases left to test: - // * "If any remaining peers are preferred for eviction consider only them. [...]" - // * "Identify the network group with the most connections and youngest member. [...]" + // No eviction is expected given <= 20 random eviction candidates. The eviction logic protects at least + // four peers by net group, eight by lowest ping time, four by last time of novel tx and four peers by last + // novel block time. + if (number_of_nodes <= 20) { + BOOST_CHECK(!SelectNodeToEvict(GetRandomNodeEvictionCandidates(number_of_nodes, random_context))); } + + // Cases left to test: + // * "If any remaining peers are preferred for eviction consider only them. [...]" + // * "Identify the network group with the most connections and youngest member. [...]" } } diff --git a/src/test/txindex_tests.cpp b/src/test/txindex_tests.cpp index d47c54fd6e..3ce7ecb5f2 100644 --- a/src/test/txindex_tests.cpp +++ b/src/test/txindex_tests.cpp @@ -28,7 +28,7 @@ BOOST_FIXTURE_TEST_CASE(txindex_initial_sync, TestChain100Setup) // BlockUntilSyncedToCurrentChain should return false before txindex is started. 
BOOST_CHECK(!txindex.BlockUntilSyncedToCurrentChain()); - BOOST_REQUIRE(txindex.Start(::ChainstateActive())); + BOOST_REQUIRE(txindex.Start(m_node.chainman->ActiveChainstate())); // Allow tx index to catch up with the block index. constexpr int64_t timeout_ms = 10 * 1000; diff --git a/src/test/txvalidation_tests.cpp b/src/test/txvalidation_tests.cpp index 95ad85d0f8..ade9e210f2 100644 --- a/src/test/txvalidation_tests.cpp +++ b/src/test/txvalidation_tests.cpp @@ -37,7 +37,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_reject_coinbase, TestChain100Setup) LOCK(cs_main); unsigned int initialPoolSize = m_node.mempool->size(); - const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *m_node.mempool, MakeTransactionRef(coinbaseTx), + const MempoolAcceptResult result = AcceptToMemoryPool(m_node.chainman->ActiveChainstate(), *m_node.mempool, MakeTransactionRef(coinbaseTx), true /* bypass_limits */); BOOST_CHECK(result.m_result_type == MempoolAcceptResult::ResultType::INVALID); diff --git a/src/test/txvalidationcache_tests.cpp b/src/test/txvalidationcache_tests.cpp index 3244b58082..23195c0a26 100644 --- a/src/test/txvalidationcache_tests.cpp +++ b/src/test/txvalidationcache_tests.cpp @@ -31,7 +31,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup) const auto ToMemPool = [this](const CMutableTransaction& tx) { LOCK(cs_main); - const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *m_node.mempool, MakeTransactionRef(tx), + const MempoolAcceptResult result = AcceptToMemoryPool(m_node.chainman->ActiveChainstate(), *m_node.mempool, MakeTransactionRef(tx), true /* bypass_limits */); return result.m_result_type == MempoolAcceptResult::ResultType::VALID; }; @@ -63,7 +63,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup) block = CreateAndProcessBlock(spends, scriptPubKey); { LOCK(cs_main); - BOOST_CHECK(::ChainActive().Tip()->GetBlockHash() != block.GetHash()); + BOOST_CHECK(m_node.chainman->ActiveChain().Tip()->GetBlockHash() != block.GetHash()); } // Test 2: ... and should be rejected if spend1 is in the memory pool @@ -71,7 +71,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup) block = CreateAndProcessBlock(spends, scriptPubKey); { LOCK(cs_main); - BOOST_CHECK(::ChainActive().Tip()->GetBlockHash() != block.GetHash()); + BOOST_CHECK(m_node.chainman->ActiveChain().Tip()->GetBlockHash() != block.GetHash()); } m_node.mempool->clear(); @@ -80,7 +80,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup) block = CreateAndProcessBlock(spends, scriptPubKey); { LOCK(cs_main); - BOOST_CHECK(::ChainActive().Tip()->GetBlockHash() != block.GetHash()); + BOOST_CHECK(m_node.chainman->ActiveChain().Tip()->GetBlockHash() != block.GetHash()); } m_node.mempool->clear(); @@ -91,7 +91,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup) block = CreateAndProcessBlock(oneSpend, scriptPubKey); { LOCK(cs_main); - BOOST_CHECK(::ChainActive().Tip()->GetBlockHash() == block.GetHash()); + BOOST_CHECK(m_node.chainman->ActiveChain().Tip()->GetBlockHash() == block.GetHash()); } // spends[1] should have been removed from the mempool when the // block with spends[0] is accepted: @@ -109,7 +109,7 @@ BOOST_FIXTURE_TEST_CASE(tx_mempool_block_doublespend, TestChain100Setup) // should fail. // Capture this interaction with the upgraded_nop argument: set it when evaluating // any script flag that is implemented as an upgraded NOP code. 
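Editor's note: ValidateCheckInputsForAllFlags (immediately below) enumerates every combination of a set of script verification flags and expects validation to succeed exactly when none of the known-failing flags are present. Here is a tiny standalone sketch of that subset-enumeration pattern; the flag bits and the Validate() stub are made up for illustration, and the real helper additionally fixes up flag dependencies (e.g. WITNESS requires P2SH).

#include <cassert>
#include <cstdint>

// Made-up flag bits standing in for SCRIPT_VERIFY_* values.
constexpr uint32_t FLAG_A = 1u << 0;
constexpr uint32_t FLAG_B = 1u << 1;
constexpr uint32_t FLAG_C = 1u << 2;
constexpr uint32_t ALL_FLAGS = FLAG_A | FLAG_B | FLAG_C;

// Stub: pretend validation fails whenever FLAG_B is enforced.
bool Validate(uint32_t flags) { return (flags & FLAG_B) == 0; }

int main()
{
    const uint32_t failing_flags = FLAG_B;
    // Enumerate every subset of ALL_FLAGS, as the test helper does for the real
    // script flags, and require success iff no failing flag is included.
    for (uint32_t test_flags = 0; test_flags <= ALL_FLAGS; ++test_flags) {
        const bool expected = (test_flags & failing_flags) == 0;
        assert(Validate(test_flags) == expected);
    }
    return 0;
}
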
-static void ValidateCheckInputsForAllFlags(const CTransaction &tx, uint32_t failing_flags, bool add_to_cache) EXCLUSIVE_LOCKS_REQUIRED(cs_main) +static void ValidateCheckInputsForAllFlags(const CTransaction &tx, uint32_t failing_flags, bool add_to_cache, CCoinsViewCache& active_coins_tip) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { PrecomputedTransactionData txdata; // If we add many more flags, this loop can get too expensive, but we can @@ -126,7 +126,7 @@ static void ValidateCheckInputsForAllFlags(const CTransaction &tx, uint32_t fail // WITNESS requires P2SH test_flags |= SCRIPT_VERIFY_P2SH; } - bool ret = CheckInputScripts(tx, state, &::ChainstateActive().CoinsTip(), test_flags, true, add_to_cache, txdata, nullptr); + bool ret = CheckInputScripts(tx, state, &active_coins_tip, test_flags, true, add_to_cache, txdata, nullptr); // CheckInputScripts should succeed iff test_flags doesn't intersect with // failing_flags bool expected_return_value = !(test_flags & failing_flags); @@ -136,13 +136,13 @@ static void ValidateCheckInputsForAllFlags(const CTransaction &tx, uint32_t fail if (ret && add_to_cache) { // Check that we get a cache hit if the tx was valid std::vector<CScriptCheck> scriptchecks; - BOOST_CHECK(CheckInputScripts(tx, state, &::ChainstateActive().CoinsTip(), test_flags, true, add_to_cache, txdata, &scriptchecks)); + BOOST_CHECK(CheckInputScripts(tx, state, &active_coins_tip, test_flags, true, add_to_cache, txdata, &scriptchecks)); BOOST_CHECK(scriptchecks.empty()); } else { // Check that we get script executions to check, if the transaction // was invalid, or we didn't add to cache. std::vector<CScriptCheck> scriptchecks; - BOOST_CHECK(CheckInputScripts(tx, state, &::ChainstateActive().CoinsTip(), test_flags, true, add_to_cache, txdata, &scriptchecks)); + BOOST_CHECK(CheckInputScripts(tx, state, &active_coins_tip, test_flags, true, add_to_cache, txdata, &scriptchecks)); BOOST_CHECK_EQUAL(scriptchecks.size(), tx.vin.size()); } } @@ -205,20 +205,20 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) TxValidationState state; PrecomputedTransactionData ptd_spend_tx; - BOOST_CHECK(!CheckInputScripts(CTransaction(spend_tx), state, &::ChainstateActive().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG, true, true, ptd_spend_tx, nullptr)); + BOOST_CHECK(!CheckInputScripts(CTransaction(spend_tx), state, &m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG, true, true, ptd_spend_tx, nullptr)); // If we call again asking for scriptchecks (as happens in // ConnectBlock), we should add a script check object for this -- we're // not caching invalidity (if that changes, delete this test case). std::vector<CScriptCheck> scriptchecks; - BOOST_CHECK(CheckInputScripts(CTransaction(spend_tx), state, &::ChainstateActive().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG, true, true, ptd_spend_tx, &scriptchecks)); + BOOST_CHECK(CheckInputScripts(CTransaction(spend_tx), state, &m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_DERSIG, true, true, ptd_spend_tx, &scriptchecks)); BOOST_CHECK_EQUAL(scriptchecks.size(), 1U); // Test that CheckInputScripts returns true iff DERSIG-enforcing flags are // not present. Don't add these checks to the cache, so that we can // test later that block validation works fine in the absence of cached // successes. 
- ValidateCheckInputsForAllFlags(CTransaction(spend_tx), SCRIPT_VERIFY_DERSIG | SCRIPT_VERIFY_LOW_S | SCRIPT_VERIFY_STRICTENC, false); + ValidateCheckInputsForAllFlags(CTransaction(spend_tx), SCRIPT_VERIFY_DERSIG | SCRIPT_VERIFY_LOW_S | SCRIPT_VERIFY_STRICTENC, false, m_node.chainman->ActiveChainstate().CoinsTip()); } // And if we produce a block with this tx, it should be valid (DERSIG not @@ -227,8 +227,8 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) block = CreateAndProcessBlock({spend_tx}, p2pk_scriptPubKey); LOCK(cs_main); - BOOST_CHECK(::ChainActive().Tip()->GetBlockHash() == block.GetHash()); - BOOST_CHECK(::ChainstateActive().CoinsTip().GetBestBlock() == block.GetHash()); + BOOST_CHECK(m_node.chainman->ActiveChain().Tip()->GetBlockHash() == block.GetHash()); + BOOST_CHECK(m_node.chainman->ActiveChainstate().CoinsTip().GetBestBlock() == block.GetHash()); // Test P2SH: construct a transaction that is valid without P2SH, and // then test validity with P2SH. @@ -244,7 +244,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) std::vector<unsigned char> vchSig2(p2pk_scriptPubKey.begin(), p2pk_scriptPubKey.end()); invalid_under_p2sh_tx.vin[0].scriptSig << vchSig2; - ValidateCheckInputsForAllFlags(CTransaction(invalid_under_p2sh_tx), SCRIPT_VERIFY_P2SH, true); + ValidateCheckInputsForAllFlags(CTransaction(invalid_under_p2sh_tx), SCRIPT_VERIFY_P2SH, true, m_node.chainman->ActiveChainstate().CoinsTip()); } // Test CHECKLOCKTIMEVERIFY @@ -267,13 +267,13 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) vchSig.push_back((unsigned char)SIGHASH_ALL); invalid_with_cltv_tx.vin[0].scriptSig = CScript() << vchSig << 101; - ValidateCheckInputsForAllFlags(CTransaction(invalid_with_cltv_tx), SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY, true); + ValidateCheckInputsForAllFlags(CTransaction(invalid_with_cltv_tx), SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY, true, m_node.chainman->ActiveChainstate().CoinsTip()); // Make it valid, and check again invalid_with_cltv_tx.vin[0].scriptSig = CScript() << vchSig << 100; TxValidationState state; PrecomputedTransactionData txdata; - BOOST_CHECK(CheckInputScripts(CTransaction(invalid_with_cltv_tx), state, ::ChainstateActive().CoinsTip(), SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY, true, true, txdata, nullptr)); + BOOST_CHECK(CheckInputScripts(CTransaction(invalid_with_cltv_tx), state, m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_CHECKLOCKTIMEVERIFY, true, true, txdata, nullptr)); } // TEST CHECKSEQUENCEVERIFY @@ -295,13 +295,13 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) vchSig.push_back((unsigned char)SIGHASH_ALL); invalid_with_csv_tx.vin[0].scriptSig = CScript() << vchSig << 101; - ValidateCheckInputsForAllFlags(CTransaction(invalid_with_csv_tx), SCRIPT_VERIFY_CHECKSEQUENCEVERIFY, true); + ValidateCheckInputsForAllFlags(CTransaction(invalid_with_csv_tx), SCRIPT_VERIFY_CHECKSEQUENCEVERIFY, true, m_node.chainman->ActiveChainstate().CoinsTip()); // Make it valid, and check again invalid_with_csv_tx.vin[0].scriptSig = CScript() << vchSig << 100; TxValidationState state; PrecomputedTransactionData txdata; - BOOST_CHECK(CheckInputScripts(CTransaction(invalid_with_csv_tx), state, &::ChainstateActive().CoinsTip(), SCRIPT_VERIFY_CHECKSEQUENCEVERIFY, true, true, txdata, nullptr)); + BOOST_CHECK(CheckInputScripts(CTransaction(invalid_with_csv_tx), state, &m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_CHECKSEQUENCEVERIFY, true, true, txdata, nullptr)); } // TODO: add tests for remaining script flags @@ 
-324,11 +324,11 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) UpdateInput(valid_with_witness_tx.vin[0], sigdata); // This should be valid under all script flags. - ValidateCheckInputsForAllFlags(CTransaction(valid_with_witness_tx), 0, true); + ValidateCheckInputsForAllFlags(CTransaction(valid_with_witness_tx), 0, true, m_node.chainman->ActiveChainstate().CoinsTip()); // Remove the witness, and check that it is now invalid. valid_with_witness_tx.vin[0].scriptWitness.SetNull(); - ValidateCheckInputsForAllFlags(CTransaction(valid_with_witness_tx), SCRIPT_VERIFY_WITNESS, true); + ValidateCheckInputsForAllFlags(CTransaction(valid_with_witness_tx), SCRIPT_VERIFY_WITNESS, true, m_node.chainman->ActiveChainstate().CoinsTip()); } { @@ -353,7 +353,7 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) } // This should be valid under all script flags - ValidateCheckInputsForAllFlags(CTransaction(tx), 0, true); + ValidateCheckInputsForAllFlags(CTransaction(tx), 0, true, m_node.chainman->ActiveChainstate().CoinsTip()); // Check that if the second input is invalid, but the first input is // valid, the transaction is not cached. @@ -363,12 +363,12 @@ BOOST_FIXTURE_TEST_CASE(checkinputs_test, TestChain100Setup) TxValidationState state; PrecomputedTransactionData txdata; // This transaction is now invalid under segwit, because of the second input. - BOOST_CHECK(!CheckInputScripts(CTransaction(tx), state, &::ChainstateActive().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true, true, txdata, nullptr)); + BOOST_CHECK(!CheckInputScripts(CTransaction(tx), state, &m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true, true, txdata, nullptr)); std::vector<CScriptCheck> scriptchecks; // Make sure this transaction was not cached (ie because the first // input was valid) - BOOST_CHECK(CheckInputScripts(CTransaction(tx), state, &::ChainstateActive().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true, true, txdata, &scriptchecks)); + BOOST_CHECK(CheckInputScripts(CTransaction(tx), state, &m_node.chainman->ActiveChainstate().CoinsTip(), SCRIPT_VERIFY_P2SH | SCRIPT_VERIFY_WITNESS, true, true, txdata, &scriptchecks)); // Should get 2 script checks back -- caching is on a whole-transaction basis. 
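The two-input test above asserts that both inputs come back as script checks when the second input is invalid, because validity is cached per whole transaction rather than per input. A toy model of that idea, with a cache keyed by (txid, flags); the key format and types are illustrative only, not Core's script-execution cache:

#include <cassert>
#include <string>
#include <unordered_set>

// Toy cache: record a (txid, flags) pair only after every input validated.
// If any input fails, nothing is cached, so a later call re-checks all inputs.
struct TxValidityCache {
    std::unordered_set<std::string> ok; // key = txid + ":" + flags

    bool Contains(const std::string& txid, uint32_t flags) const
    {
        return ok.count(txid + ":" + std::to_string(flags)) > 0;
    }
    void Add(const std::string& txid, uint32_t flags)
    {
        ok.insert(txid + ":" + std::to_string(flags));
    }
};

int main()
{
    TxValidityCache cache;
    const std::string txid = "ab"; // placeholder id
    const uint32_t flags = 0x3;

    // All inputs validated successfully -> the whole tx is cached.
    cache.Add(txid, flags);
    assert(cache.Contains(txid, flags));

    // Different flags (or a failed input) mean a cache miss: every input is
    // checked again, which is what the size check in the test asserts.
    assert(!cache.Contains(txid, 0x7));
}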
BOOST_CHECK_EQUAL(scriptchecks.size(), 2U); } diff --git a/src/test/util/mining.cpp b/src/test/util/mining.cpp index 1204873828..f6a11bc02e 100644 --- a/src/test/util/mining.cpp +++ b/src/test/util/mining.cpp @@ -74,12 +74,12 @@ CTxIn MineBlock(const NodeContext& node, const CScript& coinbase_scriptPubKey) std::shared_ptr<CBlock> PrepareBlock(const NodeContext& node, const CScript& coinbase_scriptPubKey) { auto block = std::make_shared<CBlock>( - BlockAssembler{::ChainstateActive(), *Assert(node.mempool), Params()} + BlockAssembler{Assert(node.chainman)->ActiveChainstate(), *Assert(node.mempool), Params()} .CreateNewBlock(coinbase_scriptPubKey) ->block); LOCK(cs_main); - block->nTime = ::ChainActive().Tip()->GetMedianTimePast() + 1; + block->nTime = Assert(node.chainman)->ActiveChain().Tip()->GetMedianTimePast() + 1; block->hashMerkleRoot = BlockMerkleRoot(*block); return block; diff --git a/src/test/util/net.h b/src/test/util/net.h index 71685d437a..1b49a671bd 100644 --- a/src/test/util/net.h +++ b/src/test/util/net.h @@ -6,9 +6,11 @@ #define BITCOIN_TEST_UTIL_NET_H #include <compat.h> +#include <netaddress.h> #include <net.h> #include <util/sock.h> +#include <array> #include <cassert> #include <cstring> #include <string> @@ -67,6 +69,16 @@ constexpr ConnectionType ALL_CONNECTION_TYPES[]{ ConnectionType::ADDR_FETCH, }; +constexpr auto ALL_NETWORKS = std::array{ + Network::NET_UNROUTABLE, + Network::NET_IPV4, + Network::NET_IPV6, + Network::NET_ONION, + Network::NET_I2P, + Network::NET_CJDNS, + Network::NET_INTERNAL, +}; + /** * A mocked Sock alternative that returns a statically contained data upon read and succeeds * and ignores all writes. The data to be returned is given to the constructor and when it is diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index 863c3ab565..f71d9148b6 100644 --- a/src/test/util/setup_common.cpp +++ b/src/test/util/setup_common.cpp @@ -76,6 +76,7 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve : m_path_root{fs::temp_directory_path() / "test_common_" PACKAGE_NAME / g_insecure_rand_ctx_temp_path.rand256().ToString()}, m_args{} { + m_node.args = &gArgs; const std::vector<const char*> arguments = Cat( { "dummy", @@ -94,7 +95,7 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve gArgs.ForceSetArg("-datadir", m_path_root.string()); gArgs.ClearPathCache(); { - SetupServerArgs(m_node); + SetupServerArgs(*m_node.args); std::string error; const bool success{m_node.args->ParseParameters(arguments.size(), arguments.data(), error)}; assert(success); @@ -145,7 +146,7 @@ ChainTestingSetup::ChainTestingSetup(const std::string& chainName, const std::ve m_node.fee_estimator = std::make_unique<CBlockPolicyEstimator>(); m_node.mempool = std::make_unique<CTxMemPool>(m_node.fee_estimator.get(), 1); - m_node.chainman = &::g_chainman; + m_node.chainman = std::make_unique<ChainstateManager>(); // Start script-checking threads. Set g_parallel_script_checks to true so they are used. 
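The src/test/util/net.h hunk above adds ALL_NETWORKS as a constexpr std::array so tests can range over every Network value. A self-contained illustration of the pattern; the enum here is a placeholder, not the real netaddress.h definition:

#include <array>
#include <cstdio>

// Placeholder enum standing in for Network from netaddress.h.
enum class Network { UNROUTABLE, IPV4, IPV6, ONION, I2P, CJDNS, INTERNAL };

// constexpr array listing every enumerator, analogous to ALL_NETWORKS:
// cheap to range over in tests and checkable at compile time.
constexpr auto ALL_NETWORKS = std::array{
    Network::UNROUTABLE, Network::IPV4, Network::IPV6, Network::ONION,
    Network::I2P, Network::CJDNS, Network::INTERNAL,
};
static_assert(ALL_NETWORKS.size() == 7, "update when the enum grows");

int main()
{
    for (const Network net : ALL_NETWORKS) {
        std::printf("network %d\n", static_cast<int>(net));
    }
}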
constexpr int script_check_threads = 2; @@ -167,7 +168,7 @@ ChainTestingSetup::~ChainTestingSetup() m_node.mempool.reset(); m_node.scheduler.reset(); m_node.chainman->Reset(); - m_node.chainman = nullptr; + m_node.chainman.reset(); pblocktree.reset(); } @@ -180,22 +181,22 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const RegisterAllCoreRPCCommands(tableRPC); m_node.chainman->InitializeChainstate(*m_node.mempool); - ::ChainstateActive().InitCoinsDB( + m_node.chainman->ActiveChainstate().InitCoinsDB( /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false); - assert(!::ChainstateActive().CanFlushToDisk()); - ::ChainstateActive().InitCoinsCache(1 << 23); - assert(::ChainstateActive().CanFlushToDisk()); - if (!::ChainstateActive().LoadGenesisBlock(chainparams)) { + assert(!m_node.chainman->ActiveChainstate().CanFlushToDisk()); + m_node.chainman->ActiveChainstate().InitCoinsCache(1 << 23); + assert(m_node.chainman->ActiveChainstate().CanFlushToDisk()); + if (!m_node.chainman->ActiveChainstate().LoadGenesisBlock(chainparams)) { throw std::runtime_error("LoadGenesisBlock failed."); } BlockValidationState state; - if (!::ChainstateActive().ActivateBestChain(state, chainparams)) { + if (!m_node.chainman->ActiveChainstate().ActivateBestChain(state, chainparams)) { throw std::runtime_error(strprintf("ActivateBestChain failed. (%s)", state.ToString())); } m_node.addrman = std::make_unique<CAddrMan>(); - m_node.banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME); + m_node.banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME); m_node.connman = std::make_unique<CConnman>(0x1337, 0x1337, *m_node.addrman); // Deterministic randomness for tests. m_node.peerman = PeerManager::make(chainparams, *m_node.connman, *m_node.addrman, m_node.banman.get(), *m_node.scheduler, *m_node.chainman, @@ -240,7 +241,7 @@ CBlock TestChain100Setup::CreateAndProcessBlock(const std::vector<CMutableTransa { const CChainParams& chainparams = Params(); CTxMemPool empty_pool; - CBlock block = BlockAssembler(::ChainstateActive(), empty_pool, chainparams).CreateNewBlock(scriptPubKey)->block; + CBlock block = BlockAssembler(m_node.chainman->ActiveChainstate(), empty_pool, chainparams).CreateNewBlock(scriptPubKey)->block; Assert(block.vtx.size() == 1); for (const CMutableTransaction& tx : txns) { @@ -299,7 +300,7 @@ CMutableTransaction TestChain100Setup::CreateValidMempoolTransaction(CTransactio // If submit=true, add transaction to the mempool. 
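With the setup_common.cpp change above, each test fixture now owns its ChainstateManager through a std::unique_ptr and destroys it in the destructor, rather than pointing m_node.chainman at a process-wide global. A minimal sketch of that ownership shape; the struct names are illustrative stand-ins, not the real NodeContext or ChainstateManager:

#include <cassert>
#include <memory>

struct ChainManager {           // stand-in for ChainstateManager
    bool loaded = true;
    void Reset() { loaded = false; }
};

struct NodeCtx {                // stand-in for NodeContext
    std::unique_ptr<ChainManager> chainman;
};

struct TestFixture {
    NodeCtx m_node;
    TestFixture() { m_node.chainman = std::make_unique<ChainManager>(); }
    ~TestFixture()
    {
        m_node.chainman->Reset();   // mirrors the ChainTestingSetup teardown
        m_node.chainman.reset();    // destroy the instance instead of nulling a global
    }
};

int main()
{
    {
        TestFixture t;
        assert(t.m_node.chainman != nullptr);
    }                               // everything is torn down per test
    TestFixture another;            // the next test gets a fresh instance
    assert(another.m_node.chainman->loaded);
}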
if (submit) { LOCK(cs_main); - const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *m_node.mempool.get(), MakeTransactionRef(mempool_txn), /* bypass_limits */ false); + const MempoolAcceptResult result = AcceptToMemoryPool(m_node.chainman->ActiveChainstate(), *m_node.mempool.get(), MakeTransactionRef(mempool_txn), /* bypass_limits */ false); assert(result.m_result_type == MempoolAcceptResult::ResultType::VALID); } diff --git a/src/test/validation_block_tests.cpp b/src/test/validation_block_tests.cpp index 552be0a2da..e0bc10d660 100644 --- a/src/test/validation_block_tests.cpp +++ b/src/test/validation_block_tests.cpp @@ -84,8 +84,8 @@ std::shared_ptr<CBlock> MinerTestingSetup::Block(const uint256& prev_hash) std::shared_ptr<CBlock> MinerTestingSetup::FinalizeBlock(std::shared_ptr<CBlock> pblock) { - LOCK(cs_main); // For g_chainman.m_blockman.LookupBlockIndex - GenerateCoinbaseCommitment(*pblock, g_chainman.m_blockman.LookupBlockIndex(pblock->hashPrevBlock), Params().GetConsensus()); + LOCK(cs_main); // For m_node.chainman->m_blockman.LookupBlockIndex + GenerateCoinbaseCommitment(*pblock, m_node.chainman->m_blockman.LookupBlockIndex(pblock->hashPrevBlock), Params().GetConsensus()); pblock->hashMerkleRoot = BlockMerkleRoot(*pblock); @@ -162,7 +162,7 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering) const CBlockIndex* initial_tip = nullptr; { LOCK(cs_main); - initial_tip = ::ChainActive().Tip(); + initial_tip = m_node.chainman->ActiveChain().Tip(); } auto sub = std::make_shared<TestSubscriber>(initial_tip->GetBlockHash()); RegisterSharedValidationInterface(sub); @@ -198,7 +198,7 @@ BOOST_AUTO_TEST_CASE(processnewblock_signals_ordering) UnregisterSharedValidationInterface(sub); LOCK(cs_main); - BOOST_CHECK_EQUAL(sub->m_expected_tip, ::ChainActive().Tip()->GetBlockHash()); + BOOST_CHECK_EQUAL(sub->m_expected_tip, m_node.chainman->ActiveChain().Tip()->GetBlockHash()); } /** @@ -232,7 +232,7 @@ BOOST_AUTO_TEST_CASE(mempool_locks_reorg) // Run the test multiple times for (int test_runs = 3; test_runs > 0; --test_runs) { - BOOST_CHECK_EQUAL(last_mined->GetHash(), ::ChainActive().Tip()->GetBlockHash()); + BOOST_CHECK_EQUAL(last_mined->GetHash(), m_node.chainman->ActiveChain().Tip()->GetBlockHash()); // Later on split from here const uint256 split_hash{last_mined->hashPrevBlock}; @@ -273,7 +273,7 @@ BOOST_AUTO_TEST_CASE(mempool_locks_reorg) { LOCK(cs_main); for (const auto& tx : txs) { - const MempoolAcceptResult result = AcceptToMemoryPool(::ChainstateActive(), *m_node.mempool, tx, false /* bypass_limits */); + const MempoolAcceptResult result = AcceptToMemoryPool(m_node.chainman->ActiveChainstate(), *m_node.mempool, tx, false /* bypass_limits */); BOOST_REQUIRE(result.m_result_type == MempoolAcceptResult::ResultType::VALID); } } @@ -306,7 +306,7 @@ BOOST_AUTO_TEST_CASE(mempool_locks_reorg) } LOCK(cs_main); // We are done with the reorg, so the tip must have changed - assert(tip_init != ::ChainActive().Tip()->GetBlockHash()); + assert(tip_init != m_node.chainman->ActiveChain().Tip()->GetBlockHash()); }}; // Submit the reorg in this thread to invalidate and remove the txs from the tx pool @@ -314,7 +314,7 @@ BOOST_AUTO_TEST_CASE(mempool_locks_reorg) ProcessBlock(b); } // Check that the reorg was eventually successful - BOOST_CHECK_EQUAL(last_mined->GetHash(), ::ChainActive().Tip()->GetBlockHash()); + BOOST_CHECK_EQUAL(last_mined->GetHash(), m_node.chainman->ActiveChain().Tip()->GetBlockHash()); // We can join the other thread, which returns when the reorg was 
successful rpc_thread.join(); diff --git a/src/txdb.cpp b/src/txdb.cpp index 762f71feb1..4b76bee5ab 100644 --- a/src/txdb.cpp +++ b/src/txdb.cpp @@ -168,9 +168,34 @@ bool CBlockTreeDB::ReadLastBlockFile(int &nFile) { return Read(DB_LAST_BLOCK, nFile); } -CCoinsViewCursor *CCoinsViewDB::Cursor() const +/** Specialization of CCoinsViewCursor to iterate over a CCoinsViewDB */ +class CCoinsViewDBCursor: public CCoinsViewCursor { - CCoinsViewDBCursor *i = new CCoinsViewDBCursor(const_cast<CDBWrapper&>(*m_db).NewIterator(), GetBestBlock()); +public: + // Prefer using CCoinsViewDB::Cursor() since we want to perform some + // cache warmup on instantiation. + CCoinsViewDBCursor(CDBIterator* pcursorIn, const uint256&hashBlockIn): + CCoinsViewCursor(hashBlockIn), pcursor(pcursorIn) {} + ~CCoinsViewDBCursor() {} + + bool GetKey(COutPoint &key) const override; + bool GetValue(Coin &coin) const override; + unsigned int GetValueSize() const override; + + bool Valid() const override; + void Next() override; + +private: + std::unique_ptr<CDBIterator> pcursor; + std::pair<char, COutPoint> keyTmp; + + friend class CCoinsViewDB; +}; + +std::unique_ptr<CCoinsViewCursor> CCoinsViewDB::Cursor() const +{ + auto i = std::make_unique<CCoinsViewDBCursor>( + const_cast<CDBWrapper&>(*m_db).NewIterator(), GetBestBlock()); /* It seems that there are no "const iterators" for LevelDB. Since we only need read operations on it, use a const-cast to get around that restriction. */ diff --git a/src/txdb.h b/src/txdb.h index 0cf7e2f1b8..845d80788f 100644 --- a/src/txdb.h +++ b/src/txdb.h @@ -60,7 +60,7 @@ public: uint256 GetBestBlock() const override; std::vector<uint256> GetHeadBlocks() const override; bool BatchWrite(CCoinsMap &mapCoins, const uint256 &hashBlock) override; - CCoinsViewCursor *Cursor() const override; + std::unique_ptr<CCoinsViewCursor> Cursor() const override; //! Attempt to update from an older database format. Returns whether an error occurred. 
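The txdb change above moves CCoinsViewDBCursor out of the header into txdb.cpp and has CCoinsViewDB::Cursor() hand back a std::unique_ptr instead of a raw pointer. A self-contained sketch of that factory-returns-owning-pointer pattern with the concrete class hidden from other translation units; the names are generic, not the real txdb types:

#include <memory>

// Public interface (would live in the header).
class Cursor {
public:
    virtual ~Cursor() = default;
    virtual bool Valid() const = 0;
    virtual void Next() = 0;
};

class Database {
public:
    std::unique_ptr<Cursor> NewCursor() const;   // factory; caller owns the result
};

// Concrete cursor (would live in the .cpp, invisible to other translation units).
namespace {
class DbCursor final : public Cursor {
public:
    explicit DbCursor(int rows) : m_rows_left{rows} {}
    bool Valid() const override { return m_rows_left > 0; }
    void Next() override { if (m_rows_left > 0) --m_rows_left; }
private:
    int m_rows_left;
};
} // namespace

std::unique_ptr<Cursor> Database::NewCursor() const
{
    return std::make_unique<DbCursor>(/*rows=*/3);
}

int main()
{
    Database db;
    for (auto cur = db.NewCursor(); cur->Valid(); cur->Next()) {}
    // The cursor is freed automatically when `cur` goes out of scope.
}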
bool Upgrade(); @@ -70,28 +70,6 @@ public: void ResizeCache(size_t new_cache_size) EXCLUSIVE_LOCKS_REQUIRED(cs_main); }; -/** Specialization of CCoinsViewCursor to iterate over a CCoinsViewDB */ -class CCoinsViewDBCursor: public CCoinsViewCursor -{ -public: - ~CCoinsViewDBCursor() {} - - bool GetKey(COutPoint &key) const override; - bool GetValue(Coin &coin) const override; - unsigned int GetValueSize() const override; - - bool Valid() const override; - void Next() override; - -private: - CCoinsViewDBCursor(CDBIterator* pcursorIn, const uint256 &hashBlockIn): - CCoinsViewCursor(hashBlockIn), pcursor(pcursorIn) {} - std::unique_ptr<CDBIterator> pcursor; - std::pair<char, COutPoint> keyTmp; - - friend class CCoinsViewDB; -}; - /** Access to the block database (blocks/index/) */ class CBlockTreeDB : public CDBWrapper { diff --git a/src/txmempool.cpp b/src/txmempool.cpp index 4413da7ea7..c5a4bbf1b0 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -513,11 +513,10 @@ void CTxMemPool::removeForReorg(CChainState& active_chainstate, int flags) for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { const CTransaction& tx = it->GetTx(); LockPoints lp = it->GetLockPoints(); - assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); bool validLP = TestLockPointValidity(active_chainstate.m_chain, &lp); - CCoinsViewMemPool viewMempool(&active_chainstate.CoinsTip(), *this); + CCoinsViewMemPool view_mempool(&active_chainstate.CoinsTip(), *this); if (!CheckFinalTx(active_chainstate.m_chain.Tip(), tx, flags) - || !CheckSequenceLocks(active_chainstate.m_chain.Tip(), viewMempool, tx, flags, &lp, validLP)) { + || !CheckSequenceLocks(active_chainstate.m_chain.Tip(), view_mempool, tx, flags, &lp, validLP)) { // Note if CheckSequenceLocks fails the LockPoints may still be invalid // So it's critical that we remove the tx and not depend on the LockPoints. txToRemove.insert(it); @@ -638,10 +637,8 @@ void CTxMemPool::check(CChainState& active_chainstate) const uint64_t innerUsage = 0; CCoinsViewCache& active_coins_tip = active_chainstate.CoinsTip(); - assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(active_coins_tip)); // TODO: REVIEW-ONLY, REMOVE IN FUTURE COMMIT CCoinsViewCache mempoolDuplicate(const_cast<CCoinsViewCache*>(&active_coins_tip)); const int64_t spendheight = active_chainstate.m_chain.Height() + 1; - assert(g_chainman.m_blockman.GetSpendHeight(mempoolDuplicate) == spendheight); // TODO: REVIEW-ONLY, REMOVE IN FUTURE COMMIT std::list<const CTxMemPoolEntry*> waitingOnDependants; for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) { diff --git a/src/txmempool.h b/src/txmempool.h index 46b89049bb..ae4b16d377 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -874,7 +874,8 @@ protected: public: CCoinsViewMemPool(CCoinsView* baseIn, const CTxMemPool& mempoolIn); bool GetCoin(const COutPoint &outpoint, Coin &coin) const override; - /** Add the coins created by this transaction. */ + /** Add the coins created by this transaction. These coins are only temporarily stored in + * m_temp_added and cannot be flushed to the back end. Only used for package validation. 
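The txmempool.h comment above explains that coins added through CCoinsViewMemPool::PackageAddTransaction live only in m_temp_added and are never flushed to the backing view. A toy sketch of that read-through overlay; the container types and method names are simplifications, not the real coins-view classes:

#include <cassert>
#include <map>
#include <optional>
#include <string>

// Toy coins view: a base map plus a temporary overlay consulted on reads but
// never written back, loosely mirroring m_temp_added during package validation.
struct CoinsView {
    std::map<std::string, int> base;        // outpoint -> value
    std::map<std::string, int> temp_added;  // coins created by the package

    std::optional<int> GetCoin(const std::string& outpoint) const
    {
        if (auto it = temp_added.find(outpoint); it != temp_added.end()) return it->second;
        if (auto it = base.find(outpoint); it != base.end()) return it->second;
        return std::nullopt;
    }
    void PackageAdd(const std::string& outpoint, int value) { temp_added[outpoint] = value; }
    // Note: there is deliberately no Flush() for temp_added -- it never reaches the backend.
};

int main()
{
    CoinsView view;
    view.base["parent:0"] = 50;
    view.PackageAdd("child_parent:0", 20);   // spendable by later package members
    assert(view.GetCoin("child_parent:0").has_value());
    assert(view.GetCoin("parent:0").has_value());
    assert(!view.GetCoin("missing:0").has_value());
}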
*/ void PackageAddTransaction(const CTransactionRef& tx); }; diff --git a/src/util/system.cpp b/src/util/system.cpp index 13ccf7463e..ee7a76be29 100644 --- a/src/util/system.cpp +++ b/src/util/system.cpp @@ -620,14 +620,14 @@ void ArgsManager::ForceSetArg(const std::string& strArg, const std::string& strV m_settings.forced_settings[SettingName(strArg)] = strValue; } -void ArgsManager::AddCommand(const std::string& cmd, const std::string& help, const OptionsCategory& cat) +void ArgsManager::AddCommand(const std::string& cmd, const std::string& help) { Assert(cmd.find('=') == std::string::npos); Assert(cmd.at(0) != '-'); LOCK(cs_args); m_accept_any_command = false; // latch to false - std::map<std::string, Arg>& arg_map = m_available_args[cat]; + std::map<std::string, Arg>& arg_map = m_available_args[OptionsCategory::COMMANDS]; auto ret = arg_map.emplace(cmd, Arg{"", help, ArgsManager::COMMAND}); Assert(ret.second); // Fail on duplicate commands } @@ -1243,9 +1243,9 @@ void runCommand(const std::string& strCommand) } #endif -#ifdef ENABLE_EXTERNAL_SIGNER UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in) { +#ifdef ENABLE_EXTERNAL_SIGNER namespace bp = boost::process; UniValue result_json; @@ -1277,8 +1277,10 @@ UniValue RunCommandParseJSON(const std::string& str_command, const std::string& if (!result_json.read(result)) throw std::runtime_error("Unable to parse JSON: " + result); return result_json; -} +#else + throw std::runtime_error("Compiled without external signing support (required for external signing)."); #endif // ENABLE_EXTERNAL_SIGNER +} void SetupEnvironment() { diff --git a/src/util/system.h b/src/util/system.h index c4317c62d0..3547bad585 100644 --- a/src/util/system.h +++ b/src/util/system.h @@ -102,7 +102,6 @@ std::string ShellEscape(const std::string& arg); #if HAVE_SYSTEM void runCommand(const std::string& strCommand); #endif -#ifdef ENABLE_EXTERNAL_SIGNER /** * Execute a command which returns JSON, and parse the result. * @@ -111,7 +110,6 @@ void runCommand(const std::string& strCommand); * @return parsed JSON */ UniValue RunCommandParseJSON(const std::string& str_command, const std::string& str_std_in=""); -#endif // ENABLE_EXTERNAL_SIGNER /** * Most paths passed as configuration arguments are treated as relative to @@ -376,7 +374,7 @@ public: /** * Add subcommand */ - void AddCommand(const std::string& cmd, const std::string& help, const OptionsCategory& cat); + void AddCommand(const std::string& cmd, const std::string& help); /** * Add many hidden arguments diff --git a/src/validation.cpp b/src/validation.cpp index dceceefbdc..fe4de0d218 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -101,21 +101,6 @@ bool CBlockIndexWorkComparator::operator()(const CBlockIndex *pa, const CBlockIn return false; } -ChainstateManager g_chainman; - -CChainState& ChainstateActive() -{ - LOCK(::cs_main); - assert(g_chainman.m_active_chainstate); - return *g_chainman.m_active_chainstate; -} - -CChain& ChainActive() -{ - LOCK(::cs_main); - return ::ChainstateActive().m_chain; -} - /** * Mutex to guard access to validation specific variables, such as reading * or changing the chainstate. 
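The util/system.cpp hunk above keeps RunCommandParseJSON defined in every build and moves the ENABLE_EXTERNAL_SIGNER check inside the function body, so callers no longer need their own #ifdef and a disabled build fails with a clear runtime error instead. A small sketch of that pattern; FEATURE_X and the function name are placeholders:

#include <stdexcept>
#include <string>

// Keep the symbol in every build; gate only the implementation.
std::string RunFeatureX(const std::string& command)
{
#ifdef FEATURE_X
    return "ran: " + command;                 // real work when the feature is compiled in
#else
    throw std::runtime_error("Compiled without feature X support.");
#endif
}

int main()
{
    try {
        RunFeatureX("echo hi");
    } catch (const std::runtime_error&) {
        // Feature disabled at build time: callers see one well-defined runtime
        // error instead of a missing symbol or an #ifdef at every call site.
    }
}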
@@ -161,7 +146,6 @@ void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false); CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const { AssertLockHeld(cs_main); - assert(std::addressof(g_chainman.BlockIndex()) == std::addressof(m_block_index)); BlockMap::const_iterator it = m_block_index.find(hash); return it == m_block_index.end() ? nullptr : it->second; } @@ -170,7 +154,6 @@ CBlockIndex* BlockManager::FindForkInGlobalIndex(const CChain& chain, const CBlo { AssertLockHeld(cs_main); - assert(std::addressof(g_chainman.m_blockman) == std::addressof(*this)); // Find the latest block common to locator and chain - we expect that // locator.vHave is sorted descending by height. for (const uint256& hash : locator.vHave) { @@ -198,7 +181,6 @@ bool CheckFinalTx(const CBlockIndex* active_chain_tip, const CTransaction &tx, i { AssertLockHeld(cs_main); assert(active_chain_tip); // TODO: Make active_chain_tip a reference - assert(std::addressof(*::ChainActive().Tip()) == std::addressof(*active_chain_tip)); // By convention a negative value for flags indicates that the // current network-enforced consensus rules should be used. In @@ -237,7 +219,6 @@ bool TestLockPointValidity(CChain& active_chain, const LockPoints* lp) if (lp->maxInputBlock) { // Check whether ::ChainActive() is an extension of the block at which the LockPoints // calculation was valid. If not LockPoints are no longer valid - assert(std::addressof(::ChainActive()) == std::addressof(active_chain)); if (!active_chain.Contains(lp->maxInputBlock)) { return false; } @@ -331,7 +312,6 @@ static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache, siz std::vector<COutPoint> vNoSpendsRemaining; pool.TrimToSize(limit, &vNoSpendsRemaining); - assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(coins_cache)); for (const COutPoint& removed : vNoSpendsRemaining) coins_cache.Uncache(removed); } @@ -339,7 +319,6 @@ static void LimitMempoolSize(CTxMemPool& pool, CCoinsViewCache& coins_cache, siz static bool IsCurrentForFeeEstimation(CChainState& active_chainstate) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { AssertLockHeld(cs_main); - assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); if (active_chainstate.IsInitialBlockDownload()) return false; if (active_chainstate.m_chain.Tip()->GetBlockTime() < count_seconds(GetTime<std::chrono::seconds>() - MAX_FEE_ESTIMATION_TIP_AGE)) @@ -366,7 +345,6 @@ static void UpdateMempoolForReorg(CChainState& active_chainstate, CTxMemPool& me { AssertLockHeld(cs_main); AssertLockHeld(mempool.cs); - assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); std::vector<uint256> vHashUpdate; // disconnectpool's insertion_order index sorts the entries from // oldest to newest, but the oldest entry will be the last tx from the @@ -433,7 +411,6 @@ static bool CheckInputsFromMempoolAndCache(const CTransaction& tx, TxValidationS assert(txFrom->vout.size() > txin.prevout.n); assert(txFrom->vout[txin.prevout.n] == coin.out); } else { - assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(coins_tip)); const Coin& coinFromUTXOSet = coins_tip.AccessCoin(txin.prevout); assert(!coinFromUTXOSet.IsSpent()); assert(coinFromUTXOSet.out == coin.out); @@ -454,7 +431,6 @@ public: m_limit_ancestor_size(gArgs.GetArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT)*1000), m_limit_descendants(gArgs.GetArg("-limitdescendantcount", DEFAULT_DESCENDANT_LIMIT)), 
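The long run of deletions above removes the temporary std::addressof assertions that checked, during the de-globalization of validation, that the chainstate or block manager passed in still aliased the corresponding global. Once callers reliably pass the right object those checks are redundant. A small sketch of what such a transitional guard looks like and what it collapses to afterwards; the names are generic, not the real validation API:

#include <cassert>
#include <memory>

struct ChainState { int height = 0; };

ChainState g_chainstate;                        // legacy global being phased out
ChainState& ActiveChainState() { return g_chainstate; }

// Transitional form: the dependency is passed explicitly, but an assertion
// still verifies the caller handed in the object the global would have supplied.
void UpdateTip(ChainState& active)
{
    assert(std::addressof(active) == std::addressof(ActiveChainState())); // REVIEW-ONLY
    ++active.height;
}

// Final form, which the diff above converges on: the assertion is deleted and
// the global is no longer consulted at all.
void UpdateTipFinal(ChainState& active) { ++active.height; }

int main()
{
    UpdateTip(ActiveChainState());
    UpdateTipFinal(ActiveChainState());
    assert(g_chainstate.height == 2);
}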
m_limit_descendant_size(gArgs.GetArg("-limitdescendantsize", DEFAULT_DESCENDANT_SIZE_LIMIT)*1000) { - assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate)); } // We put the arguments we're handed into a struct, so we can pass them @@ -472,8 +448,10 @@ public: */ std::vector<COutPoint>& m_coins_to_uncache; const bool m_test_accept; - /** Disable BIP125 RBFing; disallow all conflicts with mempool transactions. */ - const bool disallow_mempool_conflicts; + /** Whether we allow transactions to replace mempool transactions by BIP125 rules. If false, + * any transaction spending the same inputs as a transaction in the mempool is considered + * a conflict. */ + const bool m_allow_bip125_replacement{true}; }; // Single transaction acceptance @@ -482,7 +460,7 @@ public: /** * Multiple transaction acceptance. Transactions may or may not be interdependent, * but must not conflict with each other. Parents must come before children if any - * dependencies exist, otherwise a TX_MISSING_INPUTS error will be returned. + * dependencies exist. */ PackageMempoolAcceptResult AcceptMultipleTransactions(const std::vector<CTransactionRef>& txns, ATMPArgs& args) EXCLUSIVE_LOCKS_REQUIRED(cs_main); @@ -605,7 +583,6 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // Only accept nLockTime-using transactions that can be mined in the next // block; we don't want our mempool filled up with transactions that can't // be mined yet. - assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); if (!CheckFinalTx(m_active_chainstate.m_chain.Tip(), tx, STANDARD_LOCKTIME_VERIFY_FLAGS)) return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-final"); @@ -619,6 +596,10 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) { const CTransaction* ptxConflicting = m_pool.GetConflictTx(txin.prevout); if (ptxConflicting) { + if (!args.m_allow_bip125_replacement) { + // Transaction conflicts with a mempool tx, but we're not allowing replacements. + return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "bip125-replacement-disallowed"); + } if (!setConflicts.count(ptxConflicting->GetHash())) { // Allow opt-out of transaction replacement by setting @@ -645,7 +626,7 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) break; } } - if (fReplacementOptOut || args.disallow_mempool_conflicts) { + if (fReplacementOptOut) { return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "txn-mempool-conflict"); } @@ -657,7 +638,6 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) LockPoints lp; m_view.SetBackend(m_viewmempool); - assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip())); const CCoinsViewCache& coins_cache = m_active_chainstate.CoinsTip(); // do all inputs exist? for (const CTxIn& txin : tx.vin) { @@ -695,18 +675,15 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) // be mined yet. // Pass in m_view which has all of the relevant inputs cached. Note that, since m_view's // backend was removed, it no longer pulls coins from the mempool. 
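The PreChecks change above replaces the disallow_mempool_conflicts flag with the positively named m_allow_bip125_replacement and rejects early with "bip125-replacement-disallowed" when a package transaction conflicts with something already in the mempool. A toy decision table capturing that logic; the function and return strings are illustrative, not the real PreChecks:

#include <cassert>
#include <string>

// A mempool conflict is only tolerated when BIP125 replacement is allowed and
// the conflicting transaction has not opted out of replacement.
std::string DecideOnConflict(bool has_mempool_conflict,
                             bool allow_bip125_replacement,
                             bool conflict_opted_out_of_rbf)
{
    if (!has_mempool_conflict) return "ok";
    if (!allow_bip125_replacement) return "bip125-replacement-disallowed";
    if (conflict_opted_out_of_rbf) return "txn-mempool-conflict";
    return "consider-replacement";
}

int main()
{
    // Package validation sets m_allow_bip125_replacement to false, so any
    // conflict is rejected outright.
    assert(DecideOnConflict(true, false, false) == "bip125-replacement-disallowed");
    // Single-transaction acceptance keeps replacement enabled unless the
    // existing transaction opted out.
    assert(DecideOnConflict(true, true, true) == "txn-mempool-conflict");
    assert(DecideOnConflict(true, true, false) == "consider-replacement");
    assert(DecideOnConflict(false, true, false) == "ok");
}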
- assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate)); if (!CheckSequenceLocks(m_active_chainstate.m_chain.Tip(), m_view, tx, STANDARD_LOCKTIME_VERIFY_FLAGS, &lp)) return state.Invalid(TxValidationResult::TX_PREMATURE_SPEND, "non-BIP68-final"); - assert(std::addressof(g_chainman.m_blockman) == std::addressof(m_active_chainstate.m_blockman)); if (!Consensus::CheckTxInputs(tx, state, m_view, m_active_chainstate.m_blockman.GetSpendHeight(m_view), ws.m_base_fees)) { return false; // state filled in by CheckTxInputs } // Check for non-standard pay-to-script-hash in inputs const auto& params = args.m_chainparams.GetConsensus(); - assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); auto taproot_state = VersionBitsState(m_active_chainstate.m_chain.Tip(), params, Consensus::DEPLOYMENT_TAPROOT, versionbitscache); if (fRequireStandard && !AreInputsStandard(tx, m_view, taproot_state == ThresholdState::ACTIVE)) { return state.Invalid(TxValidationResult::TX_INPUTS_NOT_STANDARD, "bad-txns-nonstandard-inputs"); @@ -733,7 +710,6 @@ bool MemPoolAccept::PreChecks(ATMPArgs& args, Workspace& ws) } } - assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); entry.reset(new CTxMemPoolEntry(ptx, ws.m_base_fees, nAcceptTime, m_active_chainstate.m_chain.Height(), fSpendsCoinbase, nSigOpsCost, lp)); unsigned int nSize = entry->GetTxSize(); @@ -986,9 +962,7 @@ bool MemPoolAccept::ConsensusScriptChecks(const ATMPArgs& args, Workspace& ws, P // There is a similar check in CreateNewBlock() to prevent creating // invalid blocks (using TestBlockValidity), however allowing such // transactions into the mempool can be exploited as a DoS attack. - assert(std::addressof(::ChainActive()) == std::addressof(m_active_chainstate.m_chain)); unsigned int currentBlockScriptVerifyFlags = GetBlockScriptFlags(m_active_chainstate.m_chain.Tip(), chainparams.GetConsensus()); - assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip())); if (!CheckInputsFromMempoolAndCache(tx, state, m_view, m_pool, currentBlockScriptVerifyFlags, txdata, m_active_chainstate.CoinsTip())) { return error("%s: BUG! PLEASE REPORT THIS! 
CheckInputScripts failed against latest-block but not STANDARD flags %s, %s", __func__, hash.ToString(), state.ToString()); @@ -1029,7 +1003,6 @@ bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws) // - it's not being re-added during a reorg which bypasses typical mempool fee limits // - the node is not behind // - the transaction is not dependent on any other transactions in the mempool - assert(std::addressof(::ChainstateActive()) == std::addressof(m_active_chainstate)); bool validForFeeEstimation = !fReplacementTransaction && !bypass_limits && IsCurrentForFeeEstimation(m_active_chainstate) && m_pool.HasNoInputsOf(tx); // Store transaction in memory @@ -1037,7 +1010,6 @@ bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws) // trim mempool and check if tx was trimmed if (!bypass_limits) { - assert(std::addressof(::ChainstateActive().CoinsTip()) == std::addressof(m_active_chainstate.CoinsTip())); LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip(), gArgs.GetArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)}); if (!m_pool.exists(hash)) return state.Invalid(TxValidationResult::TX_MEMPOOL_POLICY, "mempool full"); @@ -1080,65 +1052,15 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std:: { AssertLockHeld(cs_main); + // These context-free package limits can be done before taking the mempool lock. PackageValidationState package_state; - const unsigned int package_count = txns.size(); - - // These context-free package limits can be checked before taking the mempool lock. - if (package_count > MAX_PACKAGE_COUNT) { - package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-too-many-transactions"); - return PackageMempoolAcceptResult(package_state, {}); - } + if (!CheckPackage(txns, package_state)) return PackageMempoolAcceptResult(package_state, {}); - const int64_t total_size = std::accumulate(txns.cbegin(), txns.cend(), 0, - [](int64_t sum, const auto& tx) { return sum + GetVirtualTransactionSize(*tx); }); - // If the package only contains 1 tx, it's better to report the policy violation on individual tx size. - if (package_count > 1 && total_size > MAX_PACKAGE_SIZE * 1000) { - package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-too-large"); - return PackageMempoolAcceptResult(package_state, {}); - } - - // Construct workspaces and check package policies. std::vector<Workspace> workspaces{}; - workspaces.reserve(package_count); - { - std::unordered_set<uint256, SaltedTxidHasher> later_txids; - std::transform(txns.cbegin(), txns.cend(), std::inserter(later_txids, later_txids.end()), - [](const auto& tx) { return tx->GetHash(); }); - // Require the package to be sorted in order of dependency, i.e. parents appear before children. - // An unsorted package will fail anyway on missing-inputs, but it's better to quit earlier and - // fail on something less ambiguous (missing-inputs could also be an orphan or trying to - // spend nonexistent coins). - for (const auto& tx : txns) { - for (const auto& input : tx->vin) { - if (later_txids.find(input.prevout.hash) != later_txids.end()) { - // The parent is a subsequent transaction in the package. 
- package_state.Invalid(PackageValidationResult::PCKG_POLICY, "package-not-sorted"); - return PackageMempoolAcceptResult(package_state, {}); - } - } - later_txids.erase(tx->GetHash()); - workspaces.emplace_back(Workspace(tx)); - } - } + workspaces.reserve(txns.size()); + std::transform(txns.cbegin(), txns.cend(), std::back_inserter(workspaces), + [](const auto& tx) { return Workspace(tx); }); std::map<const uint256, const MempoolAcceptResult> results; - { - // Don't allow any conflicting transactions, i.e. spending the same inputs, in a package. - std::unordered_set<COutPoint, SaltedOutpointHasher> inputs_seen; - for (const auto& tx : txns) { - for (const auto& input : tx->vin) { - if (inputs_seen.find(input.prevout) != inputs_seen.end()) { - // This input is also present in another tx in the package. - package_state.Invalid(PackageValidationResult::PCKG_POLICY, "conflict-in-package"); - return PackageMempoolAcceptResult(package_state, {}); - } - } - // Batch-add all the inputs for a tx at a time. If we added them 1 at a time, we could - // catch duplicate inputs within a single tx. This is a more severe, consensus error, - // and we want to report that from CheckTransaction instead. - std::transform(tx->vin.cbegin(), tx->vin.cend(), std::inserter(inputs_seen, inputs_seen.end()), - [](const auto& input) { return input.prevout; }); - } - } LOCK(m_pool.cs); @@ -1151,10 +1073,10 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std:: return PackageMempoolAcceptResult(package_state, std::move(results)); } // Make the coins created by this transaction available for subsequent transactions in the - // package to spend. Since we already checked conflicts in the package and RBFs are - // impossible, we don't need to track the coins spent. Note that this logic will need to be - // updated if RBFs in packages are allowed in the future. - assert(args.disallow_mempool_conflicts); + // package to spend. Since we already checked conflicts in the package and we don't allow + // replacements, we don't need to track the coins spent. Note that this logic will need to be + // updated if package replace-by-fee is allowed in the future. 
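The AcceptMultipleTransactions hunk above folds the inline package policy checks (transaction count, total size, parents-before-children ordering via later_txids, and duplicate inputs via inputs_seen) into a single CheckPackage() call. A simplified, self-contained sketch of checks of that shape over toy transactions; the limits and types are illustrative, and the size check is omitted because the toy transactions have no size:

#include <cassert>
#include <set>
#include <string>
#include <vector>

// Simplified transaction: an id plus the ids of the outputs it spends.
struct Tx { std::string id; std::vector<std::string> spends; };

bool CheckPackageSketch(const std::vector<Tx>& txns, std::string& err)
{
    constexpr size_t MAX_COUNT = 25;                         // illustrative limit
    if (txns.size() > MAX_COUNT) { err = "package-too-many-transactions"; return false; }

    std::set<std::string> later_txids;                       // txids at or after the current position
    for (const Tx& tx : txns) later_txids.insert(tx.id);

    std::set<std::string> inputs_seen;
    for (const Tx& tx : txns) {
        for (const std::string& in : tx.spends) {
            // A spent id that appears later in the package means a child came first.
            if (later_txids.count(in)) { err = "package-not-sorted"; return false; }
            // The same input spent by two package members is a conflict.
            if (!inputs_seen.insert(in).second) { err = "conflict-in-package"; return false; }
        }
        later_txids.erase(tx.id);
    }
    return true;
}

int main()
{
    std::string err;
    assert(CheckPackageSketch({{"a", {}}, {"b", {"a"}}}, err));      // parent before child: ok
    assert(!CheckPackageSketch({{"b", {"a"}}, {"a", {}}}, err));     // child first
    assert(err == "package-not-sorted");
    assert(!CheckPackageSketch({{"x", {"o"}}, {"y", {"o"}}}, err));  // same input spent twice
    assert(err == "conflict-in-package");
}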
+ assert(!args.m_allow_bip125_replacement); m_viewmempool.PackageAddTransaction(ws.m_ptx); } @@ -1188,9 +1110,8 @@ static MempoolAcceptResult AcceptToMemoryPoolWithTime(const CChainParams& chainp { std::vector<COutPoint> coins_to_uncache; MemPoolAccept::ATMPArgs args { chainparams, nAcceptTime, bypass_limits, coins_to_uncache, - test_accept, /* disallow_mempool_conflicts */ false }; + test_accept, /* m_allow_bip125_replacement */ true }; - assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); const MempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptSingleTransaction(tx, args); if (result.m_result_type != MempoolAcceptResult::ResultType::VALID) { // Remove coins that were not present in the coins cache before calling @@ -1210,7 +1131,6 @@ static MempoolAcceptResult AcceptToMemoryPoolWithTime(const CChainParams& chainp MempoolAcceptResult AcceptToMemoryPool(CChainState& active_chainstate, CTxMemPool& pool, const CTransactionRef& tx, bool bypass_limits, bool test_accept) { - assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); return AcceptToMemoryPoolWithTime(Params(), pool, active_chainstate, tx, GetTime(), bypass_limits, test_accept); } @@ -1225,12 +1145,10 @@ PackageMempoolAcceptResult ProcessNewPackage(CChainState& active_chainstate, CTx std::vector<COutPoint> coins_to_uncache; const CChainParams& chainparams = Params(); MemPoolAccept::ATMPArgs args { chainparams, GetTime(), /* bypass_limits */ false, coins_to_uncache, - test_accept, /* disallow_mempool_conflicts */ true }; - assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); + test_accept, /* m_allow_bip125_replacement */ false }; const PackageMempoolAcceptResult result = MemPoolAccept(pool, active_chainstate).AcceptMultipleTransactions(package, args); // Uncache coins pertaining to transactions that were not submitted to the mempool. - // Ensure the cache is still within its size limits. 
for (const COutPoint& hashTx : coins_to_uncache) { active_chainstate.CoinsTip().Uncache(hashTx); } @@ -1366,7 +1284,6 @@ static void AlertNotify(const std::string& strMessage) void CChainState::CheckForkWarningConditions() { AssertLockHeld(cs_main); - assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); // Before we get past initial download, we cannot reliably alert about forks // (we assume we don't get stuck on a fork before finishing our initial sync) @@ -1385,7 +1302,6 @@ void CChainState::CheckForkWarningConditions() // Called both upon regular invalid block discovery *and* InvalidateBlock void CChainState::InvalidChainFound(CBlockIndex* pindexNew) { - assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); if (!pindexBestInvalid || pindexNew->nChainWork > pindexBestInvalid->nChainWork) pindexBestInvalid = pindexNew; if (pindexBestHeader != nullptr && pindexBestHeader->GetAncestor(pindexNew->nHeight) == pindexNew) { @@ -1446,7 +1362,6 @@ bool CScriptCheck::operator()() { int BlockManager::GetSpendHeight(const CCoinsViewCache& inputs) { AssertLockHeld(cs_main); - assert(std::addressof(g_chainman.m_blockman) == std::addressof(*this)); CBlockIndex* pindexPrev = LookupBlockIndex(inputs.GetBestBlock()); return pindexPrev->nHeight + 1; } @@ -2330,14 +2245,13 @@ static void UpdateTip(CTxMemPool& mempool, const CBlockIndex* pindexNew, const C } bilingual_str warning_messages; - assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); if (!active_chainstate.IsInitialBlockDownload()) { const CBlockIndex* pindex = pindexNew; for (int bit = 0; bit < VERSIONBITS_NUM_BITS; bit++) { WarningBitsConditionChecker checker(bit); ThresholdState state = checker.GetStateFor(pindex, chainParams.GetConsensus(), warningcache[bit]); if (state == ThresholdState::ACTIVE || state == ThresholdState::LOCKED_IN) { - const bilingual_str warning = strprintf(_("Warning: unknown new rules activated (versionbit %i)"), bit); + const bilingual_str warning = strprintf(_("Unknown new rules activated (versionbit %i)"), bit); if (state == ThresholdState::ACTIVE) { DoWarning(warning); } else { @@ -2346,7 +2260,6 @@ static void UpdateTip(CTxMemPool& mempool, const CBlockIndex* pindexNew, const C } } } - assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); LogPrintf("%s: new best=%s height=%d version=0x%08x log2_work=%f tx=%lu date='%s' progress=%f cache=%.1fMiB(%utxo)%s\n", __func__, pindexNew->GetBlockHash().ToString(), pindexNew->nHeight, pindexNew->nVersion, log(pindexNew->nChainWork.getdouble())/log(2.0), (unsigned long)pindexNew->nChainTx, @@ -2606,7 +2519,6 @@ bool CChainState::ActivateBestChainStep(BlockValidationState& state, const CChai { AssertLockHeld(cs_main); AssertLockHeld(m_mempool.cs); - assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); const CBlockIndex* pindexOldTip = m_chain.Tip(); const CBlockIndex* pindexFork = m_chain.FindFork(pindexMostWork); @@ -2706,7 +2618,6 @@ static bool NotifyHeaderTip(CChainState& chainstate) LOCKS_EXCLUDED(cs_main) { if (pindexHeader != pindexHeaderOld) { fNotify = true; - assert(std::addressof(::ChainstateActive()) == std::addressof(chainstate)); fInitialBlockDownload = chainstate.IsInitialBlockDownload(); pindexHeaderOld = pindexHeader; } @@ -2917,7 +2828,6 @@ bool CChainState::InvalidateBlock(BlockValidationState& state, const CChainParam // transactions back to the mempool if disconnecting was successful, // and we're not doing a very deep invalidation (in which case // 
keeping the mempool up to date is probably futile anyway). - assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); UpdateMempoolForReorg(*this, m_mempool, disconnectpool, /* fAddToMempool = */ (++disconnected <= 10) && ret); if (!ret) return false; assert(invalid_walk_tip->pprev == m_chain.Tip()); @@ -3248,7 +3158,6 @@ CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data) for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints)) { const uint256& hash = i.second; - assert(std::addressof(g_chainman.m_blockman) == std::addressof(*this)); CBlockIndex* pindex = LookupBlockIndex(hash); if (pindex) { return pindex; @@ -3281,7 +3190,6 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, BlockValidatio // Don't accept any forks from the main chain prior to last checkpoint. // GetLastCheckpoint finds the last checkpoint in MapCheckpoints that's in our // BlockIndex(). - assert(std::addressof(g_chainman.m_blockman) == std::addressof(blockman)); CBlockIndex* pcheckpoint = blockman.GetLastCheckpoint(params.Checkpoints()); if (pcheckpoint && nHeight < pcheckpoint->nHeight) { LogPrintf("ERROR: %s: forked chain older than last checkpoint (height %d)\n", __func__, nHeight); @@ -3485,7 +3393,6 @@ bool BlockManager::AcceptBlockHeader(const CBlockHeader& block, BlockValidationS // Exposed wrapper for AcceptBlockHeader bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& headers, BlockValidationState& state, const CChainParams& chainparams, const CBlockIndex** ppindex) { - assert(std::addressof(::ChainstateActive()) == std::addressof(ActiveChainstate())); AssertLockNotHeld(cs_main); { LOCK(cs_main); @@ -3576,7 +3483,6 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, Block // Write block to history file if (fNewBlock) *fNewBlock = true; - assert(std::addressof(::ChainActive()) == std::addressof(m_chain)); try { FlatFilePos blockPos = SaveBlockToDisk(block, pindex->nHeight, m_chain, chainparams, dbp); if (blockPos.IsNull()) { @@ -3598,7 +3504,6 @@ bool CChainState::AcceptBlock(const std::shared_ptr<const CBlock>& pblock, Block bool ChainstateManager::ProcessNewBlock(const CChainParams& chainparams, const std::shared_ptr<const CBlock>& block, bool force_processing, bool* new_block) { AssertLockNotHeld(cs_main); - assert(std::addressof(::ChainstateActive()) == std::addressof(ActiveChainstate())); { CBlockIndex *pindex = nullptr; @@ -3643,7 +3548,6 @@ bool TestBlockValidity(BlockValidationState& state, bool fCheckMerkleRoot) { AssertLockHeld(cs_main); - assert(std::addressof(::ChainstateActive()) == std::addressof(chainstate)); assert(pindexPrev && pindexPrev == chainstate.m_chain.Tip()); CCoinsViewCache viewNew(&chainstate.CoinsTip()); uint256 block_hash(block.GetHash()); @@ -3653,7 +3557,6 @@ bool TestBlockValidity(BlockValidationState& state, indexDummy.phashBlock = &block_hash; // NOTE: CheckBlockHeader is called by CheckBlock - assert(std::addressof(g_chainman.m_blockman) == std::addressof(chainstate.m_blockman)); if (!ContextualCheckBlockHeader(block, state, chainstate.m_blockman, chainparams, pindexPrev, GetAdjustedTime())) return error("%s: Consensus::ContextualCheckBlockHeader: %s", __func__, state.ToString()); if (!CheckBlock(block, state, chainparams.GetConsensus(), fCheckPOW, fCheckMerkleRoot)) @@ -3733,7 +3636,6 @@ void PruneBlockFilesManual(CChainState& active_chainstate, int nManualPruneHeigh { BlockValidationState state; const CChainParams& chainparams = Params(); - 
assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); if (!active_chainstate.FlushStateToDisk( chainparams, state, FlushStateMode::NONE, nManualPruneHeight)) { LogPrintf("%s: failed to flush state (%s)\n", __func__, state.ToString()); @@ -3887,7 +3789,6 @@ void BlockManager::Unload() { bool CChainState::LoadBlockIndexDB(const CChainParams& chainparams) { - assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); if (!m_blockman.LoadBlockIndex( chainparams.GetConsensus(), *pblocktree, setBlockIndexCandidates)) { @@ -3944,7 +3845,6 @@ bool CChainState::LoadBlockIndexDB(const CChainParams& chainparams) void CChainState::LoadMempool(const ArgsManager& args) { if (args.GetArg("-persistmempool", DEFAULT_PERSIST_MEMPOOL)) { - assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); ::LoadMempool(m_mempool, *this); } m_mempool.SetIsLoaded(!ShutdownRequested()); @@ -3996,7 +3896,6 @@ bool CVerifyDB::VerifyDB( { AssertLockHeld(cs_main); - assert(std::addressof(::ChainstateActive()) == std::addressof(chainstate)); if (chainstate.m_chain.Tip() == nullptr || chainstate.m_chain.Tip()->pprev == nullptr) return true; @@ -4265,7 +4164,6 @@ bool CChainState::LoadGenesisBlock(const CChainParams& chainparams) if (m_blockman.m_block_index.count(chainparams.GenesisBlock().GetHash())) return true; - assert(std::addressof(::ChainActive()) == std::addressof(m_chain)); try { const CBlock& block = chainparams.GenesisBlock(); FlatFilePos blockPos = SaveBlockToDisk(block, 0, m_chain, chainparams, nullptr); @@ -4329,7 +4227,6 @@ void CChainState::LoadExternalBlockFile(const CChainParams& chainparams, FILE* f { LOCK(cs_main); // detect out of order blocks, and store them for later - assert(std::addressof(g_chainman.m_blockman) == std::addressof(m_blockman)); if (hash != chainparams.GetConsensus().hashGenesisBlock && !m_blockman.LookupBlockIndex(block.hashPrevBlock)) { LogPrint(BCLog::REINDEX, "%s: Out of order block %s, parent %s not known\n", __func__, hash.ToString(), block.hashPrevBlock.ToString()); @@ -4339,11 +4236,9 @@ void CChainState::LoadExternalBlockFile(const CChainParams& chainparams, FILE* f } // process in case the block isn't known yet - assert(std::addressof(g_chainman.m_blockman) == std::addressof(m_blockman)); CBlockIndex* pindex = m_blockman.LookupBlockIndex(hash); if (!pindex || (pindex->nStatus & BLOCK_HAVE_DATA) == 0) { BlockValidationState state; - assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); if (AcceptBlock(pblock, state, chainparams, nullptr, true, dbp, nullptr)) { nLoaded++; } @@ -4358,13 +4253,11 @@ void CChainState::LoadExternalBlockFile(const CChainParams& chainparams, FILE* f // Activate the genesis block so normal node progress can continue if (hash == chainparams.GetConsensus().hashGenesisBlock) { BlockValidationState state; - assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); if (!ActivateBestChain(state, chainparams, nullptr)) { break; } } - assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); NotifyHeaderTip(*this); // Recursively process earlier encountered successors of this block @@ -4383,7 +4276,6 @@ void CChainState::LoadExternalBlockFile(const CChainParams& chainparams, FILE* f head.ToString()); LOCK(cs_main); BlockValidationState dummy; - assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); if (AcceptBlock(pblockrecursive, dummy, chainparams, nullptr, true, &it->second, nullptr)) { nLoaded++; @@ -4392,7 +4284,6 @@ void 
CChainState::LoadExternalBlockFile(const CChainParams& chainparams, FILE* f } range.first++; mapBlocksUnknownParent.erase(it); - assert(std::addressof(::ChainstateActive()) == std::addressof(*this)); NotifyHeaderTip(*this); } } @@ -4673,7 +4564,6 @@ bool LoadMempool(CTxMemPool& pool, CChainState& active_chainstate, FopenFn mocka } if (nTime > nNow - nExpiryTimeout) { LOCK(cs_main); - assert(std::addressof(::ChainstateActive()) == std::addressof(active_chainstate)); if (AcceptToMemoryPoolWithTime(chainparams, pool, active_chainstate, tx, nTime, false /* bypass_limits */, false /* test_accept */).m_result_type == MempoolAcceptResult::ResultType::VALID) { ++count; diff --git a/src/validation.h b/src/validation.h index 359a6c779f..5720ba8071 100644 --- a/src/validation.h +++ b/src/validation.h @@ -234,11 +234,13 @@ MempoolAcceptResult AcceptToMemoryPool(CChainState& active_chainstate, CTxMemPoo bool bypass_limits, bool test_accept=false) EXCLUSIVE_LOCKS_REQUIRED(cs_main); /** -* Atomically test acceptance of a package. If the package only contains one tx, package rules still apply. +* Atomically test acceptance of a package. If the package only contains one tx, package rules still +* apply. Package validation does not allow BIP125 replacements, so the transaction(s) cannot spend +* the same inputs as any transaction in the mempool. * @param[in] txns Group of transactions which may be independent or contain -* parent-child dependencies. The transactions must not conflict, i.e. -* must not spend the same inputs, even if it would be a valid BIP125 -* replace-by-fee. Parents must appear before children. +* parent-child dependencies. The transactions must not conflict +* with each other, i.e., must not spend the same inputs. If any +* dependencies exist, parents must appear before children. * @returns a PackageMempoolAcceptResult which includes a MempoolAcceptResult for each transaction. * If a transaction fails, validation will exit early and some results may be missing. */ @@ -269,9 +271,13 @@ bool TestLockPointValidity(CChain& active_chain, const LockPoints* lp) EXCLUSIVE * Check if transaction will be BIP68 final in the next block to be created on top of tip. * @param[in] tip Chain tip to check tx sequence locks against. For example, * the tip of the current active chain. - * @param[in] coins_view Any CCoinsView that provides access to the relevant coins - * for checking sequence locks. Any CCoinsView can be passed in; - * it is assumed to be consistent with the tip. + * @param[in] coins_view Any CCoinsView that provides access to the relevant coins for + * checking sequence locks. For example, it can be a CCoinsViewCache + * that isn't connected to anything but contains all the relevant + * coins, or a CCoinsViewMemPool that is connected to the + * mempool and chainstate UTXO set. In the latter case, the caller is + * responsible for holding the appropriate locks to ensure that + * calls to GetCoin() return correct coins. * Simulates calling SequenceLocks() with data from the tip passed in. * Optionally stores in LockPoints the resulting height and time calculated and the hash * of the block needed for calculation or skips the calculation and uses the LockPoints @@ -885,10 +891,6 @@ private: CAutoFile& coins_file, const SnapshotMetadata& metadata); - // For access to m_active_chainstate. - friend CChainState& ChainstateActive(); - friend CChain& ChainActive(); - public: std::thread m_load_block; //! 
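The expanded CheckSequenceLocks comment above stresses that any coins view consistent with the tip can be supplied, because the function only needs each input's confirmation height (or time) to evaluate BIP68 relative lock times. A simplified, height-only sketch of that BIP68 arithmetic; the bit constants follow BIP68, while time-based locks and the version-2 gating are deliberately omitted:

#include <cassert>
#include <cstdint>
#include <vector>

constexpr uint32_t SEQUENCE_LOCKTIME_DISABLE_FLAG = 1u << 31; // bit 31: relative lock disabled
constexpr uint32_t SEQUENCE_LOCKTIME_TYPE_FLAG    = 1u << 22; // bit 22: time-based lock
constexpr uint32_t SEQUENCE_LOCKTIME_MASK         = 0x0000ffff;

struct Input {
    uint32_t nSequence;
    int prevout_height;   // height at which the spent coin confirmed
};

// Height-based BIP68 check only: the tx is final at `next_height` if every
// height-locked input has matured by the required number of blocks.
bool SequenceLocksFinalAtHeight(const std::vector<Input>& inputs, int next_height)
{
    for (const Input& in : inputs) {
        if (in.nSequence & SEQUENCE_LOCKTIME_DISABLE_FLAG) continue; // lock not in use
        if (in.nSequence & SEQUENCE_LOCKTIME_TYPE_FLAG) continue;    // time lock, out of scope here
        const int required = static_cast<int>(in.nSequence & SEQUENCE_LOCKTIME_MASK);
        if (in.prevout_height + required > next_height) return false;
    }
    return true;
}

int main()
{
    // Coin confirmed at height 100, input demands 10 blocks of maturity.
    assert(!SequenceLocksFinalAtHeight({{10, 100}}, 105));
    assert(SequenceLocksFinalAtHeight({{10, 100}}, 110));
    assert(SequenceLocksFinalAtHeight({{SEQUENCE_LOCKTIME_DISABLE_FLAG, 100}}, 101));
}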
A single BlockManager instance is shared across each constructed @@ -1011,16 +1013,13 @@ public: //! Check to see if caches are out of balance and if so, call //! ResizeCoinsCaches() as needed. void MaybeRebalanceCaches() EXCLUSIVE_LOCKS_REQUIRED(::cs_main); -}; - -/** DEPRECATED! Please use node.chainman instead. May only be used in validation.cpp internally */ -extern ChainstateManager g_chainman GUARDED_BY(::cs_main); -/** Please prefer the identical ChainstateManager::ActiveChainstate */ -CChainState& ChainstateActive(); - -/** Please prefer the identical ChainstateManager::ActiveChain */ -CChain& ChainActive(); + ~ChainstateManager() { + LOCK(::cs_main); + UnloadBlockIndex(/* mempool */ nullptr, *this); + Reset(); + } +}; /** Global variable that points to the active block tree (protected by cs_main) */ extern std::unique_ptr<CBlockTreeDB> pblocktree; diff --git a/src/wallet/external_signer_scriptpubkeyman.cpp b/src/wallet/external_signer_scriptpubkeyman.cpp index fe2c810afa..efef1ec754 100644 --- a/src/wallet/external_signer_scriptpubkeyman.cpp +++ b/src/wallet/external_signer_scriptpubkeyman.cpp @@ -13,8 +13,6 @@ #include <utility> #include <vector> -#ifdef ENABLE_EXTERNAL_SIGNER - bool ExternalSignerScriptPubKeyMan::SetupDescriptor(std::unique_ptr<Descriptor> desc) { LOCK(cs_desc_man); @@ -62,10 +60,10 @@ bool ExternalSignerScriptPubKeyMan::DisplayAddress(const CScript scriptPubKey, c } // If sign is true, transaction must previously have been filled -TransactionError ExternalSignerScriptPubKeyMan::FillPSBT(PartiallySignedTransaction& psbt, int sighash_type, bool sign, bool bip32derivs, int* n_signed) const +TransactionError ExternalSignerScriptPubKeyMan::FillPSBT(PartiallySignedTransaction& psbt, const PrecomputedTransactionData& txdata, int sighash_type, bool sign, bool bip32derivs, int* n_signed) const { if (!sign) { - return DescriptorScriptPubKeyMan::FillPSBT(psbt, sighash_type, false, bip32derivs, n_signed); + return DescriptorScriptPubKeyMan::FillPSBT(psbt, txdata, sighash_type, false, bip32derivs, n_signed); } // Already complete if every input is now signed @@ -84,5 +82,3 @@ TransactionError ExternalSignerScriptPubKeyMan::FillPSBT(PartiallySignedTransact FinalizePSBT(psbt); // This won't work in a multisig setup return TransactionError::OK; } - -#endif diff --git a/src/wallet/external_signer_scriptpubkeyman.h b/src/wallet/external_signer_scriptpubkeyman.h index 1786958912..8eed947b7b 100644 --- a/src/wallet/external_signer_scriptpubkeyman.h +++ b/src/wallet/external_signer_scriptpubkeyman.h @@ -5,7 +5,6 @@ #ifndef BITCOIN_WALLET_EXTERNAL_SIGNER_SCRIPTPUBKEYMAN_H #define BITCOIN_WALLET_EXTERNAL_SIGNER_SCRIPTPUBKEYMAN_H -#ifdef ENABLE_EXTERNAL_SIGNER #include <wallet/scriptpubkeyman.h> #include <memory> @@ -29,8 +28,6 @@ class ExternalSignerScriptPubKeyMan : public DescriptorScriptPubKeyMan bool DisplayAddress(const CScript scriptPubKey, const ExternalSigner &signer) const; - TransactionError FillPSBT(PartiallySignedTransaction& psbt, int sighash_type = 1 /* SIGHASH_ALL */, bool sign = true, bool bip32derivs = false, int* n_signed = nullptr) const override; + TransactionError FillPSBT(PartiallySignedTransaction& psbt, const PrecomputedTransactionData& txdata, int sighash_type = 1 /* SIGHASH_ALL */, bool sign = true, bool bip32derivs = false, int* n_signed = nullptr) const override; }; -#endif - #endif // BITCOIN_WALLET_EXTERNAL_SIGNER_SCRIPTPUBKEYMAN_H diff --git a/src/wallet/interfaces.cpp b/src/wallet/interfaces.cpp index ee92316b89..5a832d020b 100644 --- 
a/src/wallet/interfaces.cpp +++ b/src/wallet/interfaces.cpp @@ -349,9 +349,9 @@ public: TransactionError fillPSBT(int sighash_type, bool sign, bool bip32derivs, + size_t* n_signed, PartiallySignedTransaction& psbtx, - bool& complete, - size_t* n_signed) override + bool& complete) override { return m_wallet->FillPSBT(psbtx, complete, sighash_type, sign, bip32derivs, n_signed); } diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp index 726b13beac..35649ab02c 100644 --- a/src/wallet/rpcdump.cpp +++ b/src/wallet/rpcdump.cpp @@ -286,6 +286,9 @@ RPCHelpMan importaddress() if (fP2SH) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Cannot use the p2sh flag with an address - use a script instead"); } + if (OutputTypeFromDestination(dest) == OutputType::BECH32M) { + throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Bech32m addresses cannot be imported into legacy wallets"); + } pwallet->MarkDirty(); @@ -962,6 +965,9 @@ static UniValue ProcessImportLegacy(ImportData& import_data, std::map<CKeyID, CP if (!IsValidDestination(dest)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Invalid address \"" + output + "\""); } + if (OutputTypeFromDestination(dest) == OutputType::BECH32M) { + throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Bech32m addresses cannot be imported into legacy wallets"); + } script = GetScriptForDestination(dest); } else { if (!IsHex(output)) { @@ -1086,6 +1092,9 @@ static UniValue ProcessImportDescriptor(ImportData& import_data, std::map<CKeyID if (!parsed_desc) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, error); } + if (parsed_desc->GetOutputType() == OutputType::BECH32M) { + throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Bech32m descriptors cannot be imported into legacy wallets"); + } have_solving_data = parsed_desc->IsSolvable(); const bool watch_only = data.exists("watchonly") ? data["watchonly"].get_bool() : false; @@ -1530,6 +1539,18 @@ static UniValue ProcessDescriptorImport(CWallet& wallet, const UniValue& data, c } } + // Taproot descriptors cannot be imported if Taproot is not yet active. + // Check if this is a Taproot descriptor + CTxDestination dest; + ExtractDestination(scripts[0], dest); + if (std::holds_alternative<WitnessV1Taproot>(dest)) { + // Check if Taproot is active + if (!wallet.chain().isTaprootActive()) { + // Taproot is not active, raise an error + throw JSONRPCError(RPC_WALLET_ERROR, "Cannot import tr() descriptor when Taproot is not active"); + } + } + // If private keys are enabled, check some things. 
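The rpcdump hunk above detects a Taproot destination by inspecting the CTxDestination variant with std::holds_alternative<WitnessV1Taproot>. A self-contained illustration of that variant-inspection pattern; the destination structs here are placeholders and the error text mirrors the message added in the diff:

#include <cassert>
#include <string>
#include <variant>

// Placeholder types standing in for the CTxDestination alternatives.
struct PKHash { std::string hex; };
struct WitnessV0KeyHash { std::string hex; };
struct WitnessV1Taproot { std::string hex; };

using Destination = std::variant<PKHash, WitnessV0KeyHash, WitnessV1Taproot>;

// Mirrors the guard added above: refuse an import when the destination is a
// Taproot output and Taproot is not yet active.
std::string CheckImport(const Destination& dest, bool taproot_active)
{
    if (std::holds_alternative<WitnessV1Taproot>(dest) && !taproot_active) {
        return "Cannot import tr() descriptor when Taproot is not active";
    }
    return "ok";
}

int main()
{
    assert(CheckImport(WitnessV0KeyHash{"00"}, false) == "ok");
    assert(CheckImport(WitnessV1Taproot{"51"}, true) == "ok");
    assert(CheckImport(WitnessV1Taproot{"51"}, false) != "ok");
}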
if (!wallet.IsWalletFlagSet(WALLET_FLAG_DISABLE_PRIVATE_KEYS)) { if (keys.keys.empty()) { diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp index 534c974178..bc5d771b6e 100644 --- a/src/wallet/rpcwallet.cpp +++ b/src/wallet/rpcwallet.cpp @@ -269,6 +269,9 @@ static RPCHelpMan getnewaddress() if (!ParseOutputType(request.params[1].get_str(), output_type)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("Unknown address type '%s'", request.params[1].get_str())); } + if (output_type == OutputType::BECH32M && pwallet->GetLegacyScriptPubKeyMan()) { + throw JSONRPCError(RPC_INVALID_PARAMETER, "Legacy wallets cannot provide bech32m addresses"); + } } CTxDestination dest; @@ -313,6 +316,9 @@ static RPCHelpMan getrawchangeaddress() if (!ParseOutputType(request.params[0].get_str(), output_type)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("Unknown address type '%s'", request.params[0].get_str())); } + if (output_type == OutputType::BECH32M && pwallet->GetLegacyScriptPubKeyMan()) { + throw JSONRPCError(RPC_INVALID_PARAMETER, "Legacy wallets cannot provide bech32m addresses"); + } } CTxDestination dest; @@ -1004,6 +1010,9 @@ static RPCHelpMan addmultisigaddress() if (!ParseOutputType(request.params[3].get_str(), output_type)) { throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, strprintf("Unknown address type '%s'", request.params[3].get_str())); } + if (output_type == OutputType::BECH32M) { + throw JSONRPCError(RPC_INVALID_ADDRESS_OR_KEY, "Bech32m multisig addresses cannot be created with legacy wallets"); + } } // Construct using pay-to-script-hash: @@ -3320,7 +3329,8 @@ RPCHelpMan signrawtransactionwithwallet() }, }, }, - {"sighashtype", RPCArg::Type::STR, RPCArg::Default{"ALL"}, "The signature hash type. Must be one of\n" + {"sighashtype", RPCArg::Type::STR, RPCArg::Default{"DEFAULT"}, "The signature hash type. Must be one of\n" + " \"DEFAULT\"\n" " \"ALL\"\n" " \"NONE\"\n" " \"SINGLE\"\n" @@ -3542,7 +3552,7 @@ static RPCHelpMan bumpfee_helper(std::string method_name) } else { PartiallySignedTransaction psbtx(mtx); bool complete = false; - const TransactionError err = pwallet->FillPSBT(psbtx, complete, SIGHASH_ALL, false /* sign */, true /* bip32derivs */); + const TransactionError err = pwallet->FillPSBT(psbtx, complete, SIGHASH_DEFAULT, false /* sign */, true /* bip32derivs */); CHECK_NONFATAL(err == TransactionError::OK); CHECK_NONFATAL(!complete); CDataStream ssTx(SER_NETWORK, PROTOCOL_VERSION); @@ -3847,13 +3857,18 @@ RPCHelpMan getaddressinfo() isminetype mine = pwallet->IsMine(dest); ret.pushKV("ismine", bool(mine & ISMINE_SPENDABLE)); - bool solvable = provider && IsSolvable(*provider, scriptPubKey); - ret.pushKV("solvable", solvable); - - if (solvable) { - ret.pushKV("desc", InferDescriptor(scriptPubKey, *provider)->ToString()); + if (provider) { + auto inferred = InferDescriptor(scriptPubKey, *provider); + bool solvable = inferred->IsSolvable() || IsSolvable(*provider, scriptPubKey); + ret.pushKV("solvable", solvable); + if (solvable) { + ret.pushKV("desc", inferred->ToString()); + } + } else { + ret.pushKV("solvable", false); } + DescriptorScriptPubKeyMan* desc_spk_man = dynamic_cast<DescriptorScriptPubKeyMan*>(pwallet->GetScriptPubKeyMan(scriptPubKey)); if (desc_spk_man) { std::string desc_str; @@ -4175,8 +4190,8 @@ static RPCHelpMan send() // First fill transaction with our data without signing, // so external signers are not asked sign more than once. 
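The rpcwallet changes above switch the wallet PSBT RPCs from defaulting to "ALL" to "DEFAULT", i.e. SIGHASH_DEFAULT, the Taproot default hash type that commits to all inputs and outputs like SIGHASH_ALL. A hedged sketch of the kind of string-to-type mapping such an RPC argument needs; the numeric values are the standard sighash constants, but the parser name and its exact string set are illustrative, not Core's helper:

#include <cassert>
#include <map>
#include <stdexcept>
#include <string>

// Standard sighash type values; SIGHASH_DEFAULT (0) is the Taproot default.
enum SigHashType : int {
    SIGHASH_DEFAULT = 0,
    SIGHASH_ALL = 1,
    SIGHASH_NONE = 2,
    SIGHASH_SINGLE = 3,
    SIGHASH_ANYONECANPAY = 0x80,
};

// Illustrative parser for an RPC "sighashtype" string argument.
int ParseSighashArg(const std::string& s)
{
    static const std::map<std::string, int> kMap{
        {"DEFAULT", SIGHASH_DEFAULT},
        {"ALL", SIGHASH_ALL},
        {"NONE", SIGHASH_NONE},
        {"SINGLE", SIGHASH_SINGLE},
        {"ALL|ANYONECANPAY", SIGHASH_ALL | SIGHASH_ANYONECANPAY},
        {"NONE|ANYONECANPAY", SIGHASH_NONE | SIGHASH_ANYONECANPAY},
        {"SINGLE|ANYONECANPAY", SIGHASH_SINGLE | SIGHASH_ANYONECANPAY},
    };
    const auto it = kMap.find(s);
    if (it == kMap.end()) throw std::runtime_error(s + " is not a valid sighash parameter.");
    return it->second;
}

int main()
{
    assert(ParseSighashArg("DEFAULT") == 0);
    assert(ParseSighashArg("ALL") == 1);
    assert(ParseSighashArg("SINGLE|ANYONECANPAY") == (3 | 0x80));
}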
bool complete; - pwallet->FillPSBT(psbtx, complete, SIGHASH_ALL, false, true); - const TransactionError err = pwallet->FillPSBT(psbtx, complete, SIGHASH_ALL, true, false); + pwallet->FillPSBT(psbtx, complete, SIGHASH_DEFAULT, false, true); + const TransactionError err = pwallet->FillPSBT(psbtx, complete, SIGHASH_DEFAULT, true, false); if (err != TransactionError::OK) { throw JSONRPCTransactionError(err); } @@ -4291,7 +4306,8 @@ static RPCHelpMan walletprocesspsbt() { {"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "The transaction base64 string"}, {"sign", RPCArg::Type::BOOL, RPCArg::Default{true}, "Also sign the transaction when updating"}, - {"sighashtype", RPCArg::Type::STR, RPCArg::Default{"ALL"}, "The signature hash type to sign with if not specified by the PSBT. Must be one of\n" + {"sighashtype", RPCArg::Type::STR, RPCArg::Default{"DEFAULT"}, "The signature hash type to sign with if not specified by the PSBT. Must be one of\n" + " \"DEFAULT\"\n" " \"ALL\"\n" " \"NONE\"\n" " \"SINGLE\"\n" @@ -4315,6 +4331,11 @@ static RPCHelpMan walletprocesspsbt() std::shared_ptr<CWallet> const pwallet = GetWalletForJSONRPCRequest(request); if (!pwallet) return NullUniValue; + const CWallet& wallet{*pwallet}; + // Make sure the results are valid at least up to the most recent block + // the user could have gotten from another RPC command prior to now + wallet.BlockUntilSyncedToCurrentChain(); + RPCTypeCheck(request.params, {UniValue::VSTR, UniValue::VBOOL, UniValue::VSTR}); // Unserialize the transaction @@ -4331,7 +4352,7 @@ static RPCHelpMan walletprocesspsbt() bool sign = request.params[1].isNull() ? true : request.params[1].get_bool(); bool bip32derivs = request.params[3].isNull() ? true : request.params[3].get_bool(); bool complete = true; - const TransactionError err = pwallet->FillPSBT(psbtx, complete, nHashType, sign, bip32derivs); + const TransactionError err{wallet.FillPSBT(psbtx, complete, nHashType, sign, bip32derivs)}; if (err != TransactionError::OK) { throw JSONRPCTransactionError(err); } @@ -4429,6 +4450,11 @@ static RPCHelpMan walletcreatefundedpsbt() std::shared_ptr<CWallet> const pwallet = GetWalletForJSONRPCRequest(request); if (!pwallet) return NullUniValue; + CWallet& wallet{*pwallet}; + // Make sure the results are valid at least up to the most recent block + // the user could have gotten from another RPC command prior to now + wallet.BlockUntilSyncedToCurrentChain(); + RPCTypeCheck(request.params, { UniValue::VARR, UniValueType(), // ARR or OBJ, checked later @@ -4440,7 +4466,7 @@ static RPCHelpMan walletcreatefundedpsbt() CAmount fee; int change_position; - bool rbf = pwallet->m_signal_rbf; + bool rbf{wallet.m_signal_rbf}; const UniValue &replaceable_arg = request.params[3]["replaceable"]; if (!replaceable_arg.isNull()) { RPCTypeCheckArgument(replaceable_arg, UniValue::VBOOL); @@ -4451,7 +4477,7 @@ static RPCHelpMan walletcreatefundedpsbt() // Automatically select coins, unless at least one is manually selected. Can // be overridden by options.add_inputs. coin_control.m_add_inputs = rawTx.vin.size() == 0; - FundTransaction(*pwallet, rawTx, fee, change_position, request.params[3], coin_control, /* override_min_fee */ true); + FundTransaction(wallet, rawTx, fee, change_position, request.params[3], coin_control, /* override_min_fee */ true); // Make a blank psbt PartiallySignedTransaction psbtx(rawTx); @@ -4459,7 +4485,7 @@ static RPCHelpMan walletcreatefundedpsbt() // Fill transaction with out data but don't sign bool bip32derivs = request.params[4].isNull() ? 
true : request.params[4].get_bool(); bool complete = true; - const TransactionError err = pwallet->FillPSBT(psbtx, complete, 1, false, bip32derivs); + const TransactionError err{wallet.FillPSBT(psbtx, complete, 1, false, bip32derivs)}; if (err != TransactionError::OK) { throw JSONRPCTransactionError(err); } diff --git a/src/wallet/scriptpubkeyman.cpp b/src/wallet/scriptpubkeyman.cpp index 2eb9ca5c6d..44c3912544 100644 --- a/src/wallet/scriptpubkeyman.cpp +++ b/src/wallet/scriptpubkeyman.cpp @@ -13,7 +13,6 @@ #include <util/system.h> #include <util/time.h> #include <util/translation.h> -#include <external_signer.h> #include <wallet/scriptpubkeyman.h> #include <optional> @@ -23,6 +22,12 @@ const uint32_t BIP32_HARDENED_KEY_LIMIT = 0x80000000; bool LegacyScriptPubKeyMan::GetNewDestination(const OutputType type, CTxDestination& dest, std::string& error) { + if (LEGACY_OUTPUT_TYPES.count(type) == 0) { + error = _("Error: Legacy wallets only support the \"legacy\", \"p2sh-segwit\", and \"bech32\" address types").translated; + return false; + } + assert(type != OutputType::BECH32M); + LOCK(cs_KeyStore); error.clear(); @@ -290,14 +295,22 @@ bool LegacyScriptPubKeyMan::Encrypt(const CKeyingMaterial& master_key, WalletBat return true; } -bool LegacyScriptPubKeyMan::GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool) +bool LegacyScriptPubKeyMan::GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool, std::string& error) { + if (LEGACY_OUTPUT_TYPES.count(type) == 0) { + error = _("Error: Legacy wallets only support the \"legacy\", \"p2sh-segwit\", and \"bech32\" address types").translated; + return false; + } + assert(type != OutputType::BECH32M); + LOCK(cs_KeyStore); if (!CanGetAddresses(internal)) { + error = _("Error: Keypool ran out, please call keypoolrefill first").translated; return false; } if (!ReserveKeyFromKeyPool(index, keypool, internal)) { + error = _("Error: Keypool ran out, please call keypoolrefill first").translated; return false; } address = GetDestinationForKey(keypool.vchPubKey, type); @@ -597,7 +610,7 @@ SigningResult LegacyScriptPubKeyMan::SignMessage(const std::string& message, con return SigningResult::SIGNING_FAILED; } -TransactionError LegacyScriptPubKeyMan::FillPSBT(PartiallySignedTransaction& psbtx, int sighash_type, bool sign, bool bip32derivs, int* n_signed) const +TransactionError LegacyScriptPubKeyMan::FillPSBT(PartiallySignedTransaction& psbtx, const PrecomputedTransactionData& txdata, int sighash_type, bool sign, bool bip32derivs, int* n_signed) const { if (n_signed) { *n_signed = 0; @@ -626,7 +639,7 @@ TransactionError LegacyScriptPubKeyMan::FillPSBT(PartiallySignedTransaction& psb } SignatureData sigdata; input.FillSignatureData(sigdata); - SignPSBTInput(HidingSigningProvider(this, !sign, !bip32derivs), psbtx, i, sighash_type); + SignPSBTInput(HidingSigningProvider(this, !sign, !bip32derivs), psbtx, i, &txdata, sighash_type); bool signed_one = PSBTInputSigned(input); if (n_signed && (signed_one || !sign)) { @@ -1295,6 +1308,7 @@ void LegacyScriptPubKeyMan::AddKeypoolPubkeyWithDB(const CPubKey& pubkey, const void LegacyScriptPubKeyMan::KeepDestination(int64_t nIndex, const OutputType& type) { + assert(type != OutputType::BECH32M); // Remove from key pool WalletBatch batch(m_storage.GetDatabase()); batch.ErasePool(nIndex); @@ -1328,6 +1342,7 @@ void LegacyScriptPubKeyMan::ReturnDestination(int64_t nIndex, bool fInternal, co bool 
LegacyScriptPubKeyMan::GetKeyFromPool(CPubKey& result, const OutputType type, bool internal) { + assert(type != OutputType::BECH32M); if (!CanGetAddresses(internal)) { return false; } @@ -1396,6 +1411,7 @@ bool LegacyScriptPubKeyMan::ReserveKeyFromKeyPool(int64_t& nIndex, CKeyPool& key void LegacyScriptPubKeyMan::LearnRelatedScripts(const CPubKey& key, OutputType type) { + assert(type != OutputType::BECH32M); if (key.IsCompressed() && (type == OutputType::P2SH_SEGWIT || type == OutputType::BECH32)) { CTxDestination witdest = WitnessV0KeyHash(key.GetID()); CScript witprog = GetScriptForDestination(witdest); @@ -1707,10 +1723,9 @@ bool DescriptorScriptPubKeyMan::Encrypt(const CKeyingMaterial& master_key, Walle return true; } -bool DescriptorScriptPubKeyMan::GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool) +bool DescriptorScriptPubKeyMan::GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool, std::string& error) { LOCK(cs_desc_man); - std::string error; bool result = GetNewDestination(type, address, error); index = m_wallet_descriptor.next_index - 1; return result; @@ -1881,6 +1896,12 @@ bool DescriptorScriptPubKeyMan::AddDescriptorKeyWithDB(WalletBatch& batch, const bool DescriptorScriptPubKeyMan::SetupDescriptorGeneration(const CExtKey& master_key, OutputType addr_type) { + if (addr_type == OutputType::BECH32M) { + // Don't allow setting up taproot descriptors yet + // TODO: Allow setting up taproot descriptors + return false; + } + LOCK(cs_desc_man); assert(m_storage.IsWalletFlagSet(WALLET_FLAG_DESCRIPTORS)); @@ -1910,6 +1931,7 @@ bool DescriptorScriptPubKeyMan::SetupDescriptorGeneration(const CExtKey& master_ desc_prefix = "wpkh(" + xpub + "/84'"; break; } + case OutputType::BECH32M: assert(false); // TODO: Setup taproot descriptor } // no default case, so the compiler can warn about missing cases assert(!desc_prefix.empty()); @@ -2083,7 +2105,7 @@ SigningResult DescriptorScriptPubKeyMan::SignMessage(const std::string& message, return SigningResult::OK; } -TransactionError DescriptorScriptPubKeyMan::FillPSBT(PartiallySignedTransaction& psbtx, int sighash_type, bool sign, bool bip32derivs, int* n_signed) const +TransactionError DescriptorScriptPubKeyMan::FillPSBT(PartiallySignedTransaction& psbtx, const PrecomputedTransactionData& txdata, int sighash_type, bool sign, bool bip32derivs, int* n_signed) const { if (n_signed) { *n_signed = 0; @@ -2133,7 +2155,7 @@ TransactionError DescriptorScriptPubKeyMan::FillPSBT(PartiallySignedTransaction& } } - SignPSBTInput(HidingSigningProvider(keys.get(), !sign, !bip32derivs), psbtx, i, sighash_type); + SignPSBTInput(HidingSigningProvider(keys.get(), !sign, !bip32derivs), psbtx, i, &txdata, sighash_type); bool signed_one = PSBTInputSigned(input); if (n_signed && (signed_one || !sign)) { diff --git a/src/wallet/scriptpubkeyman.h b/src/wallet/scriptpubkeyman.h index b8e34fbac3..b2ca354b0a 100644 --- a/src/wallet/scriptpubkeyman.h +++ b/src/wallet/scriptpubkeyman.h @@ -181,7 +181,7 @@ public: virtual bool CheckDecryptionKey(const CKeyingMaterial& master_key, bool accept_no_keys = false) { return false; } virtual bool Encrypt(const CKeyingMaterial& master_key, WalletBatch* batch) { return false; } - virtual bool GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool) { return false; } + virtual bool GetReservedDestination(const OutputType type, bool 
internal, CTxDestination& address, int64_t& index, CKeyPool& keypool, std::string& error) { return false; } virtual void KeepDestination(int64_t index, const OutputType& type) {} virtual void ReturnDestination(int64_t index, bool internal, const CTxDestination& addr) {} @@ -235,7 +235,7 @@ public: /** Sign a message with the given script */ virtual SigningResult SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const { return SigningResult::SIGNING_FAILED; }; /** Adds script and derivation path information to a PSBT, and optionally signs it. */ - virtual TransactionError FillPSBT(PartiallySignedTransaction& psbt, int sighash_type = 1 /* SIGHASH_ALL */, bool sign = true, bool bip32derivs = false, int* n_signed = nullptr) const { return TransactionError::INVALID_PSBT; } + virtual TransactionError FillPSBT(PartiallySignedTransaction& psbt, const PrecomputedTransactionData& txdata, int sighash_type = 1 /* SIGHASH_ALL */, bool sign = true, bool bip32derivs = false, int* n_signed = nullptr) const { return TransactionError::INVALID_PSBT; } virtual uint256 GetID() const { return uint256(); } @@ -254,6 +254,13 @@ public: boost::signals2::signal<void ()> NotifyCanGetAddressesChanged; }; +/** OutputTypes supported by the LegacyScriptPubKeyMan */ +static const std::unordered_set<OutputType> LEGACY_OUTPUT_TYPES { + OutputType::LEGACY, + OutputType::P2SH_SEGWIT, + OutputType::BECH32, +}; + class LegacyScriptPubKeyMan : public ScriptPubKeyMan, public FillableSigningProvider { private: @@ -357,7 +364,7 @@ public: bool CheckDecryptionKey(const CKeyingMaterial& master_key, bool accept_no_keys = false) override; bool Encrypt(const CKeyingMaterial& master_key, WalletBatch* batch) override; - bool GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool) override; + bool GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool, std::string& error) override; void KeepDestination(int64_t index, const OutputType& type) override; void ReturnDestination(int64_t index, bool internal, const CTxDestination&) override; @@ -394,7 +401,7 @@ public: bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const override; SigningResult SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const override; - TransactionError FillPSBT(PartiallySignedTransaction& psbt, int sighash_type = 1 /* SIGHASH_ALL */, bool sign = true, bool bip32derivs = false, int* n_signed = nullptr) const override; + TransactionError FillPSBT(PartiallySignedTransaction& psbt, const PrecomputedTransactionData& txdata, int sighash_type = 1 /* SIGHASH_ALL */, bool sign = true, bool bip32derivs = false, int* n_signed = nullptr) const override; uint256 GetID() const override; @@ -566,7 +573,7 @@ public: bool CheckDecryptionKey(const CKeyingMaterial& master_key, bool accept_no_keys = false) override; bool Encrypt(const CKeyingMaterial& master_key, WalletBatch* batch) override; - bool GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool) override; + bool GetReservedDestination(const OutputType type, bool internal, CTxDestination& address, int64_t& index, CKeyPool& keypool, std::string& error) override; void ReturnDestination(int64_t index, bool internal, const CTxDestination& addr) override; // Tops up the descriptor cache 
and m_map_script_pub_keys. The cache is stored in the wallet file @@ -605,7 +612,7 @@ public: bool SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const override; SigningResult SignMessage(const std::string& message, const PKHash& pkhash, std::string& str_sig) const override; - TransactionError FillPSBT(PartiallySignedTransaction& psbt, int sighash_type = 1 /* SIGHASH_ALL */, bool sign = true, bool bip32derivs = false, int* n_signed = nullptr) const override; + TransactionError FillPSBT(PartiallySignedTransaction& psbt, const PrecomputedTransactionData& txdata, int sighash_type = 1 /* SIGHASH_ALL */, bool sign = true, bool bip32derivs = false, int* n_signed = nullptr) const override; uint256 GetID() const override; diff --git a/src/wallet/spend.cpp b/src/wallet/spend.cpp index c8ded4c51e..6a8df437ae 100644 --- a/src/wallet/spend.cpp +++ b/src/wallet/spend.cpp @@ -618,8 +618,9 @@ bool CWallet::CreateTransactionInternal( // Reserve a new key pair from key pool. If it fails, provide a dummy // destination in case we don't need change. CTxDestination dest; - if (!reservedest.GetReservedDestination(dest, true)) { - error = _("Transaction needs a change address, but we can't generate it. Please call keypoolrefill first."); + std::string dest_err; + if (!reservedest.GetReservedDestination(dest, true, dest_err)) { + error = strprintf(_("Transaction needs a change address, but we can't generate it. %s"), dest_err); } scriptChange = GetScriptForDestination(dest); // A valid destination implies a change script (and diff --git a/src/wallet/test/psbt_wallet_tests.cpp b/src/wallet/test/psbt_wallet_tests.cpp index ce7e661b67..1cefa386b7 100644 --- a/src/wallet/test/psbt_wallet_tests.cpp +++ b/src/wallet/test/psbt_wallet_tests.cpp @@ -71,7 +71,7 @@ BOOST_AUTO_TEST_CASE(psbt_updater_test) // Try to sign the mutated input SignatureData sigdata; - BOOST_CHECK(spk_man->FillPSBT(psbtx, SIGHASH_ALL, true, true) != TransactionError::OK); + BOOST_CHECK(spk_man->FillPSBT(psbtx, PrecomputePSBTData(psbtx), SIGHASH_ALL, true, true) != TransactionError::OK); } BOOST_AUTO_TEST_CASE(parse_hd_keypath) diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp index 6a791748b4..a0070b8dd3 100644 --- a/src/wallet/test/wallet_tests.cpp +++ b/src/wallet/test/wallet_tests.cpp @@ -83,17 +83,17 @@ static void AddKey(CWallet& wallet, const CKey& key) BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup) { // Cap last block file size, and mine new block in a new block file. - CBlockIndex* oldTip = ::ChainActive().Tip(); + CBlockIndex* oldTip = m_node.chainman->ActiveChain().Tip(); GetBlockFileInfo(oldTip->GetBlockPos().nFile)->nSize = MAX_BLOCKFILE_SIZE; CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())); - CBlockIndex* newTip = ::ChainActive().Tip(); + CBlockIndex* newTip = m_node.chainman->ActiveChain().Tip(); // Verify ScanForWalletTransactions fails to read an unknown start block. 
{ CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase()); { LOCK(wallet.cs_wallet); - wallet.SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash()); + wallet.SetLastBlockProcessed(m_node.chainman->ActiveChain().Height(), m_node.chainman->ActiveChain().Tip()->GetBlockHash()); } AddKey(wallet, coinbaseKey); WalletRescanReserver reserver(wallet); @@ -112,7 +112,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup) CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase()); { LOCK(wallet.cs_wallet); - wallet.SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash()); + wallet.SetLastBlockProcessed(m_node.chainman->ActiveChain().Height(), m_node.chainman->ActiveChain().Tip()->GetBlockHash()); } AddKey(wallet, coinbaseKey); WalletRescanReserver reserver(wallet); @@ -138,7 +138,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup) CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase()); { LOCK(wallet.cs_wallet); - wallet.SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash()); + wallet.SetLastBlockProcessed(m_node.chainman->ActiveChain().Height(), m_node.chainman->ActiveChain().Tip()->GetBlockHash()); } AddKey(wallet, coinbaseKey); WalletRescanReserver reserver(wallet); @@ -163,7 +163,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup) CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase()); { LOCK(wallet.cs_wallet); - wallet.SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash()); + wallet.SetLastBlockProcessed(m_node.chainman->ActiveChain().Height(), m_node.chainman->ActiveChain().Tip()->GetBlockHash()); } AddKey(wallet, coinbaseKey); WalletRescanReserver reserver(wallet); @@ -180,10 +180,10 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup) BOOST_FIXTURE_TEST_CASE(importmulti_rescan, TestChain100Setup) { // Cap last block file size, and mine new block in a new block file. - CBlockIndex* oldTip = ::ChainActive().Tip(); + CBlockIndex* oldTip = m_node.chainman->ActiveChain().Tip(); GetBlockFileInfo(oldTip->GetBlockPos().nFile)->nSize = MAX_BLOCKFILE_SIZE; CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())); - CBlockIndex* newTip = ::ChainActive().Tip(); + CBlockIndex* newTip = m_node.chainman->ActiveChain().Tip(); // Prune the older block file. { @@ -242,7 +242,7 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup) { // Create two blocks with same timestamp to verify that importwallet rescan // will pick up both blocks, not just the first. 
- const int64_t BLOCK_TIME = ::ChainActive().Tip()->GetBlockTimeMax() + 5; + const int64_t BLOCK_TIME = m_node.chainman->ActiveChain().Tip()->GetBlockTimeMax() + 5; SetMockTime(BLOCK_TIME); m_coinbase_txns.emplace_back(CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]); m_coinbase_txns.emplace_back(CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]); @@ -265,7 +265,7 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup) spk_man->AddKeyPubKey(coinbaseKey, coinbaseKey.GetPubKey()); AddWallet(wallet); - wallet->SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash()); + wallet->SetLastBlockProcessed(m_node.chainman->ActiveChain().Height(), m_node.chainman->ActiveChain().Tip()->GetBlockHash()); } JSONRPCRequest request; request.params.setArray(); @@ -286,7 +286,7 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup) request.params.setArray(); request.params.push_back(backup_file); AddWallet(wallet); - wallet->SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash()); + wallet->SetLastBlockProcessed(m_node.chainman->ActiveChain().Height(), m_node.chainman->ActiveChain().Tip()->GetBlockHash()); ::importwallet().HandleRequest(request); RemoveWallet(wallet, std::nullopt); @@ -313,9 +313,9 @@ BOOST_FIXTURE_TEST_CASE(coin_mark_dirty_immature_credit, TestChain100Setup) CWalletTx wtx(&wallet, m_coinbase_txns.back()); LOCK2(wallet.cs_wallet, spk_man->cs_KeyStore); - wallet.SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash()); + wallet.SetLastBlockProcessed(m_node.chainman->ActiveChain().Height(), m_node.chainman->ActiveChain().Tip()->GetBlockHash()); - CWalletTx::Confirmation confirm(CWalletTx::Status::CONFIRMED, ::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash(), 0); + CWalletTx::Confirmation confirm(CWalletTx::Status::CONFIRMED, m_node.chainman->ActiveChain().Height(), m_node.chainman->ActiveChain().Tip()->GetBlockHash(), 0); wtx.m_confirm = confirm; // Call GetImmatureCredit() once before adding the key to the wallet to @@ -483,16 +483,16 @@ public: wallet = std::make_unique<CWallet>(m_node.chain.get(), "", CreateMockWalletDatabase()); { LOCK2(wallet->cs_wallet, ::cs_main); - wallet->SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash()); + wallet->SetLastBlockProcessed(m_node.chainman->ActiveChain().Height(), m_node.chainman->ActiveChain().Tip()->GetBlockHash()); } wallet->LoadWallet(); AddKey(*wallet, coinbaseKey); WalletRescanReserver reserver(*wallet); reserver.reserve(); - CWallet::ScanResult result = wallet->ScanForWalletTransactions(::ChainActive().Genesis()->GetBlockHash(), 0 /* start_height */, {} /* max_height */, reserver, false /* update */); + CWallet::ScanResult result = wallet->ScanForWalletTransactions(m_node.chainman->ActiveChain().Genesis()->GetBlockHash(), 0 /* start_height */, {} /* max_height */, reserver, false /* update */); BOOST_CHECK_EQUAL(result.status, CWallet::ScanResult::SUCCESS); - BOOST_CHECK_EQUAL(result.last_scanned_block, ::ChainActive().Tip()->GetBlockHash()); - BOOST_CHECK_EQUAL(*result.last_scanned_height, ::ChainActive().Height()); + BOOST_CHECK_EQUAL(result.last_scanned_block, m_node.chainman->ActiveChain().Tip()->GetBlockHash()); + BOOST_CHECK_EQUAL(*result.last_scanned_height, m_node.chainman->ActiveChain().Height()); BOOST_CHECK(result.last_failed_block.IsNull()); } @@ -521,10 +521,10 @@ public: 
CreateAndProcessBlock({CMutableTransaction(blocktx)}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())); LOCK(wallet->cs_wallet); - wallet->SetLastBlockProcessed(wallet->GetLastBlockHeight() + 1, ::ChainActive().Tip()->GetBlockHash()); + wallet->SetLastBlockProcessed(wallet->GetLastBlockHeight() + 1, m_node.chainman->ActiveChain().Tip()->GetBlockHash()); auto it = wallet->mapWallet.find(tx->GetHash()); BOOST_CHECK(it != wallet->mapWallet.end()); - CWalletTx::Confirmation confirm(CWalletTx::Status::CONFIRMED, ::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash(), 1); + CWalletTx::Confirmation confirm(CWalletTx::Status::CONFIRMED, m_node.chainman->ActiveChain().Height(), m_node.chainman->ActiveChain().Tip()->GetBlockHash(), 1); it->second.m_confirm = confirm; return it->second; } diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp index 4b6630de3c..c2586b60b8 100644 --- a/src/wallet/wallet.cpp +++ b/src/wallet/wallet.cpp @@ -8,6 +8,7 @@ #include <chain.h> #include <consensus/consensus.h> #include <consensus/validation.h> +#include <external_signer.h> #include <fs.h> #include <interfaces/chain.h> #include <interfaces/wallet.h> @@ -1807,7 +1808,7 @@ bool CWallet::SignTransaction(CMutableTransaction& tx) const coins[input.prevout] = Coin(wtx.tx->vout[input.prevout.n], wtx.m_confirm.block_height, wtx.IsCoinBase()); } std::map<int, std::string> input_errors; - return SignTransaction(tx, coins, SIGHASH_ALL, input_errors); + return SignTransaction(tx, coins, SIGHASH_DEFAULT, input_errors); } bool CWallet::SignTransaction(CMutableTransaction& tx, const std::map<COutPoint, Coin>& coins, int sighash, std::map<int, std::string>& input_errors) const @@ -1830,6 +1831,7 @@ TransactionError CWallet::FillPSBT(PartiallySignedTransaction& psbtx, bool& comp if (n_signed) { *n_signed = 0; } + const PrecomputedTransactionData txdata = PrecomputePSBTData(psbtx); LOCK(cs_wallet); // Get all of the previous transactions for (unsigned int i = 0; i < psbtx.tx->vin.size(); ++i) { @@ -1856,7 +1858,7 @@ TransactionError CWallet::FillPSBT(PartiallySignedTransaction& psbtx, bool& comp // Fill in information from ScriptPubKeyMans for (ScriptPubKeyMan* spk_man : GetAllScriptPubKeyMans()) { int n_signed_this_spkm = 0; - TransactionError res = spk_man->FillPSBT(psbtx, sighash_type, sign, bip32derivs, &n_signed_this_spkm); + TransactionError res = spk_man->FillPSBT(psbtx, txdata, sighash_type, sign, bip32derivs, &n_signed_this_spkm); if (res != TransactionError::OK) { return res; } @@ -1907,7 +1909,13 @@ OutputType CWallet::TransactionChangeType(const std::optional<OutputType>& chang int witnessversion = 0; std::vector<unsigned char> witnessprogram; if (recipient.scriptPubKey.IsWitnessProgram(witnessversion, witnessprogram)) { - return OutputType::BECH32; + if (GetScriptPubKeyMan(OutputType::BECH32M, true)) { + return OutputType::BECH32M; + } else if (GetScriptPubKeyMan(OutputType::BECH32, true)) { + return OutputType::BECH32; + } else { + return m_default_address_type; + } } } @@ -2110,7 +2118,7 @@ bool CWallet::GetNewDestination(const OutputType type, const std::string label, spk_man->TopUp(); result = spk_man->GetNewDestination(type, dest, error); } else { - error = strprintf("Error: No %s addresses available.", FormatOutputType(type)); + error = strprintf(_("Error: No %s addresses available."), FormatOutputType(type)).translated; } if (result) { SetAddressBook(dest, label, "receive"); @@ -2125,8 +2133,7 @@ bool CWallet::GetNewChangeDestination(const OutputType type, CTxDestination& des error.clear(); 
ReserveDestination reservedest(this, type); - if (!reservedest.GetReservedDestination(dest, true)) { - error = _("Error: Keypool ran out, please call keypoolrefill first").translated; + if (!reservedest.GetReservedDestination(dest, true, error)) { return false; } @@ -2173,10 +2180,11 @@ std::set<CTxDestination> CWallet::GetLabelAddresses(const std::string& label) co return result; } -bool ReserveDestination::GetReservedDestination(CTxDestination& dest, bool internal) +bool ReserveDestination::GetReservedDestination(CTxDestination& dest, bool internal, std::string& error) { m_spk_man = pwallet->GetScriptPubKeyMan(type, internal); if (!m_spk_man) { + error = strprintf(_("Error: No %s addresses available."), FormatOutputType(type)).translated; return false; } @@ -2186,7 +2194,7 @@ bool ReserveDestination::GetReservedDestination(CTxDestination& dest, bool inter m_spk_man->TopUp(); CKeyPool keypool; - if (!m_spk_man->GetReservedDestination(type, internal, address, nIndex, keypool)) { + if (!m_spk_man->GetReservedDestination(type, internal, address, nIndex, keypool, error)) { return false; } fInternal = keypool.fInternal; @@ -2215,7 +2223,6 @@ void ReserveDestination::ReturnDestination() bool CWallet::DisplayAddress(const CTxDestination& dest) { -#ifdef ENABLE_EXTERNAL_SIGNER CScript scriptPubKey = GetScriptForDestination(dest); const auto spk_man = GetScriptPubKeyMan(scriptPubKey); if (spk_man == nullptr) { @@ -2227,9 +2234,6 @@ bool CWallet::DisplayAddress(const CTxDestination& dest) } ExternalSigner signer = ExternalSignerScriptPubKeyMan::GetExternalSigner(); return signer_spk_man->DisplayAddress(scriptPubKey, signer); -#else - return false; -#endif } void CWallet::LockCoin(const COutPoint& output) @@ -3035,7 +3039,7 @@ void CWallet::SetupLegacyScriptPubKeyMan() } auto spk_manager = std::unique_ptr<ScriptPubKeyMan>(new LegacyScriptPubKeyMan(*this)); - for (const auto& type : OUTPUT_TYPES) { + for (const auto& type : LEGACY_OUTPUT_TYPES) { m_internal_spk_managers[type] = spk_manager.get(); m_external_spk_managers[type] = spk_manager.get(); } @@ -3063,12 +3067,8 @@ void CWallet::ConnectScriptPubKeyManNotifiers() void CWallet::LoadDescriptorScriptPubKeyMan(uint256 id, WalletDescriptor& desc) { if (IsWalletFlagSet(WALLET_FLAG_EXTERNAL_SIGNER)) { -#ifdef ENABLE_EXTERNAL_SIGNER auto spk_manager = std::unique_ptr<ScriptPubKeyMan>(new ExternalSignerScriptPubKeyMan(*this, desc)); m_spk_managers[id] = std::move(spk_manager); -#else - throw std::runtime_error(std::string(__func__) + ": Compiled without external signing support (required for external signing)"); -#endif } else { auto spk_manager = std::unique_ptr<ScriptPubKeyMan>(new DescriptorScriptPubKeyMan(*this, desc)); m_spk_managers[id] = std::move(spk_manager); @@ -3092,6 +3092,11 @@ void CWallet::SetupDescriptorScriptPubKeyMans() for (bool internal : {false, true}) { for (OutputType t : OUTPUT_TYPES) { + if (t == OutputType::BECH32M) { + // Skip taproot (bech32m) for now + // TODO: Setup taproot (bech32m) descriptors by default + continue; + } auto spk_manager = std::unique_ptr<DescriptorScriptPubKeyMan>(new DescriptorScriptPubKeyMan(*this, internal)); if (IsCrypted()) { if (IsLocked()) { @@ -3108,7 +3113,6 @@ void CWallet::SetupDescriptorScriptPubKeyMans() } } } else { -#ifdef ENABLE_EXTERNAL_SIGNER ExternalSigner signer = ExternalSignerScriptPubKeyMan::GetExternalSigner(); // TODO: add account parameter @@ -3135,9 +3139,6 @@ void CWallet::SetupDescriptorScriptPubKeyMans() AddActiveScriptPubKeyMan(id, t, internal); } } -#else - throw 
std::runtime_error(std::string(__func__) + ": Compiled without external signing support (required for external signing)"); -#endif // ENABLE_EXTERNAL_SIGNER } } diff --git a/src/wallet/wallet.h b/src/wallet/wallet.h index d0e26c416c..b63938c5f1 100644 --- a/src/wallet/wallet.h +++ b/src/wallet/wallet.h @@ -21,7 +21,6 @@ #include <validationinterface.h> #include <wallet/coinselection.h> #include <wallet/crypter.h> -#include <external_signer.h> #include <wallet/receive.h> #include <wallet/scriptpubkeyman.h> #include <wallet/spend.h> @@ -182,7 +181,7 @@ public: } //! Reserve an address - bool GetReservedDestination(CTxDestination& pubkey, bool internal); + bool GetReservedDestination(CTxDestination& pubkey, bool internal, std::string& error); //! Return reserved address void ReturnDestination(); //! Keep the address. Do not return it's key to the keypool when this object goes out of scope diff --git a/test/functional/feature_anchors.py b/test/functional/feature_anchors.py index a60a723b3e..24bb02bc90 100755 --- a/test/functional/feature_anchors.py +++ b/test/functional/feature_anchors.py @@ -2,7 +2,7 @@ # Copyright (c) 2020 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. -"""Test Anchors functionality""" +"""Test block-relay-only anchors functionality""" import os @@ -10,6 +10,9 @@ from test_framework.p2p import P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal +INBOUND_CONNECTIONS = 5 +BLOCK_RELAY_CONNECTIONS = 2 + def check_node_connections(*, node, num_in, num_out): info = node.getnetworkinfo() @@ -25,19 +28,26 @@ class AnchorsTest(BitcoinTestFramework): self.setup_nodes() def run_test(self): - self.log.info("Add 2 block-relay-only connections to node 0") - for i in range(2): + node_anchors_path = os.path.join( + self.nodes[0].datadir, "regtest", "anchors.dat" + ) + + self.log.info("When node starts, check if anchors.dat doesn't exist") + assert not os.path.exists(node_anchors_path) + + self.log.info(f"Add {BLOCK_RELAY_CONNECTIONS} block-relay-only connections to node") + for i in range(BLOCK_RELAY_CONNECTIONS): self.log.debug(f"block-relay-only: {i}") self.nodes[0].add_outbound_p2p_connection( P2PInterface(), p2p_idx=i, connection_type="block-relay-only" ) - self.log.info("Add 5 inbound connections to node 0") - for i in range(5): + self.log.info(f"Add {INBOUND_CONNECTIONS} inbound connections to node") + for i in range(INBOUND_CONNECTIONS): self.log.debug(f"inbound: {i}") self.nodes[0].add_p2p_connection(P2PInterface()) - self.log.info("Check node 0 connections") + self.log.info("Check node connections") check_node_connections(node=self.nodes[0], num_in=5, num_out=2) # 127.0.0.1 @@ -57,14 +67,10 @@ class AnchorsTest(BitcoinTestFramework): self.log.info("Stop node 0") self.stop_node(0) - node0_anchors_path = os.path.join( - self.nodes[0].datadir, "regtest", "anchors.dat" - ) - # It should contain only the block-relay-only addresses self.log.info("Check the addresses in anchors.dat") - with open(node0_anchors_path, "rb") as file_handler: + with open(node_anchors_path, "rb") as file_handler: anchors = file_handler.read().hex() for port in block_relay_nodes_port: @@ -74,11 +80,11 @@ class AnchorsTest(BitcoinTestFramework): ip_port = ip + port assert ip_port not in anchors - self.log.info("Start node 0") + self.log.info("Start node") self.start_node(0) self.log.info("When node starts, check if anchors.dat 
doesn't exist anymore") - assert not os.path.exists(node0_anchors_path) + assert not os.path.exists(node_anchors_path) if __name__ == "__main__": diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py index 6c5857c5ce..e44ce9b57d 100755 --- a/test/functional/feature_bip68_sequence.py +++ b/test/functional/feature_bip68_sequence.py @@ -6,8 +6,19 @@ import time -from test_framework.blocktools import create_block, NORMAL_GBT_REQUEST_PARAMS, add_witness_commitment -from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, ToHex +from test_framework.blocktools import ( + NORMAL_GBT_REQUEST_PARAMS, + add_witness_commitment, + create_block, +) +from test_framework.messages import ( + COIN, + COutPoint, + CTransaction, + CTxIn, + CTxOut, + tx_from_hex, +) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, @@ -89,7 +100,7 @@ class BIP68Test(BitcoinTestFramework): tx1.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), nSequence=sequence_value)] tx1.vout = [CTxOut(value, DUMMY_P2WPKH_SCRIPT)] - tx1_signed = self.nodes[0].signrawtransactionwithwallet(ToHex(tx1))["hex"] + tx1_signed = self.nodes[0].signrawtransactionwithwallet(tx1.serialize().hex())["hex"] tx1_id = self.nodes[0].sendrawtransaction(tx1_signed) tx1_id = int(tx1_id, 16) @@ -102,13 +113,13 @@ class BIP68Test(BitcoinTestFramework): tx2.vout = [CTxOut(int(value - self.relayfee * COIN), DUMMY_P2WPKH_SCRIPT)] tx2.rehash() - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx2)) + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, tx2.serialize().hex()) # Setting the version back down to 1 should disable the sequence lock, # so this should be accepted. tx2.nVersion = 1 - self.nodes[0].sendrawtransaction(ToHex(tx2)) + self.nodes[0].sendrawtransaction(tx2.serialize().hex()) # Calculate the median time past of a prior block ("confirmations" before # the current tip). @@ -193,9 +204,9 @@ class BIP68Test(BitcoinTestFramework): tx.vin.append(CTxIn(COutPoint(int(utxos[j]["txid"], 16), utxos[j]["vout"]), nSequence=sequence_value)) value += utxos[j]["amount"]*COIN # Overestimate the size of the tx - signatures should be less than 120 bytes, and leave 50 for the output - tx_size = len(ToHex(tx))//2 + 120*num_inputs + 50 + tx_size = len(tx.serialize().hex())//2 + 120*num_inputs + 50 tx.vout.append(CTxOut(int(value-self.relayfee*tx_size*COIN/1000), DUMMY_P2WPKH_SCRIPT)) - rawtx = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))["hex"] + rawtx = self.nodes[0].signrawtransactionwithwallet(tx.serialize().hex())["hex"] if (using_sequence_locks and not should_pass): # This transaction should be rejected @@ -215,7 +226,7 @@ class BIP68Test(BitcoinTestFramework): # Create a mempool tx. txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) - tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) + tx1 = tx_from_hex(self.nodes[0].getrawtransaction(txid)) tx1.rehash() # Anyone-can-spend mempool tx. 
@@ -224,8 +235,8 @@ class BIP68Test(BitcoinTestFramework): tx2.nVersion = 2 tx2.vin = [CTxIn(COutPoint(tx1.sha256, 0), nSequence=0)] tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), DUMMY_P2WPKH_SCRIPT)] - tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"] - tx2 = FromHex(tx2, tx2_raw) + tx2_raw = self.nodes[0].signrawtransactionwithwallet(tx2.serialize().hex())["hex"] + tx2 = tx_from_hex(tx2_raw) tx2.rehash() self.nodes[0].sendrawtransaction(tx2_raw) @@ -246,10 +257,10 @@ class BIP68Test(BitcoinTestFramework): if (orig_tx.hash in node.getrawmempool()): # sendrawtransaction should fail if the tx is in the mempool - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, ToHex(tx)) + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, node.sendrawtransaction, tx.serialize().hex()) else: # sendrawtransaction should succeed if the tx is not in the mempool - node.sendrawtransaction(ToHex(tx)) + node.sendrawtransaction(tx.serialize().hex()) return tx @@ -299,7 +310,7 @@ class BIP68Test(BitcoinTestFramework): utxos = self.nodes[0].listunspent() tx5.vin.append(CTxIn(COutPoint(int(utxos[0]["txid"], 16), utxos[0]["vout"]), nSequence=1)) tx5.vout[0].nValue += int(utxos[0]["amount"]*COIN) - raw_tx5 = self.nodes[0].signrawtransactionwithwallet(ToHex(tx5))["hex"] + raw_tx5 = self.nodes[0].signrawtransactionwithwallet(tx5.serialize().hex())["hex"] assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, raw_tx5) @@ -325,7 +336,7 @@ class BIP68Test(BitcoinTestFramework): block.rehash() block.solve() tip = block.sha256 - assert_equal(None if i == 1 else 'inconclusive', self.nodes[0].submitblock(ToHex(block))) + assert_equal(None if i == 1 else 'inconclusive', self.nodes[0].submitblock(block.serialize().hex())) tmpl = self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS) tmpl['previousblockhash'] = '%x' % tip tmpl['transactions'] = [] @@ -348,7 +359,7 @@ class BIP68Test(BitcoinTestFramework): assert not softfork_active(self.nodes[0], 'csv') txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), 2) - tx1 = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) + tx1 = tx_from_hex(self.nodes[0].getrawtransaction(txid)) tx1.rehash() # Make an anyone-can-spend transaction @@ -358,11 +369,11 @@ class BIP68Test(BitcoinTestFramework): tx2.vout = [CTxOut(int(tx1.vout[0].nValue - self.relayfee*COIN), DUMMY_P2WPKH_SCRIPT)] # sign tx2 - tx2_raw = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))["hex"] - tx2 = FromHex(tx2, tx2_raw) + tx2_raw = self.nodes[0].signrawtransactionwithwallet(tx2.serialize().hex())["hex"] + tx2 = tx_from_hex(tx2_raw) tx2.rehash() - self.nodes[0].sendrawtransaction(ToHex(tx2)) + self.nodes[0].sendrawtransaction(tx2.serialize().hex()) # Now make an invalid spend of tx2 according to BIP68 sequence_value = 100 # 100 block relative locktime @@ -373,7 +384,7 @@ class BIP68Test(BitcoinTestFramework): tx3.vout = [CTxOut(int(tx2.vout[0].nValue - self.relayfee * COIN), DUMMY_P2WPKH_SCRIPT)] tx3.rehash() - assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, ToHex(tx3)) + assert_raises_rpc_error(-26, NOT_FINAL_ERROR, self.nodes[0].sendrawtransaction, tx3.serialize().hex()) # make a block that violates bip68; ensure that the tip updates block = create_block(tmpl=self.nodes[0].getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)) @@ -404,9 +415,9 @@ class BIP68Test(BitcoinTestFramework): outputs = { self.nodes[1].getnewaddress() : 1.0 } rawtx = self.nodes[1].createrawtransaction(inputs, 
outputs) rawtxfund = self.nodes[1].fundrawtransaction(rawtx)['hex'] - tx = FromHex(CTransaction(), rawtxfund) + tx = tx_from_hex(rawtxfund) tx.nVersion = 2 - tx_signed = self.nodes[1].signrawtransactionwithwallet(ToHex(tx))["hex"] + tx_signed = self.nodes[1].signrawtransactionwithwallet(tx.serialize().hex())["hex"] self.nodes[1].sendrawtransaction(tx_signed) if __name__ == '__main__': diff --git a/test/functional/feature_block.py b/test/functional/feature_block.py index 158efb52c9..389db73bc9 100755 --- a/test/functional/feature_block.py +++ b/test/functional/feature_block.py @@ -591,6 +591,8 @@ class FullBlockTest(BitcoinTestFramework): b44.hashPrevBlock = self.tip.sha256 b44.nBits = 0x207fffff b44.vtx.append(coinbase) + tx = self.create_and_sign_transaction(out[14], 1) + b44.vtx.append(tx) b44.hashMerkleRoot = b44.calc_merkle_root() b44.solve() self.tip = b44 @@ -678,7 +680,7 @@ class FullBlockTest(BitcoinTestFramework): # Test block timestamps # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) # \-> b54 (15) - # + # -> b44 (14)\-> b48 () self.move_tip(43) b53 = self.next_block(53, spend=out[14]) self.send_blocks([b53], False) @@ -698,6 +700,21 @@ class FullBlockTest(BitcoinTestFramework): self.send_blocks([b55], True) self.save_spendable_output() + # The block which was previously rejected because of being "too far(3 hours)" must be accepted 2 hours later. + # The new block is only 1 hour into future now and we must reorg onto to the new longer chain. + # The new bestblock b48p is invalidated manually. + # -> b31 (8) -> b33 (9) -> b35 (10) -> b39 (11) -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) + # \-> b54 (15) + # -> b44 (14)\-> b48 () -> b48p () + self.log.info("Accept a previously rejected future block at a later time") + node.setmocktime(int(time.time()) + 2*60*60) + self.move_tip(48) + self.block_heights[b48.sha256] = self.block_heights[b44.sha256] + 1 # b48 is a parent of b44 + b48p = self.next_block("48p") + self.send_blocks([b48, b48p], success=True) # Reorg to the longer chain + node.invalidateblock(b48p.hash) # mark b48p as invalid + node.setmocktime(0) + # Test Merkle tree malleability # # -> b42 (12) -> b43 (13) -> b53 (14) -> b55 (15) -> b57p2 (16) @@ -1308,7 +1325,7 @@ class FullBlockTest(BitcoinTestFramework): return create_tx_with_script(spend_tx, n, amount=value, script_pub_key=script) # sign a transaction, using the key we know about - # this signs input 0 in tx, which is assumed to be spending output n in spend_tx + # this signs input 0 in tx, which is assumed to be spending output 0 in spend_tx def sign_tx(self, tx, spend_tx): scriptPubKey = bytearray(spend_tx.vout[0].scriptPubKey) if (scriptPubKey[0] == OP_TRUE): # an anyone-can-spend diff --git a/test/functional/feature_coinstatsindex.py b/test/functional/feature_coinstatsindex.py index cf270b25ee..5d8ec2a8da 100755 --- a/test/functional/feature_coinstatsindex.py +++ b/test/functional/feature_coinstatsindex.py @@ -22,7 +22,6 @@ from test_framework.messages import ( CTransaction, CTxIn, CTxOut, - ToHex, ) from test_framework.script import ( CScript, @@ -170,7 +169,7 @@ class CoinStatsIndexTest(BitcoinTestFramework): tx2 = CTransaction() tx2.vin.append(CTxIn(COutPoint(int(tx1_txid, 16), n), b'')) tx2.vout.append(CTxOut(int(20.99 * COIN), CScript([OP_RETURN] + [OP_FALSE]*30))) - tx2_hex = self.nodes[0].signrawtransactionwithwallet(ToHex(tx2))['hex'] + tx2_hex = self.nodes[0].signrawtransactionwithwallet(tx2.serialize().hex())['hex'] 
self.nodes[0].sendrawtransaction(tx2_hex) # Include both txs in a block @@ -207,7 +206,7 @@ class CoinStatsIndexTest(BitcoinTestFramework): block_time = self.nodes[0].getblock(tip)['time'] + 1 block = create_block(int(tip, 16), cb, block_time) block.solve() - self.nodes[0].submitblock(ToHex(block)) + self.nodes[0].submitblock(block.serialize().hex()) self.sync_all() self.wait_until(lambda: not try_rpc(-32603, "Unable to read UTXO set", index_node.gettxoutsetinfo, 'muhash')) @@ -281,6 +280,7 @@ class CoinStatsIndexTest(BitcoinTestFramework): # Add another block, so we don't depend on reconsiderblock remembering which # blocks were touched by invalidateblock index_node.generate(1) + self.sync_all() # Ensure that removing and re-adding blocks yields consistent results block = index_node.getblockhash(99) diff --git a/test/functional/feature_dbcrash.py b/test/functional/feature_dbcrash.py index 2b56bc78f5..c532300ce2 100755 --- a/test/functional/feature_dbcrash.py +++ b/test/functional/feature_dbcrash.py @@ -36,7 +36,6 @@ from test_framework.messages import ( CTransaction, CTxIn, CTxOut, - ToHex, ) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( @@ -208,7 +207,7 @@ class ChainstateWriteCrashTest(BitcoinTestFramework): tx.vout.append(CTxOut(output_amount, hex_str_to_bytes(utxo['scriptPubKey']))) # Sign and send the transaction to get into the mempool - tx_signed_hex = node.signrawtransactionwithwallet(ToHex(tx))['hex'] + tx_signed_hex = node.signrawtransactionwithwallet(tx.serialize().hex())['hex'] node.sendrawtransaction(tx_signed_hex) num_transactions += 1 diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py index 8f522aee66..8ccdf87ff3 100755 --- a/test/functional/feature_fee_estimation.py +++ b/test/functional/feature_fee_estimation.py @@ -6,8 +6,23 @@ from decimal import Decimal import random -from test_framework.messages import CTransaction, CTxIn, CTxOut, COutPoint, ToHex, COIN -from test_framework.script import CScript, OP_1, OP_DROP, OP_2, OP_HASH160, OP_EQUAL, hash160, OP_TRUE +from test_framework.messages import ( + COIN, + COutPoint, + CTransaction, + CTxIn, + CTxOut, +) +from test_framework.script import ( + CScript, + OP_1, + OP_2, + OP_DROP, + OP_EQUAL, + OP_HASH160, + OP_TRUE, + hash160, +) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, @@ -64,11 +79,11 @@ def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee # the ScriptSig that will satisfy the ScriptPubKey. 
for inp in tx.vin: inp.scriptSig = SCRIPT_SIG[inp.prevout.n] - txid = from_node.sendrawtransaction(hexstring=ToHex(tx), maxfeerate=0) + txid = from_node.sendrawtransaction(hexstring=tx.serialize().hex(), maxfeerate=0) unconflist.append({"txid": txid, "vout": 0, "amount": total_in - amount - fee}) unconflist.append({"txid": txid, "vout": 1, "amount": amount}) - return (ToHex(tx), fee) + return (tx.serialize().hex(), fee) def split_inputs(from_node, txins, txouts, initial_split=False): @@ -91,10 +106,10 @@ def split_inputs(from_node, txins, txouts, initial_split=False): # If this is the initial split we actually need to sign the transaction # Otherwise we just need to insert the proper ScriptSig if (initial_split): - completetx = from_node.signrawtransactionwithwallet(ToHex(tx))["hex"] + completetx = from_node.signrawtransactionwithwallet(tx.serialize().hex())["hex"] else: tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]] - completetx = ToHex(tx) + completetx = tx.serialize().hex() txid = from_node.sendrawtransaction(hexstring=completetx, maxfeerate=0) txouts.append({"txid": txid, "vout": 0, "amount": half_change}) txouts.append({"txid": txid, "vout": 1, "amount": rem_change}) diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py index f09bffe2d4..cedb7b57ca 100755 --- a/test/functional/feature_pruning.py +++ b/test/functional/feature_pruning.py @@ -11,8 +11,12 @@ This test takes 30 mins or more (up to 2 hours) import os from test_framework.blocktools import create_coinbase -from test_framework.messages import CBlock, ToHex -from test_framework.script import CScript, OP_RETURN, OP_NOP +from test_framework.messages import CBlock +from test_framework.script import ( + CScript, + OP_NOP, + OP_RETURN, +) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, @@ -62,7 +66,7 @@ def mine_large_blocks(node, n): block.solve() # Submit to the node - node.submitblock(ToHex(block)) + node.submitblock(block.serialize().hex()) previousblockhash = block.sha256 height += 1 diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py index 0bb04ae267..e42f8045db 100755 --- a/test/functional/feature_rbf.py +++ b/test/functional/feature_rbf.py @@ -7,16 +7,22 @@ from decimal import Decimal from test_framework.blocktools import COINBASE_MATURITY -from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut +from test_framework.messages import ( + BIP125_SEQUENCE_NUMBER, + COIN, + COutPoint, + CTransaction, + CTxIn, + CTxOut, +) from test_framework.script import CScript, OP_DROP from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal, assert_raises_rpc_error, satoshi_round from test_framework.script_util import DUMMY_P2WPKH_SCRIPT, DUMMY_2_P2WPKH_SCRIPT +from test_framework.wallet import MiniWallet MAX_REPLACEMENT_LIMIT = 100 -def txToHex(tx): - return tx.serialize().hex() def make_utxo(node, amount, confirmed=True, scriptPubKey=DUMMY_P2WPKH_SCRIPT): """Create a txout with a given amount and scriptPubKey @@ -26,12 +32,12 @@ def make_utxo(node, amount, confirmed=True, scriptPubKey=DUMMY_P2WPKH_SCRIPT): confirmed - txouts created will be confirmed in the blockchain; unconfirmed otherwise. 
""" - fee = 1*COIN - while node.getbalance() < satoshi_round((amount + fee)/COIN): + fee = 1 * COIN + while node.getbalance() < satoshi_round((amount + fee) / COIN): node.generate(COINBASE_MATURITY) new_addr = node.getnewaddress() - txid = node.sendtoaddress(new_addr, satoshi_round((amount+fee)/COIN)) + txid = node.sendtoaddress(new_addr, satoshi_round((amount + fee) / COIN)) tx1 = node.getrawtransaction(txid, 1) txid = int(txid, 16) i, _ = next(filter(lambda vout: new_addr == vout[1]['scriptPubKey']['address'], enumerate(tx1['vout']))) @@ -41,7 +47,7 @@ def make_utxo(node, amount, confirmed=True, scriptPubKey=DUMMY_P2WPKH_SCRIPT): tx2.vout = [CTxOut(amount, scriptPubKey)] tx2.rehash() - signed_tx = node.signrawtransactionwithwallet(txToHex(tx2)) + signed_tx = node.signrawtransactionwithwallet(tx2.serialize().hex()) txid = node.sendrawtransaction(signed_tx['hex'], 0) @@ -78,10 +84,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): self.skip_if_no_wallet() def run_test(self): - # Leave IBD - self.nodes[0].generate(1) - - make_utxo(self.nodes[0], 1*COIN) + make_utxo(self.nodes[0], 1 * COIN) # Ensure nodes are synced self.sync_all() @@ -123,7 +126,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): def test_simple_doublespend(self): """Simple doublespend""" - tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN)) # make_utxo may have generated a bunch of blocks, so we need to sync # before we can spend the coins generated, or else the resulting @@ -133,7 +136,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] - tx1a_hex = txToHex(tx1a) + tx1a_hex = tx1a.serialize().hex() tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0) self.sync_all() @@ -142,7 +145,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(1 * COIN, DUMMY_2_P2WPKH_SCRIPT)] - tx1b_hex = txToHex(tx1b) + tx1b_hex = tx1b.serialize().hex() # This will raise an exception due to insufficient fee assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0) @@ -151,7 +154,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)] - tx1b_hex = txToHex(tx1b) + tx1b_hex = tx1b.serialize().hex() # Works when enabled tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0) @@ -165,18 +168,18 @@ class ReplaceByFeeTest(BitcoinTestFramework): def test_doublespend_chain(self): """Doublespend of a long chain""" - initial_nValue = 50*COIN + initial_nValue = 50 * COIN tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) prevout = tx0_outpoint remaining_value = initial_nValue chain_txids = [] - while remaining_value > 10*COIN: - remaining_value -= 1*COIN + while remaining_value > 10 * COIN: + remaining_value -= 1 * COIN tx = CTransaction() tx.vin = [CTxIn(prevout, nSequence=0)] tx.vout = [CTxOut(remaining_value, CScript([1, OP_DROP] * 15 + [1]))] - tx_hex = txToHex(tx) + tx_hex = tx.serialize().hex() txid = self.nodes[0].sendrawtransaction(tx_hex, 0) chain_txids.append(txid) prevout = COutPoint(int(txid, 16), 0) @@ -186,7 +189,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - 30 * COIN, DUMMY_P2WPKH_SCRIPT)] - 
dbl_tx_hex = txToHex(dbl_tx) + dbl_tx_hex = dbl_tx.serialize().hex() # This will raise an exception due to insufficient fee assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0) @@ -195,7 +198,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] - dbl_tx_hex = txToHex(dbl_tx) + dbl_tx_hex = dbl_tx.serialize().hex() self.nodes[0].sendrawtransaction(dbl_tx_hex, 0) mempool = self.nodes[0].getrawmempool() @@ -205,10 +208,10 @@ class ReplaceByFeeTest(BitcoinTestFramework): def test_doublespend_tree(self): """Doublespend of a big tree of transactions""" - initial_nValue = 50*COIN + initial_nValue = 50 * COIN tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) - def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001*COIN, _total_txs=None): + def branch(prevout, initial_value, max_txs, tree_width=5, fee=0.0001 * COIN, _total_txs=None): if _total_txs is None: _total_txs = [0] if _total_txs[0] >= max_txs: @@ -223,7 +226,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): tx = CTransaction() tx.vin = [CTxIn(prevout, nSequence=0)] tx.vout = vout - tx_hex = txToHex(tx) + tx_hex = tx.serialize().hex() assert len(tx.serialize()) < 100000 txid = self.nodes[0].sendrawtransaction(tx_hex, 0) @@ -239,7 +242,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): _total_txs=_total_txs): yield x - fee = int(0.0001*COIN) + fee = int(0.0001 * COIN) n = MAX_REPLACEMENT_LIMIT tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) assert_equal(len(tree_txs), n) @@ -248,7 +251,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - fee * n, DUMMY_P2WPKH_SCRIPT)] - dbl_tx_hex = txToHex(dbl_tx) + dbl_tx_hex = dbl_tx.serialize().hex() # This will raise an exception due to insufficient fee assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0) @@ -256,7 +259,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - fee * n - 1 * COIN, DUMMY_P2WPKH_SCRIPT)] - dbl_tx_hex = txToHex(dbl_tx) + dbl_tx_hex = dbl_tx.serialize().hex() self.nodes[0].sendrawtransaction(dbl_tx_hex, 0) mempool = self.nodes[0].getrawmempool() @@ -267,8 +270,8 @@ class ReplaceByFeeTest(BitcoinTestFramework): # Try again, but with more total transactions than the "max txs # double-spent at once" anti-DoS limit. 
- for n in (MAX_REPLACEMENT_LIMIT+1, MAX_REPLACEMENT_LIMIT*2): - fee = int(0.0001*COIN) + for n in (MAX_REPLACEMENT_LIMIT + 1, MAX_REPLACEMENT_LIMIT * 2): + fee = int(0.0001 * COIN) tx0_outpoint = make_utxo(self.nodes[0], initial_nValue) tree_txs = list(branch(tx0_outpoint, initial_nValue, n, fee=fee)) assert_equal(len(tree_txs), n) @@ -276,7 +279,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): dbl_tx = CTransaction() dbl_tx.vin = [CTxIn(tx0_outpoint, nSequence=0)] dbl_tx.vout = [CTxOut(initial_nValue - 2 * fee * n, DUMMY_P2WPKH_SCRIPT)] - dbl_tx_hex = txToHex(dbl_tx) + dbl_tx_hex = dbl_tx.serialize().hex() # This will raise an exception assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, dbl_tx_hex, 0) @@ -286,33 +289,33 @@ class ReplaceByFeeTest(BitcoinTestFramework): def test_replacement_feeperkb(self): """Replacement requires fee-per-KB to be higher""" - tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN)) tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] - tx1a_hex = txToHex(tx1a) + tx1a_hex = tx1a.serialize().hex() self.nodes[0].sendrawtransaction(tx1a_hex, 0) # Higher fee, but the fee per KB is much lower, so the replacement is # rejected. tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*999000]))] - tx1b_hex = txToHex(tx1b) + tx1b.vout = [CTxOut(int(0.001 * COIN), CScript([b'a' * 999000]))] + tx1b_hex = tx1b.serialize().hex() # This will raise an exception due to insufficient fee assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0) def test_spends_of_conflicting_outputs(self): """Replacements that spend conflicting tx outputs are rejected""" - utxo1 = make_utxo(self.nodes[0], int(1.2*COIN)) - utxo2 = make_utxo(self.nodes[0], 3*COIN) + utxo1 = make_utxo(self.nodes[0], int(1.2 * COIN)) + utxo2 = make_utxo(self.nodes[0], 3 * COIN) tx1a = CTransaction() tx1a.vin = [CTxIn(utxo1, nSequence=0)] tx1a.vout = [CTxOut(int(1.1 * COIN), DUMMY_P2WPKH_SCRIPT)] - tx1a_hex = txToHex(tx1a) + tx1a_hex = tx1a.serialize().hex() tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0) tx1a_txid = int(tx1a_txid, 16) @@ -322,7 +325,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0)] tx2.vin.append(CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)) tx2.vout = tx1a.vout - tx2_hex = txToHex(tx2) + tx2_hex = tx2.serialize().hex() # This will raise an exception assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0) @@ -331,7 +334,7 @@ class ReplaceByFeeTest(BitcoinTestFramework): tx1b = CTransaction() tx1b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] tx1b.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] - tx1b_hex = txToHex(tx1b) + tx1b_hex = tx1b.serialize().hex() tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0) tx1b_txid = int(tx1b_txid, 16) @@ -339,26 +342,26 @@ class ReplaceByFeeTest(BitcoinTestFramework): tx2.vin = [CTxIn(utxo1, nSequence=0), CTxIn(utxo2, nSequence=0), CTxIn(COutPoint(tx1b_txid, 0))] tx2.vout = tx1a.vout - tx2_hex = txToHex(tx2) + tx2_hex = tx2.serialize().hex() # This will raise an exception assert_raises_rpc_error(-26, "bad-txns-spends-conflicting-tx", self.nodes[0].sendrawtransaction, tx2_hex, 0) def test_new_unconfirmed_inputs(self): """Replacements that add new 
unconfirmed inputs are rejected""" - confirmed_utxo = make_utxo(self.nodes[0], int(1.1*COIN)) - unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1*COIN), False) + confirmed_utxo = make_utxo(self.nodes[0], int(1.1 * COIN)) + unconfirmed_utxo = make_utxo(self.nodes[0], int(0.1 * COIN), False) tx1 = CTransaction() tx1.vin = [CTxIn(confirmed_utxo)] tx1.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] - tx1_hex = txToHex(tx1) + tx1_hex = tx1.serialize().hex() self.nodes[0].sendrawtransaction(tx1_hex, 0) tx2 = CTransaction() tx2.vin = [CTxIn(confirmed_utxo), CTxIn(unconfirmed_utxo)] tx2.vout = tx1.vout - tx2_hex = txToHex(tx2) + tx2_hex = tx2.serialize().hex() # This will raise an exception assert_raises_rpc_error(-26, "replacement-adds-unconfirmed", self.nodes[0].sendrawtransaction, tx2_hex, 0) @@ -369,42 +372,42 @@ class ReplaceByFeeTest(BitcoinTestFramework): # transactions # Start by creating a single transaction with many outputs - initial_nValue = 10*COIN + initial_nValue = 10 * COIN utxo = make_utxo(self.nodes[0], initial_nValue) - fee = int(0.0001*COIN) - split_value = int((initial_nValue-fee)/(MAX_REPLACEMENT_LIMIT+1)) + fee = int(0.0001 * COIN) + split_value = int((initial_nValue - fee) / (MAX_REPLACEMENT_LIMIT + 1)) outputs = [] - for _ in range(MAX_REPLACEMENT_LIMIT+1): + for _ in range(MAX_REPLACEMENT_LIMIT + 1): outputs.append(CTxOut(split_value, CScript([1]))) splitting_tx = CTransaction() splitting_tx.vin = [CTxIn(utxo, nSequence=0)] splitting_tx.vout = outputs - splitting_tx_hex = txToHex(splitting_tx) + splitting_tx_hex = splitting_tx.serialize().hex() txid = self.nodes[0].sendrawtransaction(splitting_tx_hex, 0) txid = int(txid, 16) # Now spend each of those outputs individually - for i in range(MAX_REPLACEMENT_LIMIT+1): + for i in range(MAX_REPLACEMENT_LIMIT + 1): tx_i = CTransaction() tx_i.vin = [CTxIn(COutPoint(txid, i), nSequence=0)] tx_i.vout = [CTxOut(split_value - fee, DUMMY_P2WPKH_SCRIPT)] - tx_i_hex = txToHex(tx_i) + tx_i_hex = tx_i.serialize().hex() self.nodes[0].sendrawtransaction(tx_i_hex, 0) # Now create doublespend of the whole lot; should fail. 
# Need a big enough fee to cover all spending transactions and have # a higher fee rate - double_spend_value = (split_value-100*fee)*(MAX_REPLACEMENT_LIMIT+1) + double_spend_value = (split_value - 100 * fee) * (MAX_REPLACEMENT_LIMIT + 1) inputs = [] - for i in range(MAX_REPLACEMENT_LIMIT+1): + for i in range(MAX_REPLACEMENT_LIMIT + 1): inputs.append(CTxIn(COutPoint(txid, i), nSequence=0)) double_tx = CTransaction() double_tx.vin = inputs double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] - double_tx_hex = txToHex(double_tx) + double_tx_hex = double_tx.serialize().hex() # This will raise an exception assert_raises_rpc_error(-26, "too many potential replacements", self.nodes[0].sendrawtransaction, double_tx_hex, 0) @@ -413,18 +416,18 @@ class ReplaceByFeeTest(BitcoinTestFramework): double_tx = CTransaction() double_tx.vin = inputs[0:-1] double_tx.vout = [CTxOut(double_spend_value, CScript([b'a']))] - double_tx_hex = txToHex(double_tx) + double_tx_hex = double_tx.serialize().hex() self.nodes[0].sendrawtransaction(double_tx_hex, 0) def test_opt_in(self): """Replacing should only work if orig tx opted in""" - tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN)) # Create a non-opting in transaction tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0xffffffff)] tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] - tx1a_hex = txToHex(tx1a) + tx1a_hex = tx1a.serialize().hex() tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0) # This transaction isn't shown as replaceable @@ -434,25 +437,25 @@ class ReplaceByFeeTest(BitcoinTestFramework): tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)] - tx1b_hex = txToHex(tx1b) + tx1b_hex = tx1b.serialize().hex() # This will raise an exception assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx1b_hex, 0) - tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + tx1_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN)) # Create a different non-opting in transaction tx2a = CTransaction() tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0xfffffffe)] tx2a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] - tx2a_hex = txToHex(tx2a) + tx2a_hex = tx2a.serialize().hex() tx2a_txid = self.nodes[0].sendrawtransaction(tx2a_hex, 0) # Still shouldn't be able to double-spend tx2b = CTransaction() tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] tx2b.vout = [CTxOut(int(0.9 * COIN), DUMMY_P2WPKH_SCRIPT)] - tx2b_hex = txToHex(tx2b) + tx2b_hex = tx2b.serialize().hex() # This will raise an exception assert_raises_rpc_error(-26, "txn-mempool-conflict", self.nodes[0].sendrawtransaction, tx2b_hex, 0) @@ -467,8 +470,8 @@ class ReplaceByFeeTest(BitcoinTestFramework): tx3a = CTransaction() tx3a.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0xffffffff), CTxIn(COutPoint(tx2a_txid, 0), nSequence=0xfffffffd)] - tx3a.vout = [CTxOut(int(0.9*COIN), CScript([b'c'])), CTxOut(int(0.9*COIN), CScript([b'd']))] - tx3a_hex = txToHex(tx3a) + tx3a.vout = [CTxOut(int(0.9 * COIN), CScript([b'c'])), CTxOut(int(0.9 * COIN), CScript([b'd']))] + tx3a_hex = tx3a.serialize().hex() tx3a_txid = self.nodes[0].sendrawtransaction(tx3a_hex, 0) @@ -478,12 +481,12 @@ class ReplaceByFeeTest(BitcoinTestFramework): tx3b = CTransaction() tx3b.vin = [CTxIn(COutPoint(tx1a_txid, 0), nSequence=0)] tx3b.vout = [CTxOut(int(0.5 * COIN), DUMMY_P2WPKH_SCRIPT)] - tx3b_hex = txToHex(tx3b) + tx3b_hex = tx3b.serialize().hex() tx3c = 
CTransaction() tx3c.vin = [CTxIn(COutPoint(tx2a_txid, 0), nSequence=0)] tx3c.vout = [CTxOut(int(0.5 * COIN), DUMMY_P2WPKH_SCRIPT)] - tx3c_hex = txToHex(tx3c) + tx3c_hex = tx3c.serialize().hex() self.nodes[0].sendrawtransaction(tx3b_hex, 0) # If tx3b was accepted, tx3c won't look like a replacement, @@ -495,25 +498,25 @@ class ReplaceByFeeTest(BitcoinTestFramework): # correctly used by replacement logic # 1. Check that feeperkb uses modified fees - tx0_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + tx0_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN)) tx1a = CTransaction() tx1a.vin = [CTxIn(tx0_outpoint, nSequence=0)] tx1a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] - tx1a_hex = txToHex(tx1a) + tx1a_hex = tx1a.serialize().hex() tx1a_txid = self.nodes[0].sendrawtransaction(tx1a_hex, 0) # Higher fee, but the actual fee per KB is much lower. tx1b = CTransaction() tx1b.vin = [CTxIn(tx0_outpoint, nSequence=0)] - tx1b.vout = [CTxOut(int(0.001*COIN), CScript([b'a'*740000]))] - tx1b_hex = txToHex(tx1b) + tx1b.vout = [CTxOut(int(0.001 * COIN), CScript([b'a' * 740000]))] + tx1b_hex = tx1b.serialize().hex() # Verify tx1b cannot replace tx1a. assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx1b_hex, 0) # Use prioritisetransaction to set tx1a's fee to 0. - self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1*COIN)) + self.nodes[0].prioritisetransaction(txid=tx1a_txid, fee_delta=int(-0.1 * COIN)) # Now tx1b should be able to replace tx1a tx1b_txid = self.nodes[0].sendrawtransaction(tx1b_hex, 0) @@ -521,12 +524,12 @@ class ReplaceByFeeTest(BitcoinTestFramework): assert tx1b_txid in self.nodes[0].getrawmempool() # 2. Check that absolute fee checks use modified fee. - tx1_outpoint = make_utxo(self.nodes[0], int(1.1*COIN)) + tx1_outpoint = make_utxo(self.nodes[0], int(1.1 * COIN)) tx2a = CTransaction() tx2a.vin = [CTxIn(tx1_outpoint, nSequence=0)] tx2a.vout = [CTxOut(1 * COIN, DUMMY_P2WPKH_SCRIPT)] - tx2a_hex = txToHex(tx2a) + tx2a_hex = tx2a.serialize().hex() self.nodes[0].sendrawtransaction(tx2a_hex, 0) # Lower fee, but we'll prioritise it @@ -534,13 +537,13 @@ class ReplaceByFeeTest(BitcoinTestFramework): tx2b.vin = [CTxIn(tx1_outpoint, nSequence=0)] tx2b.vout = [CTxOut(int(1.01 * COIN), DUMMY_P2WPKH_SCRIPT)] tx2b.rehash() - tx2b_hex = txToHex(tx2b) + tx2b_hex = tx2b.serialize().hex() # Verify tx2b cannot replace tx2a. 
assert_raises_rpc_error(-26, "insufficient fee", self.nodes[0].sendrawtransaction, tx2b_hex, 0) # Now prioritise tx2b to have a higher modified fee - self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1*COIN)) + self.nodes[0].prioritisetransaction(txid=tx2b.hash, fee_delta=int(0.1 * COIN)) # tx2b should now be accepted tx2b_txid = self.nodes[0].sendrawtransaction(tx2b_hex, 0) @@ -550,11 +553,11 @@ class ReplaceByFeeTest(BitcoinTestFramework): def test_rpc(self): us0 = self.nodes[0].listunspent()[0] ins = [us0] - outs = {self.nodes[0].getnewaddress() : Decimal(1.0000000)} + outs = {self.nodes[0].getnewaddress(): Decimal(1.0000000)} rawtx0 = self.nodes[0].createrawtransaction(ins, outs, 0, True) rawtx1 = self.nodes[0].createrawtransaction(ins, outs, 0, False) - json0 = self.nodes[0].decoderawtransaction(rawtx0) - json1 = self.nodes[0].decoderawtransaction(rawtx1) + json0 = self.nodes[0].decoderawtransaction(rawtx0) + json1 = self.nodes[0].decoderawtransaction(rawtx1) assert_equal(json0["vin"][0]["sequence"], 4294967293) assert_equal(json1["vin"][0]["sequence"], 4294967295) @@ -562,74 +565,68 @@ class ReplaceByFeeTest(BitcoinTestFramework): frawtx2a = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": True}) frawtx2b = self.nodes[0].fundrawtransaction(rawtx2, {"replaceable": False}) - json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex']) - json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex']) + json0 = self.nodes[0].decoderawtransaction(frawtx2a['hex']) + json1 = self.nodes[0].decoderawtransaction(frawtx2b['hex']) assert_equal(json0["vin"][0]["sequence"], 4294967293) assert_equal(json1["vin"][0]["sequence"], 4294967294) def test_no_inherited_signaling(self): - # Send tx from which to conflict outputs later - base_txid = self.nodes[0].sendtoaddress(self.nodes[0].getnewaddress(), Decimal("10")) - self.nodes[0].generate(1) - self.sync_blocks() + wallet = MiniWallet(self.nodes[0]) + wallet.scan_blocks(start=76, num=1) + confirmed_utxo = wallet.get_utxo() # Create an explicitly opt-in parent transaction - optin_parent_tx = self.nodes[0].createrawtransaction([{ - 'txid': base_txid, - 'vout': 0, - "sequence": 0xfffffffd, - }], {self.nodes[0].getnewaddress(): Decimal("9.99998")}) - - optin_parent_tx = self.nodes[0].signrawtransactionwithwallet(optin_parent_tx) - - # Broadcast parent tx - optin_parent_txid = self.nodes[0].sendrawtransaction(hexstring=optin_parent_tx["hex"], maxfeerate=0) - assert optin_parent_txid in self.nodes[0].getrawmempool() - - replacement_parent_tx = self.nodes[0].createrawtransaction([{ - 'txid': base_txid, - 'vout': 0, - "sequence": 0xfffffffd, - }], {self.nodes[0].getnewaddress(): Decimal("9.90000")}) - - replacement_parent_tx = self.nodes[0].signrawtransactionwithwallet(replacement_parent_tx) + optin_parent_tx = wallet.send_self_transfer( + from_node=self.nodes[0], + utxo_to_spend=confirmed_utxo, + sequence=BIP125_SEQUENCE_NUMBER, + fee_rate=Decimal('0.01'), + ) + assert_equal(True, self.nodes[0].getmempoolentry(optin_parent_tx['txid'])['bip125-replaceable']) + + replacement_parent_tx = wallet.create_self_transfer( + from_node=self.nodes[0], + utxo_to_spend=confirmed_utxo, + sequence=BIP125_SEQUENCE_NUMBER, + fee_rate=Decimal('0.02'), + ) # Test if parent tx can be replaced. - res = self.nodes[0].testmempoolaccept(rawtxs=[replacement_parent_tx['hex']], maxfeerate=0)[0] + res = self.nodes[0].testmempoolaccept(rawtxs=[replacement_parent_tx['hex']])[0] # Parent can be replaced. 
assert_equal(res['allowed'], True) # Create an opt-out child tx spending the opt-in parent - optout_child_tx = self.nodes[0].createrawtransaction([{ - 'txid': optin_parent_txid, - 'vout': 0, - "sequence": 0xffffffff, - }], {self.nodes[0].getnewaddress(): Decimal("9.99990")}) - - optout_child_tx = self.nodes[0].signrawtransactionwithwallet(optout_child_tx) - - # Broadcast child tx - optout_child_txid = self.nodes[0].sendrawtransaction(hexstring=optout_child_tx["hex"], maxfeerate=0) - assert optout_child_txid in self.nodes[0].getrawmempool() - - replacement_child_tx = self.nodes[0].createrawtransaction([{ - 'txid': optin_parent_txid, - 'vout': 0, - "sequence": 0xffffffff, - }], {self.nodes[0].getnewaddress(): Decimal("9.00000")}) - - replacement_child_tx = self.nodes[0].signrawtransactionwithwallet(replacement_child_tx) + parent_utxo = wallet.get_utxo(txid=optin_parent_tx['txid']) + optout_child_tx = wallet.send_self_transfer( + from_node=self.nodes[0], + utxo_to_spend=parent_utxo, + sequence=0xffffffff, + fee_rate=Decimal('0.01'), + ) + + # Reports true due to inheritance + assert_equal(True, self.nodes[0].getmempoolentry(optout_child_tx['txid'])['bip125-replaceable']) + + replacement_child_tx = wallet.create_self_transfer( + from_node=self.nodes[0], + utxo_to_spend=parent_utxo, + sequence=0xffffffff, + fee_rate=Decimal('0.02'), + mempool_valid=False, + ) # Broadcast replacement child tx # BIP 125 : # 1. The original transactions signal replaceability explicitly or through inheritance as described in the above # Summary section. - # The original transaction (`optout_child_tx`) doesn't signal RBF but its parent (`optin_parent_txid`) does. + # The original transaction (`optout_child_tx`) doesn't signal RBF but its parent (`optin_parent_tx`) does. # The replacement transaction (`replacement_child_tx`) should be able to replace the original transaction. # See CVE-2021-31876 for further explanations. 
- assert optin_parent_txid in self.nodes[0].getrawmempool() + assert_equal(True, self.nodes[0].getmempoolentry(optin_parent_tx['txid'])['bip125-replaceable']) assert_raises_rpc_error(-26, 'txn-mempool-conflict', self.nodes[0].sendrawtransaction, replacement_child_tx["hex"], 0) + if __name__ == '__main__': ReplaceByFeeTest().main() diff --git a/test/functional/feature_segwit.py b/test/functional/feature_segwit.py index ad8767556b..42910904d7 100755 --- a/test/functional/feature_segwit.py +++ b/test/functional/feature_segwit.py @@ -5,7 +5,6 @@ """Test the SegWit changeover logic.""" from decimal import Decimal -from io import BytesIO from test_framework.address import ( key_to_p2pkh, @@ -14,9 +13,34 @@ from test_framework.address import ( script_to_p2sh_p2wsh, script_to_p2wsh, ) -from test_framework.blocktools import witness_script, send_to_witness -from test_framework.messages import COIN, COutPoint, CTransaction, CTxIn, CTxOut, FromHex, sha256, ToHex -from test_framework.script import CScript, OP_HASH160, OP_CHECKSIG, OP_0, hash160, OP_EQUAL, OP_DUP, OP_EQUALVERIFY, OP_1, OP_2, OP_CHECKMULTISIG, OP_TRUE, OP_DROP +from test_framework.blocktools import ( + send_to_witness, + witness_script, +) +from test_framework.messages import ( + COIN, + COutPoint, + CTransaction, + CTxIn, + CTxOut, + sha256, + tx_from_hex, +) +from test_framework.script import ( + CScript, + OP_0, + OP_1, + OP_2, + OP_CHECKMULTISIG, + OP_CHECKSIG, + OP_DROP, + OP_DUP, + OP_EQUAL, + OP_EQUALVERIFY, + OP_HASH160, + OP_TRUE, + hash160, +) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, @@ -179,7 +203,7 @@ class SegWitTest(BitcoinTestFramework): assert self.nodes[1].getblock(blockhash, False) == self.nodes[2].getblock(blockhash, False) for tx_id in segwit_tx_list: - tx = FromHex(CTransaction(), self.nodes[2].gettransaction(tx_id)["hex"]) + tx = tx_from_hex(self.nodes[2].gettransaction(tx_id)["hex"]) assert self.nodes[2].getrawtransaction(tx_id, False, blockhash) != self.nodes[0].getrawtransaction(tx_id, False, blockhash) assert self.nodes[1].getrawtransaction(tx_id, False, blockhash) == self.nodes[2].getrawtransaction(tx_id, False, blockhash) assert self.nodes[0].getrawtransaction(tx_id, False, blockhash) != self.nodes[2].gettransaction(tx_id)["hex"] @@ -225,12 +249,12 @@ class SegWitTest(BitcoinTestFramework): # tx1 is allowed to appear in the block, but no others. 
txid1 = send_to_witness(1, self.nodes[0], find_spendable_utxo(self.nodes[0], 50), self.pubkey[0], False, Decimal("49.996")) hex_tx = self.nodes[0].gettransaction(txid)['hex'] - tx = FromHex(CTransaction(), hex_tx) + tx = tx_from_hex(hex_tx) assert tx.wit.is_null() # This should not be a segwit input assert txid1 in self.nodes[0].getrawmempool() tx1_hex = self.nodes[0].gettransaction(txid1)['hex'] - tx1 = FromHex(CTransaction(), tx1_hex) + tx1 = tx_from_hex(tx1_hex) # Check that wtxid is properly reported in mempool entry (txid1) assert_equal(int(self.nodes[0].getmempoolentry(txid1)["wtxid"], 16), tx1.calc_sha256(True)) @@ -243,9 +267,9 @@ class SegWitTest(BitcoinTestFramework): tx = CTransaction() tx.vin.append(CTxIn(COutPoint(int(txid1, 16), 0), b'')) tx.vout.append(CTxOut(int(49.99 * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) - tx2_hex = self.nodes[0].signrawtransactionwithwallet(ToHex(tx))['hex'] + tx2_hex = self.nodes[0].signrawtransactionwithwallet(tx.serialize().hex())['hex'] txid2 = self.nodes[0].sendrawtransaction(tx2_hex) - tx = FromHex(CTransaction(), tx2_hex) + tx = tx_from_hex(tx2_hex) assert not tx.wit.is_null() # Check that wtxid is properly reported in mempool entry (txid2) @@ -260,7 +284,7 @@ class SegWitTest(BitcoinTestFramework): tx.vin.append(CTxIn(COutPoint(int(txid2, 16), 0), b"")) tx.vout.append(CTxOut(int(49.95 * COIN), CScript([OP_TRUE, OP_DROP] * 15 + [OP_TRUE]))) # Huge fee tx.calc_sha256() - txid3 = self.nodes[0].sendrawtransaction(hexstring=ToHex(tx), maxfeerate=0) + txid3 = self.nodes[0].sendrawtransaction(hexstring=tx.serialize().hex(), maxfeerate=0) assert tx.wit.is_null() assert txid3 in self.nodes[0].getrawmempool() @@ -611,10 +635,8 @@ class SegWitTest(BitcoinTestFramework): def create_and_mine_tx_from_txids(self, txids, success=True): tx = CTransaction() for i in txids: - txtmp = CTransaction() txraw = self.nodes[0].getrawtransaction(i, 0, txs_mined[i]) - f = BytesIO(hex_str_to_bytes(txraw)) - txtmp.deserialize(f) + txtmp = tx_from_hex(txraw) for j in range(len(txtmp.vout)): tx.vin.append(CTxIn(COutPoint(int('0x' + i, 0), j))) tx.vout.append(CTxOut(0, CScript())) diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py index fc04853199..99283b69b0 100755 --- a/test/functional/feature_taproot.py +++ b/test/functional/feature_taproot.py @@ -19,7 +19,6 @@ from test_framework.messages import ( CTxIn, CTxInWitness, CTxOut, - ToHex, ) from test_framework.script import ( ANNEX_TAG, @@ -1306,7 +1305,7 @@ class TaprootTest(BitcoinTestFramework): # Add change fund_tx.vout.append(CTxOut(balance - 10000, random.choice(host_spks))) # Ask the wallet to sign - ss = BytesIO(bytes.fromhex(node.signrawtransactionwithwallet(ToHex(fund_tx))["hex"])) + ss = BytesIO(bytes.fromhex(node.signrawtransactionwithwallet(fund_tx.serialize().hex())["hex"])) fund_tx.deserialize(ss) # Construct UTXOData entries fund_tx.rehash() diff --git a/test/functional/feature_utxo_set_hash.py b/test/functional/feature_utxo_set_hash.py index ce00faffee..afc0bdb8c5 100755 --- a/test/functional/feature_utxo_set_hash.py +++ b/test/functional/feature_utxo_set_hash.py @@ -9,7 +9,7 @@ import struct from test_framework.messages import ( CBlock, COutPoint, - FromHex, + from_hex, ) from test_framework.muhash import MuHash3072 from test_framework.test_framework import BitcoinTestFramework @@ -32,13 +32,13 @@ class UTXOSetHashTest(BitcoinTestFramework): # Generate 100 blocks and remove the first since we plan to spend its # coinbase block_hashes = wallet.generate(1) + 
node.generate(99) - blocks = list(map(lambda block: FromHex(CBlock(), node.getblock(block, False)), block_hashes)) + blocks = list(map(lambda block: from_hex(CBlock(), node.getblock(block, False)), block_hashes)) blocks.pop(0) # Create a spending transaction and mine a block which includes it txid = wallet.send_self_transfer(from_node=node)['txid'] tx_block = node.generateblock(output=wallet.get_address(), transactions=[txid]) - blocks.append(FromHex(CBlock(), node.getblock(tx_block['hash'], False))) + blocks.append(from_hex(CBlock(), node.getblock(tx_block['hash'], False))) # Serialize the outputs that should be in the UTXO set and add them to # a MuHash object diff --git a/test/functional/feature_versionbits_warning.py b/test/functional/feature_versionbits_warning.py index 062a7affb5..1c9e237d78 100755 --- a/test/functional/feature_versionbits_warning.py +++ b/test/functional/feature_versionbits_warning.py @@ -21,8 +21,8 @@ VB_TOP_BITS = 0x20000000 VB_UNKNOWN_BIT = 27 # Choose a bit unassigned to any deployment VB_UNKNOWN_VERSION = VB_TOP_BITS | (1 << VB_UNKNOWN_BIT) -WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT) -VB_PATTERN = re.compile("Warning: unknown new rules activated.*versionbit") +WARN_UNKNOWN_RULES_ACTIVE = "Unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT) +VB_PATTERN = re.compile("Unknown new rules activated.*versionbit") class VersionBitsWarningTest(BitcoinTestFramework): def set_test_params(self): diff --git a/test/functional/interface_bitcoin_cli.py b/test/functional/interface_bitcoin_cli.py index 30cd499b3f..22eec59600 100755 --- a/test/functional/interface_bitcoin_cli.py +++ b/test/functional/interface_bitcoin_cli.py @@ -10,10 +10,12 @@ from test_framework.blocktools import COINBASE_MATURITY from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, + assert_greater_than_or_equal, assert_raises_process_error, assert_raises_rpc_error, get_auth_cookie, ) +import time # The block reward of coinbaseoutput.nValue (50) BTC/block matures after # COINBASE_MATURITY (100) blocks. 
Therefore, after mining 101 blocks we expect @@ -248,6 +250,12 @@ class TestBitcoinCli(BitcoinTestFramework): self.nodes[0].wait_for_rpc_connection() assert_equal(blocks, BLOCKS + 25) + self.log.info("Test -rpcwait option waits at most -rpcwaittimeout seconds for startup") + self.stop_node(0) # stop the node so we time out + start_time = time.time() + assert_raises_process_error(1, "Could not connect to the server", self.nodes[0].cli('-rpcwait', '-rpcwaittimeout=5').echo) + assert_greater_than_or_equal(time.time(), start_time + 5) + if __name__ == '__main__': TestBitcoinCli().main() diff --git a/test/functional/interface_zmq.py b/test/functional/interface_zmq.py index 94e162b748..15f352d68c 100755 --- a/test/functional/interface_zmq.py +++ b/test/functional/interface_zmq.py @@ -5,10 +5,21 @@ """Test the ZMQ notification interface.""" import struct -from test_framework.address import ADDRESS_BCRT1_UNSPENDABLE, ADDRESS_BCRT1_P2WSH_OP_TRUE -from test_framework.blocktools import create_block, create_coinbase, add_witness_commitment +from test_framework.address import ( + ADDRESS_BCRT1_P2WSH_OP_TRUE, + ADDRESS_BCRT1_UNSPENDABLE, +) +from test_framework.blocktools import ( + add_witness_commitment, + create_block, + create_coinbase, +) from test_framework.test_framework import BitcoinTestFramework -from test_framework.messages import CTransaction, hash256, FromHex +from test_framework.messages import ( + CTransaction, + hash256, + tx_from_hex, +) from test_framework.util import ( assert_equal, assert_raises_rpc_error, @@ -393,10 +404,10 @@ class ZMQTest (BitcoinTestFramework): bump_info = self.nodes[0].bumpfee(orig_txid) # Mine the pre-bump tx block = create_block(int(self.nodes[0].getbestblockhash(), 16), create_coinbase(self.nodes[0].getblockcount()+1)) - tx = FromHex(CTransaction(), raw_tx) + tx = tx_from_hex(raw_tx) block.vtx.append(tx) for txid in more_tx: - tx = FromHex(CTransaction(), self.nodes[0].getrawtransaction(txid)) + tx = tx_from_hex(self.nodes[0].getrawtransaction(txid)) block.vtx.append(tx) add_witness_commitment(block) block.solve() diff --git a/test/functional/mempool_accept.py b/test/functional/mempool_accept.py index 12aac3ab65..1705d957aa 100755 --- a/test/functional/mempool_accept.py +++ b/test/functional/mempool_accept.py @@ -5,7 +5,6 @@ """Test mempool acceptance of raw transactions.""" from decimal import Decimal -from io import BytesIO import math from test_framework.test_framework import BitcoinTestFramework @@ -14,10 +13,10 @@ from test_framework.messages import ( BIP125_SEQUENCE_NUMBER, COIN, COutPoint, - CTransaction, CTxOut, MAX_BLOCK_BASE_SIZE, MAX_MONEY, + tx_from_hex, ) from test_framework.script import ( hash160, @@ -33,7 +32,6 @@ from test_framework.script import ( from test_framework.util import ( assert_equal, assert_raises_rpc_error, - hex_str_to_bytes, ) @@ -91,8 +89,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): inputs=[{"txid": txid_in_block, "vout": 0, "sequence": BIP125_SEQUENCE_NUMBER}], # RBF is used later outputs=[{node.getnewaddress(): Decimal('0.3') - fee}], ))['hex'] - tx = CTransaction() - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0))) + tx = tx_from_hex(raw_tx_0) txid_0 = tx.rehash() self.check_mempool_result( result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': fee}}], @@ -107,7 +104,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): outputs=[{node.getnewaddress(): output_amount}], locktime=node.getblockcount() + 2000, # Can be anything ))['hex'] - 
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_final))) + tx = tx_from_hex(raw_tx_final) fee_expected = coin['amount'] - output_amount self.check_mempool_result( result_expected=[{'txid': tx.rehash(), 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': fee_expected}}], @@ -126,11 +123,11 @@ class MempoolAcceptanceTest(BitcoinTestFramework): ) self.log.info('A transaction that replaces a mempool transaction') - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0))) + tx = tx_from_hex(raw_tx_0) tx.vout[0].nValue -= int(fee * COIN) # Double the fee tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER + 1 # Now, opt out of RBF raw_tx_0 = node.signrawtransactionwithwallet(tx.serialize().hex())['hex'] - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0))) + tx = tx_from_hex(raw_tx_0) txid_0 = tx.rehash() self.check_mempool_result( result_expected=[{'txid': txid_0, 'allowed': True, 'vsize': tx.get_vsize(), 'fees': {'base': (2 * fee)}}], @@ -141,7 +138,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): # Send the transaction that replaces the mempool transaction and opts out of replaceability node.sendrawtransaction(hexstring=tx.serialize().hex(), maxfeerate=0) # take original raw_tx_0 - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0))) + tx = tx_from_hex(raw_tx_0) tx.vout[0].nValue -= int(4 * fee * COIN) # Set more fee # skip re-signing the tx self.check_mempool_result( @@ -151,7 +148,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): ) self.log.info('A transaction with missing inputs, that never existed') - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0))) + tx = tx_from_hex(raw_tx_0) tx.vin[0].prevout = COutPoint(hash=int('ff' * 32, 16), n=14) # skip re-signing the tx self.check_mempool_result( @@ -160,7 +157,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): ) self.log.info('A transaction with missing inputs, that existed once in the past') - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0))) + tx = tx_from_hex(raw_tx_0) tx.vin[0].prevout.n = 1 # Set vout to 1, to spend the other outpoint (49 coins) of the in-chain-tx we want to double spend raw_tx_1 = node.signrawtransactionwithwallet(tx.serialize().hex())['hex'] txid_1 = node.sendrawtransaction(hexstring=raw_tx_1, maxfeerate=0) @@ -190,7 +187,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): inputs=[{'txid': txid_spend_both, 'vout': 0}], outputs=[{node.getnewaddress(): 0.05}], ))['hex'] - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) # Reference tx should be valid on itself self.check_mempool_result( result_expected=[{'txid': tx.rehash(), 'allowed': True, 'vsize': tx.get_vsize(), 'fees': { 'base': Decimal('0.1') - Decimal('0.05')}}], @@ -199,17 +196,17 @@ class MempoolAcceptanceTest(BitcoinTestFramework): ) self.log.info('A transaction with no outputs') - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) tx.vout = [] # Skip re-signing the transaction for context independent checks from now on - # tx.deserialize(BytesIO(hex_str_to_bytes(node.signrawtransactionwithwallet(tx.serialize().hex())['hex']))) + # tx = tx_from_hex(node.signrawtransactionwithwallet(tx.serialize().hex())['hex']) self.check_mempool_result( result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-empty'}], rawtxs=[tx.serialize().hex()], ) self.log.info('A really large transaction') - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) tx.vin = [tx.vin[0]] * 
math.ceil(MAX_BLOCK_BASE_SIZE / len(tx.vin[0].serialize())) self.check_mempool_result( result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-oversize'}], @@ -217,7 +214,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): ) self.log.info('A transaction with negative output value') - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) tx.vout[0].nValue *= -1 self.check_mempool_result( result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-negative'}], @@ -226,7 +223,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): # The following two validations prevent overflow of the output amounts (see CVE-2010-5139). self.log.info('A transaction with too large output value') - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) tx.vout[0].nValue = MAX_MONEY + 1 self.check_mempool_result( result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-vout-toolarge'}], @@ -234,7 +231,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): ) self.log.info('A transaction with too large sum of output values') - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) tx.vout = [tx.vout[0]] * 2 tx.vout[0].nValue = MAX_MONEY self.check_mempool_result( @@ -243,7 +240,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): ) self.log.info('A transaction with duplicate inputs') - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) tx.vin = [tx.vin[0]] * 2 self.check_mempool_result( result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bad-txns-inputs-duplicate'}], @@ -253,26 +250,26 @@ class MempoolAcceptanceTest(BitcoinTestFramework): self.log.info('A coinbase transaction') # Pick the input of the first tx we signed, so it has to be a coinbase tx raw_tx_coinbase_spent = node.getrawtransaction(txid=node.decoderawtransaction(hexstring=raw_tx_in_block)['vin'][0]['txid']) - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_coinbase_spent))) + tx = tx_from_hex(raw_tx_coinbase_spent) self.check_mempool_result( result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'coinbase'}], rawtxs=[tx.serialize().hex()], ) self.log.info('Some nonstandard transactions') - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) tx.nVersion = 3 # A version currently non-standard self.check_mempool_result( result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'version'}], rawtxs=[tx.serialize().hex()], ) - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) tx.vout[0].scriptPubKey = CScript([OP_0]) # Some non-standard script self.check_mempool_result( result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptpubkey'}], rawtxs=[tx.serialize().hex()], ) - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) key = ECKey() key.generate() pubkey = key.get_pubkey().get_bytes() @@ -281,19 +278,19 @@ class MempoolAcceptanceTest(BitcoinTestFramework): result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'bare-multisig'}], rawtxs=[tx.serialize().hex()], ) - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) tx.vin[0].scriptSig = CScript([OP_HASH160]) # Some not-pushonly scriptSig self.check_mempool_result( 
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptsig-not-pushonly'}], rawtxs=[tx.serialize().hex()], ) - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) tx.vin[0].scriptSig = CScript([b'a' * 1648]) # Some too large scriptSig (>1650 bytes) self.check_mempool_result( result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'scriptsig-size'}], rawtxs=[tx.serialize().hex()], ) - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) output_p2sh_burn = CTxOut(nValue=540, scriptPubKey=CScript([OP_HASH160, hash160(b'burn'), OP_EQUAL])) num_scripts = 100000 // len(output_p2sh_burn.serialize()) # Use enough outputs to make the tx too large for our policy tx.vout = [output_p2sh_burn] * num_scripts @@ -301,14 +298,14 @@ class MempoolAcceptanceTest(BitcoinTestFramework): result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'tx-size'}], rawtxs=[tx.serialize().hex()], ) - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) tx.vout[0] = output_p2sh_burn tx.vout[0].nValue -= 1 # Make output smaller, such that it is dust for our policy self.check_mempool_result( result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'dust'}], rawtxs=[tx.serialize().hex()], ) - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'\xff']) tx.vout = [tx.vout[0]] * 2 self.check_mempool_result( @@ -317,7 +314,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): ) self.log.info('A timelocked transaction') - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) tx.vin[0].nSequence -= 1 # Should be non-max, so locktime is not ignored tx.nLockTime = node.getblockcount() + 1 self.check_mempool_result( @@ -326,7 +323,7 @@ class MempoolAcceptanceTest(BitcoinTestFramework): ) self.log.info('A transaction that is locked by BIP68 sequence logic') - tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference))) + tx = tx_from_hex(raw_tx_reference) tx.vin[0].nSequence = 2 # We could include it in the second block mined from now, but not the very next one # Can skip re-signing the tx because of early rejection self.check_mempool_result( diff --git a/test/functional/mempool_package_onemore.py b/test/functional/mempool_package_onemore.py index 1e9895e621..fcd8b061fa 100755 --- a/test/functional/mempool_package_onemore.py +++ b/test/functional/mempool_package_onemore.py @@ -11,7 +11,11 @@ from decimal import Decimal from test_framework.blocktools import COINBASE_MATURITY from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, assert_raises_rpc_error, satoshi_round +from test_framework.util import ( + assert_equal, + assert_raises_rpc_error, + chain_transaction, +) MAX_ANCESTORS = 25 MAX_DESCENDANTS = 25 @@ -24,23 +28,6 @@ class MempoolPackagesTest(BitcoinTestFramework): def skip_test_if_missing_module(self): self.skip_if_no_wallet() - # Build a transaction that spends parent_txid:vout - # Return amount sent - def chain_transaction(self, node, parent_txids, vouts, value, fee, num_outputs): - send_value = satoshi_round((value - fee)/num_outputs) - inputs = [] - for (txid, vout) in zip(parent_txids, vouts): - inputs.append({'txid' : txid, 'vout' : vout}) - outputs = {} - for _ in range(num_outputs): - outputs[node.getnewaddress()] = 
send_value - rawtx = node.createrawtransaction(inputs, outputs, 0, True) - signedtx = node.signrawtransactionwithwallet(rawtx) - txid = node.sendrawtransaction(signedtx['hex']) - fulltx = node.getrawtransaction(txid, 1) - assert len(fulltx['vout']) == num_outputs # make sure we didn't generate a change output - return (txid, send_value) - def run_test(self): # Mine some blocks and have them mature. self.nodes[0].generate(COINBASE_MATURITY + 1) @@ -53,32 +40,32 @@ class MempoolPackagesTest(BitcoinTestFramework): # MAX_ANCESTORS transactions off a confirmed tx should be fine chain = [] for _ in range(4): - (txid, sent_value) = self.chain_transaction(self.nodes[0], [txid], [vout], value, fee, 2) + (txid, sent_value) = chain_transaction(self.nodes[0], [txid], [vout], value, fee, 2) vout = 0 value = sent_value chain.append([txid, value]) for _ in range(MAX_ANCESTORS - 4): - (txid, sent_value) = self.chain_transaction(self.nodes[0], [txid], [0], value, fee, 1) + (txid, sent_value) = chain_transaction(self.nodes[0], [txid], [0], value, fee, 1) value = sent_value chain.append([txid, value]) - (second_chain, second_chain_value) = self.chain_transaction(self.nodes[0], [utxo[1]['txid']], [utxo[1]['vout']], utxo[1]['amount'], fee, 1) + (second_chain, second_chain_value) = chain_transaction(self.nodes[0], [utxo[1]['txid']], [utxo[1]['vout']], utxo[1]['amount'], fee, 1) # Check mempool has MAX_ANCESTORS + 1 transactions in it assert_equal(len(self.nodes[0].getrawmempool(True)), MAX_ANCESTORS + 1) # Adding one more transaction on to the chain should fail. - assert_raises_rpc_error(-26, "too-long-mempool-chain, too many unconfirmed ancestors [limit: 25]", self.chain_transaction, self.nodes[0], [txid], [0], value, fee, 1) + assert_raises_rpc_error(-26, "too-long-mempool-chain, too many unconfirmed ancestors [limit: 25]", chain_transaction, self.nodes[0], [txid], [0], value, fee, 1) # ...even if it chains on from some point in the middle of the chain. - assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", self.chain_transaction, self.nodes[0], [chain[2][0]], [1], chain[2][1], fee, 1) - assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", self.chain_transaction, self.nodes[0], [chain[1][0]], [1], chain[1][1], fee, 1) + assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", chain_transaction, self.nodes[0], [chain[2][0]], [1], chain[2][1], fee, 1) + assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", chain_transaction, self.nodes[0], [chain[1][0]], [1], chain[1][1], fee, 1) # ...even if it chains on to two parent transactions with one in the chain. 
- assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", self.chain_transaction, self.nodes[0], [chain[0][0], second_chain], [1, 0], chain[0][1] + second_chain_value, fee, 1) + assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", chain_transaction, self.nodes[0], [chain[0][0], second_chain], [1, 0], chain[0][1] + second_chain_value, fee, 1) # ...especially if its > 40k weight - assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", self.chain_transaction, self.nodes[0], [chain[0][0]], [1], chain[0][1], fee, 350) + assert_raises_rpc_error(-26, "too-long-mempool-chain, too many descendants", chain_transaction, self.nodes[0], [chain[0][0]], [1], chain[0][1], fee, 350) # But not if it chains directly off the first transaction - (replacable_txid, replacable_orig_value) = self.chain_transaction(self.nodes[0], [chain[0][0]], [1], chain[0][1], fee, 1) + (replacable_txid, replacable_orig_value) = chain_transaction(self.nodes[0], [chain[0][0]], [1], chain[0][1], fee, 1) # and the second chain should work just fine - self.chain_transaction(self.nodes[0], [second_chain], [0], second_chain_value, fee, 1) + chain_transaction(self.nodes[0], [second_chain], [0], second_chain_value, fee, 1) # Make sure we can RBF the chain which used our carve-out rule second_tx_outputs = {self.nodes[0].getrawtransaction(replacable_txid, True)["vout"][0]['scriptPubKey']['address']: replacable_orig_value - (Decimal(1) / Decimal(100))} diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py index 606717d890..5fc3ec23ae 100755 --- a/test/functional/mempool_packages.py +++ b/test/functional/mempool_packages.py @@ -13,6 +13,7 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, + chain_transaction, satoshi_round, ) @@ -42,21 +43,6 @@ class MempoolPackagesTest(BitcoinTestFramework): def skip_test_if_missing_module(self): self.skip_if_no_wallet() - # Build a transaction that spends parent_txid:vout - # Return amount sent - def chain_transaction(self, node, parent_txid, vout, value, fee, num_outputs): - send_value = satoshi_round((value - fee)/num_outputs) - inputs = [ {'txid' : parent_txid, 'vout' : vout} ] - outputs = {} - for _ in range(num_outputs): - outputs[node.getnewaddress()] = send_value - rawtx = node.createrawtransaction(inputs, outputs) - signedtx = node.signrawtransactionwithwallet(rawtx) - txid = node.sendrawtransaction(signedtx['hex']) - fulltx = node.getrawtransaction(txid, 1) - assert len(fulltx['vout']) == num_outputs # make sure we didn't generate a change output - return (txid, send_value) - def run_test(self): # Mine some blocks and have them mature. peer_inv_store = self.nodes[0].add_p2p_connection(P2PTxInvStore()) # keep track of invs @@ -71,7 +57,7 @@ class MempoolPackagesTest(BitcoinTestFramework): chain = [] witness_chain = [] for _ in range(MAX_ANCESTORS): - (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, 0, value, fee, 1) + (txid, sent_value) = chain_transaction(self.nodes[0], [txid], [0], value, fee, 1) value = sent_value chain.append(txid) # We need the wtxids to check P2P announcements @@ -189,7 +175,7 @@ class MempoolPackagesTest(BitcoinTestFramework): assert_equal(mempool[x]['descendantfees'], descendant_fees * COIN + 1000) # Adding one more transaction on to the chain should fail. 
- assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], txid, vout, value, fee, 1) + assert_raises_rpc_error(-26, "too-long-mempool-chain", chain_transaction, self.nodes[0], [txid], [vout], value, fee, 1) # Check that prioritising a tx before it's added to the mempool works # First clear the mempool by mining a block. @@ -238,7 +224,7 @@ class MempoolPackagesTest(BitcoinTestFramework): transaction_package = [] tx_children = [] # First create one parent tx with 10 children - (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 10) + (txid, sent_value) = chain_transaction(self.nodes[0], [txid], [vout], value, fee, 10) parent_transaction = txid for i in range(10): transaction_package.append({'txid': txid, 'vout': i, 'amount': sent_value}) @@ -247,7 +233,7 @@ class MempoolPackagesTest(BitcoinTestFramework): chain = [] # save sent txs for the purpose of checking node1's mempool later (see below) for _ in range(MAX_DESCENDANTS - 1): utxo = transaction_package.pop(0) - (txid, sent_value) = self.chain_transaction(self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10) + (txid, sent_value) = chain_transaction(self.nodes[0], [utxo['txid']], [utxo['vout']], utxo['amount'], fee, 10) chain.append(txid) if utxo['txid'] is parent_transaction: tx_children.append(txid) @@ -263,7 +249,7 @@ class MempoolPackagesTest(BitcoinTestFramework): # Sending one more chained transaction will fail utxo = transaction_package.pop(0) - assert_raises_rpc_error(-26, "too-long-mempool-chain", self.chain_transaction, self.nodes[0], utxo['txid'], utxo['vout'], utxo['amount'], fee, 10) + assert_raises_rpc_error(-26, "too-long-mempool-chain", chain_transaction, self.nodes[0], [utxo['txid']], [utxo['vout']], utxo['amount'], fee, 10) # Check that node1's mempool is as expected, containing: # - txs from previous ancestor test (-> custom ancestor limit) @@ -321,13 +307,13 @@ class MempoolPackagesTest(BitcoinTestFramework): value = send_value # Create tx1 - tx1_id, _ = self.chain_transaction(self.nodes[0], tx0_id, 0, value, fee, 1) + tx1_id, _ = chain_transaction(self.nodes[0], [tx0_id], [0], value, fee, 1) # Create tx2-7 vout = 1 txid = tx0_id for _ in range(6): - (txid, sent_value) = self.chain_transaction(self.nodes[0], txid, vout, value, fee, 1) + (txid, sent_value) = chain_transaction(self.nodes[0], [txid], [vout], value, fee, 1) vout = 0 value = sent_value diff --git a/test/functional/p2p_addr_relay.py b/test/functional/p2p_addr_relay.py index 87297989ba..1a414959b9 100755 --- a/test/functional/p2p_addr_relay.py +++ b/test/functional/p2p_addr_relay.py @@ -23,30 +23,29 @@ import time class AddrReceiver(P2PInterface): num_ipv4_received = 0 + test_addr_contents = False - def on_addr(self, message): - for addr in message.addrs: - assert_equal(addr.nServices, 9) - if not 8333 <= addr.port < 8343: - raise AssertionError("Invalid addr.port of {} (8333-8342 expected)".format(addr.port)) - assert addr.ip.startswith('123.123.123.') - self.num_ipv4_received += 1 - - -class GetAddrStore(P2PInterface): - getaddr_received = False - num_ipv4_received = 0 - - def on_getaddr(self, message): - self.getaddr_received = True + def __init__(self, test_addr_contents=False): + super().__init__() + self.test_addr_contents = test_addr_contents def on_addr(self, message): for addr in message.addrs: self.num_ipv4_received += 1 + if(self.test_addr_contents): + # relay_tests checks the content of the addr messages match + # expectations based on the message creation in 
setup_addr_msg + assert_equal(addr.nServices, 9) + if not 8333 <= addr.port < 8343: + raise AssertionError("Invalid addr.port of {} (8333-8342 expected)".format(addr.port)) + assert addr.ip.startswith('123.123.123.') def addr_received(self): return self.num_ipv4_received != 0 + def getaddr_received(self): + return self.message_count['getaddr'] > 0 + class AddrTest(BitcoinTestFramework): counter = 0 @@ -79,7 +78,7 @@ class AddrTest(BitcoinTestFramework): def send_addr_msg(self, source, msg, receivers): source.send_and_ping(msg) # pop m_next_addr_send timer - self.mocktime += 5 * 60 + self.mocktime += 10 * 60 self.nodes[0].setmocktime(self.mocktime) for peer in receivers: peer.sync_send_with_ping() @@ -101,7 +100,7 @@ class AddrTest(BitcoinTestFramework): num_receivers = 7 receivers = [] for _ in range(num_receivers): - receivers.append(self.nodes[0].add_p2p_connection(AddrReceiver())) + receivers.append(self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True))) # Keep this with length <= 10. Addresses from larger messages are not # relayed. @@ -125,8 +124,8 @@ class AddrTest(BitcoinTestFramework): self.nodes[0].disconnect_p2ps() self.log.info('Check relay of addresses received from outbound peers') - inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver()) - full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(GetAddrStore(), p2p_idx=0, connection_type="outbound-full-relay") + inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver(test_addr_contents=True)) + full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay") msg = self.setup_addr_msg(2) self.send_addr_msg(full_outbound_peer, msg, [inbound_peer]) self.log.info('Check that the first addr message received from an outbound peer is not relayed') @@ -142,7 +141,7 @@ class AddrTest(BitcoinTestFramework): assert_equal(inbound_peer.num_ipv4_received, 2) self.log.info('Check address relay to outbound peers') - block_relay_peer = self.nodes[0].add_outbound_p2p_connection(GetAddrStore(), p2p_idx=1, connection_type="block-relay-only") + block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only") msg3 = self.setup_addr_msg(2) self.send_addr_msg(inbound_peer, msg3, [full_outbound_peer, block_relay_peer]) @@ -156,17 +155,17 @@ class AddrTest(BitcoinTestFramework): def getaddr_tests(self): self.log.info('Test getaddr behavior') self.log.info('Check that we send a getaddr message upon connecting to an outbound-full-relay peer') - full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(GetAddrStore(), p2p_idx=0, connection_type="outbound-full-relay") + full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay") full_outbound_peer.sync_with_ping() - assert full_outbound_peer.getaddr_received + assert full_outbound_peer.getaddr_received() self.log.info('Check that we do not send a getaddr message upon connecting to a block-relay-only peer') - block_relay_peer = self.nodes[0].add_outbound_p2p_connection(GetAddrStore(), p2p_idx=1, connection_type="block-relay-only") + block_relay_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=1, connection_type="block-relay-only") block_relay_peer.sync_with_ping() - assert_equal(block_relay_peer.getaddr_received, False) + assert_equal(block_relay_peer.getaddr_received(), False) self.log.info('Check that we answer getaddr messages only from inbound 
peers') - inbound_peer = self.nodes[0].add_p2p_connection(GetAddrStore()) + inbound_peer = self.nodes[0].add_p2p_connection(AddrReceiver()) inbound_peer.sync_with_ping() # Add some addresses to addrman @@ -182,7 +181,7 @@ class AddrTest(BitcoinTestFramework): self.mocktime += 5 * 60 self.nodes[0].setmocktime(self.mocktime) - inbound_peer.wait_until(inbound_peer.addr_received) + inbound_peer.wait_until(lambda: inbound_peer.addr_received() is True) assert_equal(full_outbound_peer.num_ipv4_received, 0) assert_equal(block_relay_peer.num_ipv4_received, 0) @@ -196,9 +195,9 @@ class AddrTest(BitcoinTestFramework): self.mocktime = int(time.time()) self.log.info('Check that we send getaddr messages') - full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(GetAddrStore(), p2p_idx=0, connection_type="outbound-full-relay") + full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay") full_outbound_peer.sync_with_ping() - assert full_outbound_peer.getaddr_received + assert full_outbound_peer.getaddr_received() self.log.info('Check that we relay address messages') addr_source = self.nodes[0].add_p2p_connection(P2PInterface()) diff --git a/test/functional/p2p_blockfilters.py b/test/functional/p2p_blockfilters.py index 03662babef..63fc2a98d4 100755 --- a/test/functional/p2p_blockfilters.py +++ b/test/functional/p2p_blockfilters.py @@ -24,7 +24,7 @@ from test_framework.util import ( assert_equal, ) -class CFiltersClient(P2PInterface): +class FiltersClient(P2PInterface): def __init__(self): super().__init__() # Store the cfilters received. @@ -39,6 +39,7 @@ class CFiltersClient(P2PInterface): """Store cfilters received in a list.""" self.cfilters.append(message) + class CompactFiltersTest(BitcoinTestFramework): def set_test_params(self): self.setup_clean_chain = True @@ -51,8 +52,8 @@ class CompactFiltersTest(BitcoinTestFramework): def run_test(self): # Node 0 supports COMPACT_FILTERS, node 1 does not. - node0 = self.nodes[0].add_p2p_connection(CFiltersClient()) - node1 = self.nodes[1].add_p2p_connection(CFiltersClient()) + peer_0 = self.nodes[0].add_p2p_connection(FiltersClient()) + peer_1 = self.nodes[1].add_p2p_connection(FiltersClient()) # Nodes 0 & 1 share the same first 999 blocks in the chain. self.nodes[0].generate(999) @@ -61,16 +62,16 @@ class CompactFiltersTest(BitcoinTestFramework): # Stale blocks by disconnecting nodes 0 & 1, mining, then reconnecting self.disconnect_nodes(0, 1) - self.nodes[0].generate(1) - self.wait_until(lambda: self.nodes[0].getblockcount() == 1000) - stale_block_hash = self.nodes[0].getblockhash(1000) + stale_block_hash = self.nodes[0].generate(1)[0] + self.nodes[0].syncwithvalidationinterfacequeue() + assert_equal(self.nodes[0].getblockcount(), 1000) self.nodes[1].generate(1001) - self.wait_until(lambda: self.nodes[1].getblockcount() == 2000) + assert_equal(self.nodes[1].getblockcount(), 2000) # Check that nodes have signalled NODE_COMPACT_FILTERS correctly. - assert node0.nServices & NODE_COMPACT_FILTERS != 0 - assert node1.nServices & NODE_COMPACT_FILTERS == 0 + assert peer_0.nServices & NODE_COMPACT_FILTERS != 0 + assert peer_1.nServices & NODE_COMPACT_FILTERS == 0 # Check that the localservices is as expected. 
assert int(self.nodes[0].getnetworkinfo()['localservices'], 16) & NODE_COMPACT_FILTERS != 0 @@ -79,10 +80,10 @@ class CompactFiltersTest(BitcoinTestFramework): self.log.info("get cfcheckpt on chain to be re-orged out.") request = msg_getcfcheckpt( filter_type=FILTER_TYPE_BASIC, - stop_hash=int(stale_block_hash, 16) + stop_hash=int(stale_block_hash, 16), ) - node0.send_and_ping(message=request) - response = node0.last_message['cfcheckpt'] + peer_0.send_and_ping(message=request) + response = peer_0.last_message['cfcheckpt'] assert_equal(response.filter_type, request.filter_type) assert_equal(response.stop_hash, request.stop_hash) assert_equal(len(response.headers), 1) @@ -90,6 +91,7 @@ class CompactFiltersTest(BitcoinTestFramework): self.log.info("Reorg node 0 to a new chain.") self.connect_nodes(0, 1) self.sync_blocks(timeout=600) + self.nodes[0].syncwithvalidationinterfacequeue() main_block_hash = self.nodes[0].getblockhash(1000) assert main_block_hash != stale_block_hash, "node 0 chain did not reorganize" @@ -98,10 +100,10 @@ class CompactFiltersTest(BitcoinTestFramework): tip_hash = self.nodes[0].getbestblockhash() request = msg_getcfcheckpt( filter_type=FILTER_TYPE_BASIC, - stop_hash=int(tip_hash, 16) + stop_hash=int(tip_hash, 16), ) - node0.send_and_ping(request) - response = node0.last_message['cfcheckpt'] + peer_0.send_and_ping(request) + response = peer_0.last_message['cfcheckpt'] assert_equal(response.filter_type, request.filter_type) assert_equal(response.stop_hash, request.stop_hash) @@ -109,51 +111,51 @@ class CompactFiltersTest(BitcoinTestFramework): tip_cfcheckpt = self.nodes[0].getblockfilter(tip_hash, 'basic')['header'] assert_equal( response.headers, - [int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)] + [int(header, 16) for header in (main_cfcheckpt, tip_cfcheckpt)], ) self.log.info("Check that peers can fetch cfcheckpt on stale chain.") request = msg_getcfcheckpt( filter_type=FILTER_TYPE_BASIC, - stop_hash=int(stale_block_hash, 16) + stop_hash=int(stale_block_hash, 16), ) - node0.send_and_ping(request) - response = node0.last_message['cfcheckpt'] + peer_0.send_and_ping(request) + response = peer_0.last_message['cfcheckpt'] stale_cfcheckpt = self.nodes[0].getblockfilter(stale_block_hash, 'basic')['header'] assert_equal( response.headers, - [int(header, 16) for header in (stale_cfcheckpt,)] + [int(header, 16) for header in (stale_cfcheckpt, )], ) self.log.info("Check that peers can fetch cfheaders on active chain.") request = msg_getcfheaders( filter_type=FILTER_TYPE_BASIC, start_height=1, - stop_hash=int(main_block_hash, 16) + stop_hash=int(main_block_hash, 16), ) - node0.send_and_ping(request) - response = node0.last_message['cfheaders'] + peer_0.send_and_ping(request) + response = peer_0.last_message['cfheaders'] main_cfhashes = response.hashes assert_equal(len(main_cfhashes), 1000) assert_equal( compute_last_header(response.prev_header, response.hashes), - int(main_cfcheckpt, 16) + int(main_cfcheckpt, 16), ) self.log.info("Check that peers can fetch cfheaders on stale chain.") request = msg_getcfheaders( filter_type=FILTER_TYPE_BASIC, start_height=1, - stop_hash=int(stale_block_hash, 16) + stop_hash=int(stale_block_hash, 16), ) - node0.send_and_ping(request) - response = node0.last_message['cfheaders'] + peer_0.send_and_ping(request) + response = peer_0.last_message['cfheaders'] stale_cfhashes = response.hashes assert_equal(len(stale_cfhashes), 1000) assert_equal( compute_last_header(response.prev_header, response.hashes), - int(stale_cfcheckpt, 16) + 
int(stale_cfcheckpt, 16), ) self.log.info("Check that peers can fetch cfilters.") @@ -161,11 +163,10 @@ class CompactFiltersTest(BitcoinTestFramework): request = msg_getcfilters( filter_type=FILTER_TYPE_BASIC, start_height=1, - stop_hash=int(stop_hash, 16) + stop_hash=int(stop_hash, 16), ) - node0.send_message(request) - node0.sync_with_ping() - response = node0.pop_cfilters() + peer_0.send_and_ping(request) + response = peer_0.pop_cfilters() assert_equal(len(response), 10) self.log.info("Check that cfilter responses are correct.") @@ -180,11 +181,10 @@ class CompactFiltersTest(BitcoinTestFramework): request = msg_getcfilters( filter_type=FILTER_TYPE_BASIC, start_height=1000, - stop_hash=int(stale_block_hash, 16) + stop_hash=int(stale_block_hash, 16), ) - node0.send_message(request) - node0.sync_with_ping() - response = node0.pop_cfilters() + peer_0.send_and_ping(request) + response = peer_0.pop_cfilters() assert_equal(len(response), 1) cfilter = response[0] @@ -197,23 +197,23 @@ class CompactFiltersTest(BitcoinTestFramework): requests = [ msg_getcfcheckpt( filter_type=FILTER_TYPE_BASIC, - stop_hash=int(main_block_hash, 16) + stop_hash=int(main_block_hash, 16), ), msg_getcfheaders( filter_type=FILTER_TYPE_BASIC, start_height=1000, - stop_hash=int(main_block_hash, 16) + stop_hash=int(main_block_hash, 16), ), msg_getcfilters( filter_type=FILTER_TYPE_BASIC, start_height=1000, - stop_hash=int(main_block_hash, 16) + stop_hash=int(main_block_hash, 16), ), ] for request in requests: - node1 = self.nodes[1].add_p2p_connection(P2PInterface()) - node1.send_message(request) - node1.wait_for_disconnect() + peer_1 = self.nodes[1].add_p2p_connection(P2PInterface()) + peer_1.send_message(request) + peer_1.wait_for_disconnect() self.log.info("Check that invalid requests result in disconnection.") requests = [ @@ -221,18 +221,18 @@ class CompactFiltersTest(BitcoinTestFramework): msg_getcfilters( filter_type=FILTER_TYPE_BASIC, start_height=0, - stop_hash=int(main_block_hash, 16) + stop_hash=int(main_block_hash, 16), ), # Requesting too many filter headers results in disconnection. msg_getcfheaders( filter_type=FILTER_TYPE_BASIC, start_height=0, - stop_hash=int(tip_hash, 16) + stop_hash=int(tip_hash, 16), ), # Requesting unknown filter type results in disconnection. msg_getcfcheckpt( filter_type=255, - stop_hash=int(main_block_hash, 16) + stop_hash=int(main_block_hash, 16), ), # Requesting unknown hash results in disconnection. 
msg_getcfcheckpt( @@ -241,9 +241,10 @@ class CompactFiltersTest(BitcoinTestFramework): ), ] for request in requests: - node0 = self.nodes[0].add_p2p_connection(P2PInterface()) - node0.send_message(request) - node0.wait_for_disconnect() + peer_0 = self.nodes[0].add_p2p_connection(P2PInterface()) + peer_0.send_message(request) + peer_0.wait_for_disconnect() + def compute_last_header(prev_header, hashes): """Compute the last filter header from a starting header and a sequence of filter hashes.""" @@ -252,5 +253,6 @@ def compute_last_header(prev_header, hashes): header = hash256(ser_uint256(filter_hash) + header) return uint256_from_str(header) + if __name__ == '__main__': CompactFiltersTest().main() diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py index 3e4f2f974d..b4e662de2e 100755 --- a/test/functional/p2p_compactblocks.py +++ b/test/functional/p2p_compactblocks.py @@ -15,11 +15,56 @@ from test_framework.blocktools import ( add_witness_commitment, create_block, ) -from test_framework.messages import BlockTransactions, BlockTransactionsRequest, calculate_shortid, CBlock, CBlockHeader, CInv, COutPoint, CTransaction, CTxIn, CTxInWitness, CTxOut, FromHex, HeaderAndShortIDs, msg_no_witness_block, msg_no_witness_blocktxn, msg_cmpctblock, msg_getblocktxn, msg_getdata, msg_getheaders, msg_headers, msg_inv, msg_sendcmpct, msg_sendheaders, msg_tx, msg_block, msg_blocktxn, MSG_BLOCK, MSG_CMPCT_BLOCK, MSG_WITNESS_FLAG, NODE_NETWORK, P2PHeaderAndShortIDs, PrefilledTransaction, ser_uint256, ToHex -from test_framework.p2p import p2p_lock, P2PInterface -from test_framework.script import CScript, OP_TRUE, OP_DROP +from test_framework.messages import ( + BlockTransactions, + BlockTransactionsRequest, + CBlock, + CBlockHeader, + CInv, + COutPoint, + CTransaction, + CTxIn, + CTxInWitness, + CTxOut, + from_hex, + HeaderAndShortIDs, + MSG_BLOCK, + MSG_CMPCT_BLOCK, + MSG_WITNESS_FLAG, + NODE_NETWORK, + P2PHeaderAndShortIDs, + PrefilledTransaction, + calculate_shortid, + msg_block, + msg_blocktxn, + msg_cmpctblock, + msg_getblocktxn, + msg_getdata, + msg_getheaders, + msg_headers, + msg_inv, + msg_no_witness_block, + msg_no_witness_blocktxn, + msg_sendcmpct, + msg_sendheaders, + msg_tx, + ser_uint256, + tx_from_hex, +) +from test_framework.p2p import ( + P2PInterface, + p2p_lock, +) +from test_framework.script import ( + CScript, + OP_DROP, + OP_TRUE, +) from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, softfork_active +from test_framework.util import ( + assert_equal, + softfork_active, +) # TestP2PConn: A peer we use to send messages to bitcoind, and store responses. class TestP2PConn(P2PInterface): @@ -257,7 +302,7 @@ class CompactBlocksTest(BitcoinTestFramework): for _ in range(num_transactions): txid = node.sendtoaddress(address, 0.1) hex_tx = node.gettransaction(txid)["hex"] - tx = FromHex(CTransaction(), hex_tx) + tx = tx_from_hex(hex_tx) if not tx.wit.is_null(): segwit_tx_generated = True @@ -276,7 +321,7 @@ class CompactBlocksTest(BitcoinTestFramework): block_hash = int(node.generate(1)[0], 16) # Store the raw block in our internal format. 
- block = FromHex(CBlock(), node.getblock("%064x" % block_hash, False)) + block = from_hex(CBlock(), node.getblock("%064x" % block_hash, False)) for tx in block.vtx: tx.calc_sha256() block.rehash() @@ -569,7 +614,7 @@ class CompactBlocksTest(BitcoinTestFramework): current_height = chain_height while (current_height >= chain_height - MAX_GETBLOCKTXN_DEPTH): block_hash = node.getblockhash(current_height) - block = FromHex(CBlock(), node.getblock(block_hash, False)) + block = from_hex(CBlock(), node.getblock(block_hash, False)) msg = msg_getblocktxn() msg.block_txn_request = BlockTransactionsRequest(int(block_hash, 16), []) @@ -672,9 +717,9 @@ class CompactBlocksTest(BitcoinTestFramework): [l.clear_block_announcement() for l in listeners] - # ToHex() won't serialize with witness, but this block has no witnesses - # anyway. TODO: repeat this test with witness tx's to a segwit node. - node.submitblock(ToHex(block)) + # serialize without witness (this block has no witnesses anyway). + # TODO: repeat this test with witness tx's to a segwit node. + node.submitblock(block.serialize().hex()) for l in listeners: l.wait_until(lambda: "cmpctblock" in l.last_message, timeout=30) diff --git a/test/functional/p2p_compactblocks_hb.py b/test/functional/p2p_compactblocks_hb.py new file mode 100755 index 0000000000..a3d30a6f04 --- /dev/null +++ b/test/functional/p2p_compactblocks_hb.py @@ -0,0 +1,97 @@ +#!/usr/bin/env python3 +# Copyright (c) 2021 The Bitcoin Core developers +# Distributed under the MIT software license, see the accompanying +# file COPYING or http://www.opensource.org/licenses/mit-license.php. +"""Test compact blocks HB selection logic.""" + +from test_framework.test_framework import BitcoinTestFramework +from test_framework.util import assert_equal + + +class CompactBlocksConnectionTest(BitcoinTestFramework): + """Test class for verifying selection of HB peer connections.""" + + def set_test_params(self): + self.setup_clean_chain = True + self.num_nodes = 6 + + def peer_info(self, from_node, to_node): + """Query from_node for its getpeerinfo about to_node.""" + for peerinfo in self.nodes[from_node].getpeerinfo(): + if "(testnode%i)" % to_node in peerinfo['subver']: + return peerinfo + return None + + def setup_network(self): + self.setup_nodes() + # Start network with everyone disconnected + self.sync_all() + + def relay_block_through(self, peer): + """Relay a new block through peer peer, and return HB status between 1 and [2,3,4,5].""" + self.connect_nodes(peer, 0) + self.nodes[0].generate(1) + self.sync_blocks() + self.disconnect_nodes(peer, 0) + status_to = [self.peer_info(1, i)['bip152_hb_to'] for i in range(2, 6)] + status_from = [self.peer_info(i, 1)['bip152_hb_from'] for i in range(2, 6)] + assert_equal(status_to, status_from) + return status_to + + def run_test(self): + self.log.info("Testing reserved high-bandwidth mode slot for outbound peer...") + + # Connect everyone to node 0, and mine some blocks to get all nodes out of IBD. + for i in range(1, 6): + self.connect_nodes(i, 0) + self.nodes[0].generate(2) + self.sync_blocks() + for i in range(1, 6): + self.disconnect_nodes(i, 0) + + # Construct network topology: + # - Node 0 is the block producer + # - Node 1 is the "target" node being tested + # - Nodes 2-5 are intermediaries. 
+ # - Node 1 has an outbound connection to node 2 + # - Node 1 has inbound connections from nodes 3-5 + self.connect_nodes(3, 1) + self.connect_nodes(4, 1) + self.connect_nodes(5, 1) + self.connect_nodes(1, 2) + + # Mine blocks subsequently relaying through nodes 3,4,5 (inbound to node 1) + for nodeid in range(3, 6): + status = self.relay_block_through(nodeid) + assert_equal(status, [False, nodeid >= 3, nodeid >= 4, nodeid >= 5]) + + # And again through each. This should not change HB status. + for nodeid in range(3, 6): + status = self.relay_block_through(nodeid) + assert_equal(status, [False, True, True, True]) + + # Now relay one block through peer 2 (outbound from node 1), so it should take HB status + # from one of the inbounds. + status = self.relay_block_through(2) + assert_equal(status[0], True) + assert_equal(sum(status), 3) + + # Now relay again through nodes 3,4,5. Since 2 is outbound, it should remain HB. + for nodeid in range(3, 6): + status = self.relay_block_through(nodeid) + assert status[0] + assert status[nodeid - 2] + assert_equal(sum(status), 3) + + # Reconnect peer 2, and retry. Now the three inbounds should be HB again. + self.disconnect_nodes(1, 2) + self.connect_nodes(1, 2) + for nodeid in range(3, 6): + status = self.relay_block_through(nodeid) + assert not status[0] + assert status[nodeid - 2] + assert_equal(status, [False, True, True, True]) + + +if __name__ == '__main__': + CompactBlocksConnectionTest().main() diff --git a/test/functional/p2p_dos_header_tree.py b/test/functional/p2p_dos_header_tree.py index 2349afa1ee..52a47c9bc2 100755 --- a/test/functional/p2p_dos_header_tree.py +++ b/test/functional/p2p_dos_header_tree.py @@ -6,7 +6,7 @@ from test_framework.messages import ( CBlockHeader, - FromHex, + from_hex, ) from test_framework.p2p import ( P2PInterface, @@ -42,8 +42,8 @@ class RejectLowDifficultyHeadersTest(BitcoinTestFramework): self.headers = [l for l in h_lines if not l.startswith(FORK_PREFIX)] self.headers_fork = [l[len(FORK_PREFIX):] for l in h_lines if l.startswith(FORK_PREFIX)] - self.headers = [FromHex(CBlockHeader(), h) for h in self.headers] - self.headers_fork = [FromHex(CBlockHeader(), h) for h in self.headers_fork] + self.headers = [from_hex(CBlockHeader(), h) for h in self.headers] + self.headers_fork = [from_hex(CBlockHeader(), h) for h in self.headers_fork] self.log.info("Feed all non-fork headers, including and up to the first checkpoint") peer_checkpoint = self.nodes[0].add_p2p_connection(P2PInterface()) diff --git a/test/functional/p2p_eviction.py b/test/functional/p2p_eviction.py index a525996493..35bce7c69e 100755 --- a/test/functional/p2p_eviction.py +++ b/test/functional/p2p_eviction.py @@ -20,7 +20,11 @@ from test_framework.blocktools import ( create_block, create_coinbase, ) -from test_framework.messages import CTransaction, FromHex, msg_pong, msg_tx +from test_framework.messages import ( + msg_pong, + msg_tx, + tx_from_hex, +) from test_framework.p2p import P2PDataStore, P2PInterface from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal @@ -89,7 +93,7 @@ class P2PEvict(BitcoinTestFramework): 'scriptPubKey': prevtx['vout'][0]['scriptPubKey']['hex'], }], )['hex'] - txpeer.send_message(msg_tx(FromHex(CTransaction(), sigtx))) + txpeer.send_message(msg_tx(tx_from_hex(sigtx))) protected_peers.add(current_peer) self.log.info("Create 8 peers and protect them from eviction by having faster pings") diff --git a/test/functional/p2p_invalid_block.py 
b/test/functional/p2p_invalid_block.py index 483f25f48c..91666d0f08 100755 --- a/test/functional/p2p_invalid_block.py +++ b/test/functional/p2p_invalid_block.py @@ -9,8 +9,11 @@ In this test we connect to one node over p2p, and test block requests: 2) Invalid block with duplicated transaction should be re-requested. 3) Invalid block with bad coinbase value should be rejected and not re-requested. +4) Invalid block due to future timestamp is later accepted when that timestamp +becomes valid. """ import copy +import time from test_framework.blocktools import create_block, create_coinbase, create_tx_with_script from test_framework.messages import COIN @@ -18,6 +21,9 @@ from test_framework.p2p import P2PDataStore from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal +MAX_FUTURE_BLOCK_TIME = 2 * 60 * 60 + + class InvalidBlockRequestTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 @@ -133,5 +139,18 @@ class InvalidBlockRequestTest(BitcoinTestFramework): self.log.info("Test inflation by duplicating input") peer.send_blocks_and_test([block4], node, success=False, reject_reason='bad-txns-inputs-duplicate') + self.log.info("Test accepting identical block after rejecting it due to a future timestamp.") + t = int(time.time()) + node.setmocktime(t) + # Set block time +1 second past max future validity + block = create_block(tip, create_coinbase(height), t + MAX_FUTURE_BLOCK_TIME + 1) + block.hashMerkleRoot = block.calc_merkle_root() + block.solve() + # Need force_send because the block will get rejected without a getdata otherwise + peer.send_blocks_and_test([block], node, force_send=True, success=False, reject_reason='time-too-new') + node.setmocktime(t + 1) + peer.send_blocks_and_test([block], node, success=True) + + if __name__ == '__main__': InvalidBlockRequestTest().main() diff --git a/test/functional/p2p_permissions.py b/test/functional/p2p_permissions.py index 62652d949d..594a28d662 100755 --- a/test/functional/p2p_permissions.py +++ b/test/functional/p2p_permissions.py @@ -9,9 +9,8 @@ Test that permissions are correctly calculated and applied from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE from test_framework.messages import ( - CTransaction, CTxInWitness, - FromHex, + tx_from_hex, ) from test_framework.p2p import P2PDataStore from test_framework.script import ( @@ -105,8 +104,7 @@ class P2PPermissionsTests(BitcoinTestFramework): p2p_rebroadcast_wallet = self.nodes[1].add_p2p_connection(P2PDataStore()) self.log.debug("Send a tx from the wallet initially") - tx = FromHex( - CTransaction(), + tx = tx_from_hex( self.nodes[0].createrawtransaction( inputs=[{ 'txid': block_op_true['tx'][0], diff --git a/test/functional/p2p_segwit.py b/test/functional/p2p_segwit.py index 9d32c1cb86..95c7aec318 100755 --- a/test/functional/p2p_segwit.py +++ b/test/functional/p2p_segwit.py @@ -40,8 +40,8 @@ from test_framework.messages import ( ser_uint256, ser_vector, sha256, + tx_from_hex, uint256_from_str, - FromHex, ) from test_framework.p2p import ( P2PInterface, @@ -2122,14 +2122,14 @@ class SegWitTest(BitcoinTestFramework): unspent = next(u for u in self.nodes[0].listunspent() if u['spendable'] and u['address'].startswith('bcrt')) raw = self.nodes[0].createrawtransaction([{"txid": unspent['txid'], "vout": unspent['vout']}], {self.nodes[0].getnewaddress(): 1}) - tx = FromHex(CTransaction(), raw) + tx = tx_from_hex(raw) assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, 
hexstring=serialize_with_bogus_witness(tx).hex(), iswitness=True) with self.nodes[0].assert_debug_log(['Superfluous witness record']): self.test_node.send_and_ping(msg_bogus_tx(tx)) raw = self.nodes[0].signrawtransactionwithwallet(raw) assert raw['complete'] raw = raw['hex'] - tx = FromHex(CTransaction(), raw) + tx = tx_from_hex(raw) assert_raises_rpc_error(-22, "TX decode failed", self.nodes[0].decoderawtransaction, hexstring=serialize_with_bogus_witness(tx).hex(), iswitness=True) with self.nodes[0].assert_debug_log(['Unknown transaction optional data']): self.test_node.send_and_ping(msg_bogus_tx(tx)) diff --git a/test/functional/p2p_tx_download.py b/test/functional/p2p_tx_download.py index 4bf96cb0e6..3e962b4450 100755 --- a/test/functional/p2p_tx_download.py +++ b/test/functional/p2p_tx_download.py @@ -8,13 +8,12 @@ Test transaction download behavior from test_framework.messages import ( CInv, - CTransaction, - FromHex, MSG_TX, MSG_TYPE_MASK, MSG_WTX, msg_inv, msg_notfound, + tx_from_hex, ) from test_framework.p2p import ( P2PInterface, @@ -100,7 +99,7 @@ class TxDownloadTest(BitcoinTestFramework): hexstring=tx, privkeys=[self.nodes[0].get_deterministic_priv_key().key], )['hex'] - ctx = FromHex(CTransaction(), tx) + ctx = tx_from_hex(tx) txid = int(ctx.rehash(), 16) self.log.info( diff --git a/test/functional/rpc_addresses_deprecation.py b/test/functional/rpc_addresses_deprecation.py index bc0559f3b5..ac430f5b39 100755 --- a/test/functional/rpc_addresses_deprecation.py +++ b/test/functional/rpc_addresses_deprecation.py @@ -4,9 +4,9 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test deprecation of reqSigs and addresses RPC fields.""" -from io import BytesIO - -from test_framework.messages import CTransaction +from test_framework.messages import ( + tx_from_hex, +) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, @@ -35,8 +35,7 @@ class AddressesDeprecationTest(BitcoinTestFramework): signed = node.signrawtransactionwithwallet(raw)['hex'] # This transaction is derived from test/util/data/txcreatemultisig1.json - tx = CTransaction() - tx.deserialize(BytesIO(hex_str_to_bytes(signed))) + tx = tx_from_hex(signed) tx.vout[0].scriptPubKey = hex_str_to_bytes("522102a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff39721021ac43c7ff740014c3b33737ede99c967e4764553d1b2b83db77c83b8715fa72d2102df2089105c77f266fa11a9d33f05c735234075f2e8780824c6b709415f9fb48553ae") tx_signed = node.signrawtransactionwithwallet(tx.serialize().hex())['hex'] txid = node.sendrawtransaction(hexstring=tx_signed, maxfeerate=0) diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py index 00324347ed..90715cae26 100755 --- a/test/functional/rpc_blockchain.py +++ b/test/functional/rpc_blockchain.py @@ -31,7 +31,7 @@ from test_framework.blocktools import ( ) from test_framework.messages import ( CBlockHeader, - FromHex, + from_hex, msg_block, ) from test_framework.p2p import P2PInterface @@ -314,7 +314,7 @@ class BlockchainTest(BitcoinTestFramework): header_hex = node.getblockheader(blockhash=besthash, verbose=False) assert_is_hex_string(header_hex) - header = FromHex(CBlockHeader(), header_hex) + header = from_hex(CBlockHeader(), header_hex) header.calc_sha256() assert_equal(header.hash, besthash) diff --git a/test/functional/rpc_createmultisig.py b/test/functional/rpc_createmultisig.py index af515f3a27..816ec67492 100755 --- a/test/functional/rpc_createmultisig.py +++ 
b/test/functional/rpc_createmultisig.py @@ -97,6 +97,9 @@ class RpcCreateMultiSigTest(BitcoinTestFramework): sorted_key_desc = descsum_create('sh(multi(2,{}))'.format(sorted_key_str)) assert_equal(self.nodes[0].deriveaddresses(sorted_key_desc)[0], t['address']) + # Check that bech32m is currently not allowed + assert_raises_rpc_error(-5, "createmultisig cannot create bech32m multisig addresses", self.nodes[0].createmultisig, 2, self.pub, "bech32m") + def check_addmultisigaddress_errors(self): if self.options.descriptors: return @@ -108,6 +111,10 @@ class RpcCreateMultiSigTest(BitcoinTestFramework): self.nodes[0].importaddress(a) assert_raises_rpc_error(-5, 'no full public key for address', lambda: self.nodes[0].addmultisigaddress(nrequired=1, keys=addresses)) + # Bech32m address type is disallowed for legacy wallets + pubs = [self.nodes[1].getaddressinfo(addr)["pubkey"] for addr in addresses] + assert_raises_rpc_error(-5, "Bech32m multisig addresses cannot be created with legacy wallets", self.nodes[0].addmultisigaddress, 2, pubs, "", "bech32m") + def checkbalances(self): node0, node1, node2 = self.nodes node0.generate(COINBASE_MATURITY) diff --git a/test/functional/rpc_decodescript.py b/test/functional/rpc_decodescript.py index 01b8cb1854..f6643c7167 100755 --- a/test/functional/rpc_decodescript.py +++ b/test/functional/rpc_decodescript.py @@ -4,11 +4,16 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test decoding scripts via decodescript RPC command.""" -from test_framework.messages import CTransaction, sha256 +from test_framework.messages import ( + sha256, + tx_from_hex, +) from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, hex_str_to_bytes +from test_framework.util import ( + assert_equal, + hex_str_to_bytes, +) -from io import BytesIO class DecodeScriptTest(BitcoinTestFramework): def set_test_params(self): @@ -179,8 +184,7 @@ class DecodeScriptTest(BitcoinTestFramework): assert_equal('0 3045022100ae3b4e589dfc9d48cb82d41008dc5fa6a86f94d5c54f9935531924602730ab8002202f88cf464414c4ed9fa11b773c5ee944f66e9b05cc1e51d97abc22ce098937ea[ALL] 3045022100b44883be035600e9328a01b66c7d8439b74db64187e76b99a68f7893b701d5380220225bf286493e4c4adcf928c40f785422572eb232f84a0b83b0dea823c3a19c75[ALL] 5221020743d44be989540d27b1b4bbbcfd17721c337cb6bc9af20eb8a32520b393532f2102c0120a1dda9e51a938d39ddd9fe0ebc45ea97e1d27a7cbd671d5431416d3dd87210213820eb3d5f509d7438c9eeecb4157b2f595105e7cd564b3cdbb9ead3da41eed53ae', rpc_result['vin'][0]['scriptSig']['asm']) assert_equal('OP_DUP OP_HASH160 dc863734a218bfe83ef770ee9d41a27f824a6e56 OP_EQUALVERIFY OP_CHECKSIG', rpc_result['vout'][0]['scriptPubKey']['asm']) assert_equal('OP_HASH160 2a5edea39971049a540474c6a99edf0aa4074c58 OP_EQUAL', rpc_result['vout'][1]['scriptPubKey']['asm']) - txSave = CTransaction() - txSave.deserialize(BytesIO(hex_str_to_bytes(tx))) + txSave = tx_from_hex(tx) # make sure that a specifically crafted op_return value will not pass all the IsDERSignature checks and then get decoded as a sighash type tx = '01000000015ded05872fdbda629c7d3d02b194763ce3b9b1535ea884e3c8e765d42e316724020000006b48304502204c10d4064885c42638cbff3585915b322de33762598321145ba033fc796971e2022100bb153ad3baa8b757e30a2175bd32852d2e1cb9080f84d7e32fcdfd667934ef1b012103163c0ff73511ea1743fb5b98384a2ff09dd06949488028fd819f4d83f56264efffffffff0200000000000000000b6a0930060201000201000180380100000000001976a9141cabd296e753837c086da7a45a6c2fe0d49d7b7b88ac00000000' diff --git 
a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py index 4b07a32c54..fa98c44152 100755 --- a/test/functional/rpc_fundrawtransaction.py +++ b/test/functional/rpc_fundrawtransaction.py @@ -551,7 +551,7 @@ class RawTransactionsTest(BitcoinTestFramework): # creating the key must be impossible because the wallet is locked outputs = {self.nodes[0].getnewaddress():1.1} rawtx = self.nodes[1].createrawtransaction(inputs, outputs) - assert_raises_rpc_error(-4, "Transaction needs a change address, but we can't generate it. Please call keypoolrefill first.", self.nodes[1].fundrawtransaction, rawtx) + assert_raises_rpc_error(-4, "Transaction needs a change address, but we can't generate it.", self.nodes[1].fundrawtransaction, rawtx) # Refill the keypool. self.nodes[1].walletpassphrase("test", 100) diff --git a/test/functional/rpc_packages.py b/test/functional/rpc_packages.py index 3d8d81d6b8..4b2ed20958 100755 --- a/test/functional/rpc_packages.py +++ b/test/functional/rpc_packages.py @@ -5,7 +5,6 @@ """RPCs that handle raw transaction packages.""" from decimal import Decimal -from io import BytesIO import random from test_framework.address import ADDRESS_BCRT1_P2WSH_OP_TRUE @@ -13,8 +12,8 @@ from test_framework.test_framework import BitcoinTestFramework from test_framework.messages import ( BIP125_SEQUENCE_NUMBER, COIN, - CTransaction, CTxInWitness, + tx_from_hex, ) from test_framework.script import ( CScript, @@ -22,7 +21,6 @@ from test_framework.script import ( ) from test_framework.util import ( assert_equal, - hex_str_to_bytes, ) class RPCPackagesTest(BitcoinTestFramework): @@ -97,9 +95,8 @@ class RPCPackagesTest(BitcoinTestFramework): "amount": parent_value, }] if parent_locking_script else None signedtx = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=self.privkeys, prevtxs=prevtxs) - tx = CTransaction() assert signedtx["complete"] - tx.deserialize(BytesIO(hex_str_to_bytes(signedtx["hex"]))) + tx = tx_from_hex(signedtx["hex"]) return (tx, signedtx["hex"], my_value, tx.vout[0].scriptPubKey.hex()) def test_independent(self): @@ -110,8 +107,7 @@ class RPCPackagesTest(BitcoinTestFramework): self.log.info("Test an otherwise valid package with an extra garbage tx appended") garbage_tx = node.createrawtransaction([{"txid": "00" * 32, "vout": 5}], {self.address: 1}) - tx = CTransaction() - tx.deserialize(BytesIO(hex_str_to_bytes(garbage_tx))) + tx = tx_from_hex(garbage_tx) # Only the txid and wtxids are returned because validation is incomplete for the independent txns. # Package validation is atomic: if the node cannot find a UTXO for any single tx in the package, # it terminates immediately to avoid unnecessary, expensive signature verification. 
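The rpc_packages.py hunks above and below all apply the same mechanical refactor: the old CTransaction() plus BytesIO(hex_str_to_bytes(...)) deserialization dance is collapsed into the tx_from_hex() helper that this patch adds to test_framework/messages.py further down. A minimal sketch of the two equivalent idioms, assuming the test_framework package is importable; raw_hex and the two wrapper function names are placeholders for illustration only and are not part of the patch:

# Illustrative only; not part of the patch. raw_hex stands in for any hex string
# returned by an RPC such as createrawtransaction.
from io import BytesIO

from test_framework.messages import CTransaction, tx_from_hex
from test_framework.util import hex_str_to_bytes


def deserialize_old(raw_hex):
    # Pre-refactor idiom: construct the object, then deserialize from a byte stream.
    tx = CTransaction()
    tx.deserialize(BytesIO(hex_str_to_bytes(raw_hex)))
    return tx


def deserialize_new(raw_hex):
    # Post-refactor idiom: tx_from_hex() wraps from_hex(CTransaction(), ...).
    return tx_from_hex(raw_hex)

Both return an equivalent CTransaction object; the helper only hides the BytesIO plumbing, which is why each call site in these hunks shrinks from three lines to one.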
@@ -123,8 +119,7 @@ class RPCPackagesTest(BitcoinTestFramework): coin = self.coins.pop() tx_bad_sig_hex = node.createrawtransaction([{"txid": coin["txid"], "vout": 0}], {self.address : coin["amount"] - Decimal("0.0001")}) - tx_bad_sig = CTransaction() - tx_bad_sig.deserialize(BytesIO(hex_str_to_bytes(tx_bad_sig_hex))) + tx_bad_sig = tx_from_hex(tx_bad_sig_hex) testres_bad_sig = node.testmempoolaccept(self.independent_txns_hex + [tx_bad_sig_hex]) # By the time the signature for the last transaction is checked, all the other transactions # have been fully validated, which is why the node returns full validation results for all @@ -141,8 +136,7 @@ class RPCPackagesTest(BitcoinTestFramework): {self.address : coin["amount"] - Decimal("0.999")}) tx_high_fee_signed = node.signrawtransactionwithkey(hexstring=tx_high_fee_raw, privkeys=self.privkeys) assert tx_high_fee_signed["complete"] - tx_high_fee = CTransaction() - tx_high_fee.deserialize(BytesIO(hex_str_to_bytes(tx_high_fee_signed["hex"]))) + tx_high_fee = tx_from_hex(tx_high_fee_signed["hex"]) testres_high_fee = node.testmempoolaccept([tx_high_fee_signed["hex"]]) assert_equal(testres_high_fee, [ {"txid": tx_high_fee.rehash(), "wtxid": tx_high_fee.getwtxid(), "allowed": False, "reject-reason": "max-fee-exceeded"} @@ -198,9 +192,8 @@ class RPCPackagesTest(BitcoinTestFramework): rawtx = node.createrawtransaction(inputs, outputs) parent_signed = node.signrawtransactionwithkey(hexstring=rawtx, privkeys=self.privkeys) - parent_tx = CTransaction() assert parent_signed["complete"] - parent_tx.deserialize(BytesIO(hex_str_to_bytes(parent_signed["hex"]))) + parent_tx = tx_from_hex(parent_signed["hex"]) parent_txid = parent_tx.rehash() assert node.testmempoolaccept([parent_signed["hex"]])[0]["allowed"] @@ -213,8 +206,7 @@ class RPCPackagesTest(BitcoinTestFramework): # Child B rawtx_b = node.createrawtransaction([{"txid": parent_txid, "vout": 1}], {self.address : child_value}) - tx_child_b = CTransaction() - tx_child_b.deserialize(BytesIO(hex_str_to_bytes(rawtx_b))) + tx_child_b = tx_from_hex(rawtx_b) tx_child_b.wit.vtxinwit = [CTxInWitness()] tx_child_b.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE])] tx_child_b_hex = tx_child_b.serialize().hex() @@ -293,10 +285,8 @@ class RPCPackagesTest(BitcoinTestFramework): rawtx2 = node.createrawtransaction(inputs, output2) signedtx1 = node.signrawtransactionwithkey(hexstring=rawtx1, privkeys=self.privkeys) signedtx2 = node.signrawtransactionwithkey(hexstring=rawtx2, privkeys=self.privkeys) - tx1 = CTransaction() - tx1.deserialize(BytesIO(hex_str_to_bytes(signedtx1["hex"]))) - tx2 = CTransaction() - tx2.deserialize(BytesIO(hex_str_to_bytes(signedtx2["hex"]))) + tx1 = tx_from_hex(signedtx1["hex"]) + tx2 = tx_from_hex(signedtx2["hex"]) assert signedtx1["complete"] assert signedtx2["complete"] @@ -327,19 +317,17 @@ class RPCPackagesTest(BitcoinTestFramework): raw_replaceable_tx = node.createrawtransaction(inputs, output) signed_replaceable_tx = node.signrawtransactionwithkey(hexstring=raw_replaceable_tx, privkeys=self.privkeys) testres_replaceable = node.testmempoolaccept([signed_replaceable_tx["hex"]]) - replaceable_tx = CTransaction() - replaceable_tx.deserialize(BytesIO(hex_str_to_bytes(signed_replaceable_tx["hex"]))) + replaceable_tx = tx_from_hex(signed_replaceable_tx["hex"]) assert_equal(testres_replaceable, [ {"txid": replaceable_tx.rehash(), "wtxid": replaceable_tx.getwtxid(), "allowed": True, "vsize": replaceable_tx.get_vsize(), "fees": { "base": fee }} ]) # Replacement transaction is identical 
except has double the fee - replacement_tx = CTransaction() - replacement_tx.deserialize(BytesIO(hex_str_to_bytes(signed_replaceable_tx["hex"]))) + replacement_tx = tx_from_hex(signed_replaceable_tx["hex"]) replacement_tx.vout[0].nValue -= int(fee * COIN) # Doubled fee signed_replacement_tx = node.signrawtransactionwithkey(replacement_tx.serialize().hex(), self.privkeys) - replacement_tx.deserialize(BytesIO(hex_str_to_bytes(signed_replacement_tx["hex"]))) + replacement_tx = tx_from_hex(signed_replacement_tx["hex"]) self.log.info("Test that transactions within a package cannot replace each other") testres_rbf_conflicting = node.testmempoolaccept([signed_replaceable_tx["hex"], signed_replacement_tx["hex"]]) @@ -354,7 +342,8 @@ class RPCPackagesTest(BitcoinTestFramework): # This transaction is a valid BIP125 replace-by-fee assert testres_rbf_single[0]["allowed"] testres_rbf_package = self.independent_txns_testres_blank + [{ - "txid": replacement_tx.rehash(), "wtxid": replacement_tx.getwtxid(), "allowed": False, "reject-reason": "txn-mempool-conflict" + "txid": replacement_tx.rehash(), "wtxid": replacement_tx.getwtxid(), "allowed": False, + "reject-reason": "bip125-replacement-disallowed" }] self.assert_testres_equal(self.independent_txns_hex + [signed_replacement_tx["hex"]], testres_rbf_package) diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py index 53ddf24e47..db57368eae 100755 --- a/test/functional/rpc_rawtransaction.py +++ b/test/functional/rpc_rawtransaction.py @@ -14,16 +14,17 @@ Test the following RPCs: from collections import OrderedDict from decimal import Decimal -from io import BytesIO from test_framework.blocktools import COINBASE_MATURITY -from test_framework.messages import CTransaction, ToHex +from test_framework.messages import ( + CTransaction, + tx_from_hex, +) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_raises_rpc_error, find_vout_for_address, - hex_str_to_bytes, ) @@ -127,23 +128,22 @@ class RawTransactionsTest(BitcoinTestFramework): assert_raises_rpc_error(-3, "Expected type bool", self.nodes[0].createrawtransaction, [], {}, 0, 'foo') self.log.info('Check that createrawtransaction accepts an array and object as outputs') - tx = CTransaction() # One output - tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99})))) + tx = tx_from_hex(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs={address: 99})) assert_equal(len(tx.vout), 1) assert_equal( tx.serialize().hex(), self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}]), ) # Two outputs - tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)]))))) + tx = tx_from_hex(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=OrderedDict([(address, 99), (address2, 99)]))) assert_equal(len(tx.vout), 2) assert_equal( tx.serialize().hex(), self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=[{address: 99}, {address2: 99}]), ) # Multiple mixed outputs - tx.deserialize(BytesIO(hex_str_to_bytes(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], outputs=multidict([(address, 99), (address2, 99), ('data', '99')]))))) + tx = tx_from_hex(self.nodes[2].createrawtransaction(inputs=[{'txid': txid, 'vout': 9}], 
outputs=multidict([(address, 99), (address2, 99), ('data', '99')]))) assert_equal(len(tx.vout), 3) assert_equal( tx.serialize().hex(), @@ -450,14 +450,14 @@ class RawTransactionsTest(BitcoinTestFramework): # As transaction version is unsigned, this should convert to its unsigned equivalent. tx = CTransaction() tx.nVersion = -0x80000000 - rawtx = ToHex(tx) + rawtx = tx.serialize().hex() decrawtx = self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['version'], 0x80000000) # Test the maximum transaction version number that fits in a signed 32-bit integer. tx = CTransaction() tx.nVersion = 0x7fffffff - rawtx = ToHex(tx) + rawtx = tx.serialize().hex() decrawtx = self.nodes[0].decoderawtransaction(rawtx) assert_equal(decrawtx['version'], 0x7fffffff) diff --git a/test/functional/rpc_setban.py b/test/functional/rpc_setban.py index fd5f8aa098..36873f964b 100755 --- a/test/functional/rpc_setban.py +++ b/test/functional/rpc_setban.py @@ -22,7 +22,7 @@ class SetBanTests(BitcoinTestFramework): # Node 0 connects to Node 1, check that the noban permission is not granted self.connect_nodes(0, 1) peerinfo = self.nodes[1].getpeerinfo()[0] - assert(not 'noban' in peerinfo['permissions']) + assert not "noban" in peerinfo["permissions"] # Node 0 get banned by Node 1 self.nodes[1].setban("127.0.0.1", "add") @@ -36,27 +36,40 @@ class SetBanTests(BitcoinTestFramework): self.restart_node(1, ['-whitelist=127.0.0.1']) self.connect_nodes(0, 1) peerinfo = self.nodes[1].getpeerinfo()[0] - assert('noban' in peerinfo['permissions']) + assert "noban" in peerinfo["permissions"] # If we remove the ban, Node 0 should be able to reconnect even without noban permission self.nodes[1].setban("127.0.0.1", "remove") self.restart_node(1, []) self.connect_nodes(0, 1) peerinfo = self.nodes[1].getpeerinfo()[0] - assert(not 'noban' in peerinfo['permissions']) + assert not "noban" in peerinfo["permissions"] self.log.info("Test that a non-IP address can be banned/unbanned") node = self.nodes[1] tor_addr = "pg6mmjiyjmcrsslvykfwnntlaru7p5svn6y2ymmju6nubxndf4pscryd.onion" ip_addr = "1.2.3.4" - assert(not self.is_banned(node, tor_addr)) - assert(not self.is_banned(node, ip_addr)) + assert not self.is_banned(node, tor_addr) + assert not self.is_banned(node, ip_addr) + node.setban(tor_addr, "add") - assert(self.is_banned(node, tor_addr)) - assert(not self.is_banned(node, ip_addr)) + assert self.is_banned(node, tor_addr) + assert not self.is_banned(node, ip_addr) + + self.log.info("Test the ban list is preserved through restart") + + self.restart_node(1) + assert self.is_banned(node, tor_addr) + assert not self.is_banned(node, ip_addr) + node.setban(tor_addr, "remove") - assert(not self.is_banned(self.nodes[1], tor_addr)) - assert(not self.is_banned(node, ip_addr)) + assert not self.is_banned(self.nodes[1], tor_addr) + assert not self.is_banned(node, ip_addr) + + self.restart_node(1) + assert not self.is_banned(node, tor_addr) + assert not self.is_banned(node, ip_addr) + if __name__ == '__main__': SetBanTests().main() diff --git a/test/functional/rpc_signrawtransaction.py b/test/functional/rpc_signrawtransaction.py index 16b0019866..ef5d08e7b9 100755 --- a/test/functional/rpc_signrawtransaction.py +++ b/test/functional/rpc_signrawtransaction.py @@ -5,17 +5,44 @@ """Test transaction signing using the signrawtransaction* RPCs.""" from test_framework.blocktools import COINBASE_MATURITY -from test_framework.address import check_script, script_to_p2sh, script_to_p2wsh +from test_framework.address import ( + check_script, + 
script_to_p2sh, + script_to_p2wsh, +) from test_framework.key import ECKey from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, assert_raises_rpc_error, find_vout_for_address, hex_str_to_bytes -from test_framework.messages import sha256, CTransaction, CTxInWitness -from test_framework.script import CScript, OP_0, OP_CHECKSIG, OP_CHECKSEQUENCEVERIFY, OP_CHECKLOCKTIMEVERIFY, OP_DROP, OP_TRUE -from test_framework.script_util import key_to_p2pkh_script, script_to_p2sh_p2wsh_script, script_to_p2wsh_script +from test_framework.util import ( + assert_equal, + assert_raises_rpc_error, + find_vout_for_address, + hex_str_to_bytes, +) +from test_framework.messages import ( + CTxInWitness, + sha256, + tx_from_hex, +) +from test_framework.script import ( + CScript, + OP_0, + OP_CHECKLOCKTIMEVERIFY, + OP_CHECKSIG, + OP_CHECKSEQUENCEVERIFY, + OP_DROP, + OP_TRUE, +) +from test_framework.script_util import ( + key_to_p2pkh_script, + script_to_p2sh_p2wsh_script, + script_to_p2wsh_script, +) from test_framework.wallet_util import bytes_to_wif -from decimal import Decimal, getcontext -from io import BytesIO +from decimal import ( + Decimal, + getcontext, +) class SignRawTransactionsTest(BitcoinTestFramework): def set_test_params(self): @@ -265,8 +292,7 @@ class SignRawTransactionsTest(BitcoinTestFramework): ) # Set the witness script - ctx = CTransaction() - ctx.deserialize(BytesIO(hex_str_to_bytes(tx))) + ctx = tx_from_hex(tx) ctx.wit.vtxinwit.append(CTxInWitness()) ctx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE]), script] tx = ctx.serialize_with_witness().hex() @@ -301,8 +327,7 @@ class SignRawTransactionsTest(BitcoinTestFramework): ) # Set the witness script - ctx = CTransaction() - ctx.deserialize(BytesIO(hex_str_to_bytes(tx))) + ctx = tx_from_hex(tx) ctx.wit.vtxinwit.append(CTxInWitness()) ctx.wit.vtxinwit[0].scriptWitness.stack = [CScript([OP_TRUE]), script] tx = ctx.serialize_with_witness().hex() diff --git a/test/functional/rpc_txoutproof.py b/test/functional/rpc_txoutproof.py index bf96b6353c..67af6b8f8e 100755 --- a/test/functional/rpc_txoutproof.py +++ b/test/functional/rpc_txoutproof.py @@ -5,9 +5,15 @@ """Test gettxoutproof and verifytxoutproof RPCs.""" from test_framework.blocktools import COINBASE_MATURITY -from test_framework.messages import CMerkleBlock, FromHex, ToHex +from test_framework.messages import ( + CMerkleBlock, + from_hex, +) from test_framework.test_framework import BitcoinTestFramework -from test_framework.util import assert_equal, assert_raises_rpc_error +from test_framework.util import ( + assert_equal, + assert_raises_rpc_error, +) from test_framework.wallet import MiniWallet @@ -88,10 +94,10 @@ class MerkleBlockTest(BitcoinTestFramework): assert txid1 in self.nodes[0].verifytxoutproof(proof) assert txid2 in self.nodes[1].verifytxoutproof(proof) - tweaked_proof = FromHex(CMerkleBlock(), proof) + tweaked_proof = from_hex(CMerkleBlock(), proof) # Make sure that our serialization/deserialization is working - assert txid1 in self.nodes[0].verifytxoutproof(ToHex(tweaked_proof)) + assert txid1 in self.nodes[0].verifytxoutproof(tweaked_proof.serialize().hex()) # Check to see if we can go up the merkle tree and pass this off as a # single-transaction block @@ -100,7 +106,7 @@ class MerkleBlockTest(BitcoinTestFramework): tweaked_proof.txn.vBits = [True] + [False]*7 for n in self.nodes: - assert not n.verifytxoutproof(ToHex(tweaked_proof)) + assert not n.verifytxoutproof(tweaked_proof.serialize().hex()) # TODO: try 
more variants, eg transactions at different depths, and # verify that the proofs are invalid diff --git a/test/functional/test_framework/blocktools.py b/test/functional/test_framework/blocktools.py index f35ea6c122..bc43438810 100644 --- a/test/functional/test_framework/blocktools.py +++ b/test/functional/test_framework/blocktools.py @@ -23,12 +23,11 @@ from .messages import ( CTxIn, CTxInWitness, CTxOut, - FromHex, - ToHex, hash256, hex_str_to_bytes, ser_uint256, sha256, + tx_from_hex, uint256_from_str, ) from .script import ( @@ -79,7 +78,7 @@ def create_block(hashprev=None, coinbase=None, ntime=None, *, version=None, tmpl if txlist: for tx in txlist: if not hasattr(tx, 'calc_sha256'): - tx = FromHex(CTransaction(), tx) + tx = tx_from_hex(tx) block.vtx.append(tx) block.hashMerkleRoot = block.calc_merkle_root() block.calc_sha256() @@ -166,7 +165,7 @@ def create_transaction(node, txid, to_address, *, amount): sign for the output that is being spent. """ raw_tx = create_raw_transaction(node, txid, to_address, amount=amount) - tx = FromHex(CTransaction(), raw_tx) + tx = tx_from_hex(raw_tx) return tx def create_raw_transaction(node, txid, to_address, *, amount): @@ -181,11 +180,6 @@ def create_raw_transaction(node, txid, to_address, *, amount): signed_psbt = wrpc.walletprocesspsbt(psbt) psbt = signed_psbt['psbt'] final_psbt = node.finalizepsbt(psbt) - if not final_psbt["complete"]: - node.log.info(f'final_psbt={final_psbt}') - for w in node.listwallets(): - wrpc = node.get_wallet_rpc(w) - node.log.info(f'listunspent={wrpc.listunspent()}') assert_equal(final_psbt["complete"], True) return final_psbt['hex'] @@ -248,9 +242,9 @@ def send_to_witness(use_p2wsh, node, utxo, pubkey, encode_p2sh, amount, sign=Tru return node.sendrawtransaction(signed["hex"]) else: if (insert_redeem_script): - tx = FromHex(CTransaction(), tx_to_witness) + tx = tx_from_hex(tx_to_witness) tx.vin[0].scriptSig += CScript([hex_str_to_bytes(insert_redeem_script)]) - tx_to_witness = ToHex(tx) + tx_to_witness = tx.serialize().hex() return node.sendrawtransaction(tx_to_witness) diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py index 5a9736a7a3..504c8c70d4 100755 --- a/test/functional/test_framework/messages.py +++ b/test/functional/test_framework/messages.py @@ -39,7 +39,7 @@ MAX_BLOOM_HASH_FUNCS = 50 COIN = 100000000 # 1 btc in satoshis MAX_MONEY = 21000000 * COIN -BIP125_SEQUENCE_NUMBER = 0xfffffffd # Sequence number that is BIP 125 opt-in and BIP 68-opt-out +BIP125_SEQUENCE_NUMBER = 0xfffffffd # Sequence number that is rbf-opt-in (BIP 125) and csv-opt-out (BIP 68) MAX_PROTOCOL_MESSAGE_LENGTH = 4000000 # Maximum length of incoming protocol messages MAX_HEADERS_RESULTS = 2000 # Number of headers sent in one getheaders result @@ -190,14 +190,20 @@ def ser_string_vector(l): return r -# Deserialize from a hex string representation (eg from RPC) -def FromHex(obj, hex_string): +def from_hex(obj, hex_string): + """Deserialize from a hex string representation (e.g. from RPC) + + Note that there is no complementary helper like e.g. `to_hex` for the + inverse operation. 
To serialize a message object to a hex string, simply + use obj.serialize().hex()""" obj.deserialize(BytesIO(hex_str_to_bytes(hex_string))) return obj -# Convert a binary-serializable object to hex (eg for submission via RPC) -def ToHex(obj): - return obj.serialize().hex() + +def tx_from_hex(hex_string): + """Deserialize from hex string to a transaction object""" + return from_hex(CTransaction(), hex_string) + # Objects that map to bitcoind objects, which can be serialized/deserialized diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py index a89a26caea..40360c54a0 100755 --- a/test/functional/test_framework/test_framework.py +++ b/test/functional/test_framework/test_framework.py @@ -194,6 +194,10 @@ class BitcoinTestFramework(metaclass=BitcoinTestMetaClass): help="Run test using legacy wallets", dest='descriptors') self.add_options(parser) + # Running TestShell in a Jupyter notebook causes an additional -f argument + # To keep TestShell from failing with an "unrecognized argument" error, we add a dummy "-f" argument + # source: https://stackoverflow.com/questions/48796169/how-to-fix-ipykernel-launcher-py-error-unrecognized-arguments-in-jupyter/56349168#56349168 + parser.add_argument("-f", "--fff", help="a dummy argument to fool ipython", default="1") self.options = parser.parse_args() self.options.previous_releases_path = previous_releases_path diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py index 462019566c..35dbfbba8d 100644 --- a/test/functional/test_framework/util.py +++ b/test/functional/test_framework/util.py @@ -19,7 +19,6 @@ import unittest from . import coverage from .authproxy import AuthServiceProxy, JSONRPCException -from io import BytesIO from typing import Callable, Optional logger = logging.getLogger("TestFramework.utils") @@ -481,6 +480,28 @@ def create_confirmed_utxos(fee, node, count): return utxos +def chain_transaction(node, parent_txids, vouts, value, fee, num_outputs): + """Build and send a transaction that spends the given inputs (specified + by lists of parent_txid:vout each), with the desired total value and fee, + equally divided up to the desired number of outputs. + + Returns a tuple with the txid and the amount sent per output. + """ + send_value = satoshi_round((value - fee)/num_outputs) + inputs = [] + for (txid, vout) in zip(parent_txids, vouts): + inputs.append({'txid' : txid, 'vout' : vout}) + outputs = {} + for _ in range(num_outputs): + outputs[node.getnewaddress()] = send_value + rawtx = node.createrawtransaction(inputs, outputs, 0, True) + signedtx = node.signrawtransactionwithwallet(rawtx) + txid = node.sendrawtransaction(signedtx['hex']) + fulltx = node.getrawtransaction(txid, 1) + assert len(fulltx['vout']) == num_outputs # make sure we didn't generate a change output + return (txid, send_value) + + # Create large OP_RETURN txouts that can be appended to a transaction # to make it large (helper for constructing large transactions). 
def gen_return_txouts(): @@ -506,7 +527,7 @@ def gen_return_txouts(): def create_lots_of_big_transactions(node, txouts, utxos, num, fee): addr = node.getnewaddress() txids = [] - from .messages import CTransaction + from .messages import tx_from_hex for _ in range(num): t = utxos.pop() inputs = [{"txid": t["txid"], "vout": t["vout"]}] @@ -514,8 +535,7 @@ def create_lots_of_big_transactions(node, txouts, utxos, num, fee): change = t['amount'] - fee outputs[addr] = satoshi_round(change) rawtx = node.createrawtransaction(inputs, outputs) - tx = CTransaction() - tx.deserialize(BytesIO(hex_str_to_bytes(rawtx))) + tx = tx_from_hex(rawtx) for txout in txouts: tx.vout.append(txout) newtx = tx.serialize().hex() diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py index 0962a5cb54..47ec6b0be2 100644 --- a/test/functional/test_framework/wallet.py +++ b/test/functional/test_framework/wallet.py @@ -88,12 +88,20 @@ class MiniWallet: if out['scriptPubKey']['hex'] == self._scriptPubKey.hex(): self._utxos.append({'txid': tx['txid'], 'vout': out['n'], 'value': out['value']}) - def sign_tx(self, tx): + def sign_tx(self, tx, fixed_length=True): """Sign tx that has been created by MiniWallet in P2PK mode""" assert self._priv_key is not None (sighash, err) = LegacySignatureHash(CScript(self._scriptPubKey), tx, 0, SIGHASH_ALL) assert err is None - tx.vin[0].scriptSig = CScript([self._priv_key.sign_ecdsa(sighash) + bytes(bytearray([SIGHASH_ALL]))]) + # for exact fee calculation, create only signatures with fixed size by default (>49.89% probability): + # 65 bytes: high-R val (33 bytes) + low-S val (32 bytes) + # with the DER header/skeleton data of 6 bytes added, this leads to a target size of 71 bytes + der_sig = b'' + while not len(der_sig) == 71: + der_sig = self._priv_key.sign_ecdsa(sighash) + if not fixed_length: + break + tx.vin[0].scriptSig = CScript([der_sig + bytes(bytearray([SIGHASH_ALL]))]) def generate(self, num_blocks): """Generate blocks with coinbase outputs to the internal address, and append the outputs to the internal list""" @@ -124,23 +132,26 @@ class MiniWallet: else: return self._utxos[index] - def send_self_transfer(self, *, fee_rate=Decimal("0.003"), from_node, utxo_to_spend=None, locktime=0): + def send_self_transfer(self, **kwargs): """Create and send a tx with the specified fee_rate. Fee may be exact or at most one satoshi higher than needed.""" - tx = self.create_self_transfer(fee_rate=fee_rate, from_node=from_node, utxo_to_spend=utxo_to_spend) - self.sendrawtransaction(from_node=from_node, tx_hex=tx['hex']) + tx = self.create_self_transfer(**kwargs) + self.sendrawtransaction(from_node=kwargs['from_node'], tx_hex=tx['hex']) return tx - def create_self_transfer(self, *, fee_rate=Decimal("0.003"), from_node, utxo_to_spend=None, mempool_valid=True, locktime=0): + def create_self_transfer(self, *, fee_rate=Decimal("0.003"), from_node, utxo_to_spend=None, mempool_valid=True, locktime=0, sequence=0): """Create and return a tx with the specified fee_rate. 
Fee may be exact or at most one satoshi higher than needed.""" self._utxos = sorted(self._utxos, key=lambda k: k['value']) utxo_to_spend = utxo_to_spend or self._utxos.pop() # Pick the largest utxo (if none provided) and hope it covers the fee - vsize = Decimal(96) + if self._priv_key is None: + vsize = Decimal(96) # anyone-can-spend + else: + vsize = Decimal(168) # P2PK (73 bytes scriptSig + 35 bytes scriptPubKey + 60 bytes other) send_value = satoshi_round(utxo_to_spend['value'] - fee_rate * (vsize / 1000)) fee = utxo_to_spend['value'] - send_value assert send_value > 0 tx = CTransaction() - tx.vin = [CTxIn(COutPoint(int(utxo_to_spend['txid'], 16), utxo_to_spend['vout']))] + tx.vin = [CTxIn(COutPoint(int(utxo_to_spend['txid'], 16), utxo_to_spend['vout']), nSequence=sequence)] tx.vout = [CTxOut(int(send_value * COIN), self._scriptPubKey)] tx.nLockTime = locktime if not self._address: @@ -159,10 +170,7 @@ class MiniWallet: tx_info = from_node.testmempoolaccept([tx_hex])[0] assert_equal(mempool_valid, tx_info['allowed']) if mempool_valid: - # TODO: for P2PK, vsize is not constant due to varying scriptSig length, - # so only check this for anyone-can-spend outputs right now - if self._priv_key is None: - assert_equal(tx_info['vsize'], vsize) + assert_equal(tx_info['vsize'], vsize) assert_equal(tx_info['fees']['base'], fee) return {'txid': tx_info['txid'], 'wtxid': tx_info['wtxid'], 'hex': tx_hex, 'tx': tx} diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py index c9a8cc5611..ad1acd2e2f 100755 --- a/test/functional/test_runner.py +++ b/test/functional/test_runner.py @@ -19,6 +19,7 @@ import datetime import os import time import shutil +import signal import subprocess import sys import tempfile @@ -173,6 +174,7 @@ BASE_SCRIPTS = [ 'wallet_groups.py --legacy-wallet', 'p2p_addrv2_relay.py', 'wallet_groups.py --descriptors', + 'p2p_compactblocks_hb.py', 'p2p_disconnect_ban.py', 'rpc_decodescript.py', 'rpc_blockchain.py', @@ -548,9 +550,11 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage= all_passed = all(map(lambda test_result: test_result.was_successful, test_results)) and coverage_passed - # This will be a no-op unless failfast is True in which case there may be dangling - # processes which need to be killed. - job_queue.kill_and_join() + # Clean up dangling processes if any. This may only happen with --failfast option. 
+ # Killing the process group will also terminate the current process but that is + # not an issue + if len(job_queue.jobs): + os.killpg(os.getpgid(0), signal.SIGKILL) sys.exit(not all_passed) @@ -647,16 +651,6 @@ class TestHandler: print('.', end='', flush=True) dot_count += 1 - def kill_and_join(self): - """Send SIGKILL to all jobs and block until all have ended.""" - procs = [i[2] for i in self.jobs] - - for proc in procs: - proc.kill() - - for proc in procs: - proc.wait() - class TestResult(): def __init__(self, name, status, time): diff --git a/test/functional/wallet_address_types.py b/test/functional/wallet_address_types.py index 6d93cf412f..9b97d08424 100755 --- a/test/functional/wallet_address_types.py +++ b/test/functional/wallet_address_types.py @@ -373,5 +373,15 @@ class AddressTypeTest(BitcoinTestFramework): self.test_address(4, self.nodes[4].getrawchangeaddress(), multisig=False, typ='p2sh-segwit') self.test_address(4, self.nodes[4].getrawchangeaddress('bech32'), multisig=False, typ='bech32') + if self.options.descriptors: + self.log.info("Descriptor wallets do not have bech32m addresses by default yet") + # TODO: Remove this when they do + assert_raises_rpc_error(-12, "Error: No bech32m addresses available", self.nodes[0].getnewaddress, "", "bech32m") + assert_raises_rpc_error(-12, "Error: No bech32m addresses available", self.nodes[0].getrawchangeaddress, "bech32m") + else: + self.log.info("Legacy wallets cannot make bech32m addresses") + assert_raises_rpc_error(-8, "Legacy wallets cannot provide bech32m addresses", self.nodes[0].getnewaddress, "", "bech32m") + assert_raises_rpc_error(-8, "Legacy wallets cannot provide bech32m addresses", self.nodes[0].getrawchangeaddress, "bech32m") + if __name__ == '__main__': AddressTypeTest().main() diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py index a052ec7477..b5afc3785e 100755 --- a/test/functional/wallet_basic.py +++ b/test/functional/wallet_basic.py @@ -420,6 +420,9 @@ class WalletTest(BitcoinTestFramework): # This will raise an exception for importing an invalid pubkey assert_raises_rpc_error(-5, "Pubkey is not a valid public key", self.nodes[0].importpubkey, "5361746f736869204e616b616d6f746f") + # Bech32m addresses cannot be imported into a legacy wallet + assert_raises_rpc_error(-5, "Bech32m addresses cannot be imported into legacy wallets", self.nodes[0].importaddress, "bcrt1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqc8gma6") + # Import address and private key to check correct behavior of spendable unspents # 1. Send some coins to generate new UTXO address_to_import = self.nodes[2].getnewaddress() diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py index b21461ee7b..c04986038d 100755 --- a/test/functional/wallet_bumpfee.py +++ b/test/functional/wallet_bumpfee.py @@ -14,17 +14,23 @@ added in the future, they should try to follow the same convention and not make assumptions about execution order. 
""" from decimal import Decimal -import io -from test_framework.blocktools import COINBASE_MATURITY -from test_framework.blocktools import add_witness_commitment, create_block, create_coinbase, send_to_witness -from test_framework.messages import BIP125_SEQUENCE_NUMBER, CTransaction +from test_framework.blocktools import ( + COINBASE_MATURITY, + add_witness_commitment, + create_block, + create_coinbase, + send_to_witness, +) +from test_framework.messages import ( + BIP125_SEQUENCE_NUMBER, + tx_from_hex, +) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, assert_greater_than, assert_raises_rpc_error, - hex_str_to_bytes, ) WALLET_PASSPHRASE = "test" @@ -576,9 +582,7 @@ def spend_one_input(node, dest_address, change_size=Decimal("0.00049000")): def submit_block_with_tx(node, tx): - ctx = CTransaction() - ctx.deserialize(io.BytesIO(hex_str_to_bytes(tx))) - + ctx = tx_from_hex(tx) tip = node.getbestblockhash() height = node.getblockcount() + 1 block_time = node.getblockheader(tip)["mediantime"] + 1 diff --git a/test/functional/wallet_groups.py b/test/functional/wallet_groups.py index f32acb8e15..d9d135a986 100755 --- a/test/functional/wallet_groups.py +++ b/test/functional/wallet_groups.py @@ -6,7 +6,9 @@ from test_framework.blocktools import COINBASE_MATURITY from test_framework.test_framework import BitcoinTestFramework -from test_framework.messages import CTransaction, FromHex, ToHex +from test_framework.messages import ( + tx_from_hex, +) from test_framework.util import ( assert_approx, assert_equal, @@ -154,10 +156,10 @@ class WalletGroupTest(BitcoinTestFramework): self.log.info("Fill a wallet with 10,000 outputs corresponding to the same scriptPubKey") for _ in range(5): raw_tx = self.nodes[0].createrawtransaction([{"txid":"0"*64, "vout":0}], [{addr2[0]: 0.05}]) - tx = FromHex(CTransaction(), raw_tx) + tx = tx_from_hex(raw_tx) tx.vin = [] tx.vout = [tx.vout[0]] * 2000 - funded_tx = self.nodes[0].fundrawtransaction(ToHex(tx)) + funded_tx = self.nodes[0].fundrawtransaction(tx.serialize().hex()) signed_tx = self.nodes[0].signrawtransactionwithwallet(funded_tx['hex']) self.nodes[0].sendrawtransaction(signed_tx['hex']) self.nodes[0].generate(1) diff --git a/test/functional/wallet_importmulti.py b/test/functional/wallet_importmulti.py index 0a00c5eed9..baeac655df 100755 --- a/test/functional/wallet_importmulti.py +++ b/test/functional/wallet_importmulti.py @@ -746,6 +746,27 @@ class ImportMultiTest(BitcoinTestFramework): assert 'hdmasterfingerprint' not in pub_import_info assert 'hdkeypath' not in pub_import_info + # Bech32m addresses and descriptors cannot be imported + self.log.info("Bech32m addresses and descriptors cannot be imported") + self.test_importmulti( + { + "scriptPubKey": {"address": "bcrt1p0xlxvlhemja6c4dqv22uapctqupfhlxm9h8z3k2e72q4k9hcz7vqc8gma6"}, + "timestamp": "now", + }, + success=False, + error_code=-5, + error_message="Bech32m addresses cannot be imported into legacy wallets", + ) + self.test_importmulti( + { + "desc": descsum_create("tr({})".format(pub)), + "timestamp": "now", + }, + success=False, + error_code=-5, + error_message="Bech32m descriptors cannot be imported into legacy wallets", + ) + # Import some public keys to the keypool of a no privkey wallet self.log.info("Adding pubkey to keypool of disableprivkey wallet") self.nodes[1].createwallet(wallet_name="noprivkeys", disable_private_keys=True) diff --git a/test/functional/wallet_keypool.py b/test/functional/wallet_keypool.py index 
3fe6adeebc..28bfc9116f 100755 --- a/test/functional/wallet_keypool.py +++ b/test/functional/wallet_keypool.py @@ -161,7 +161,7 @@ class KeyPoolTest(BitcoinTestFramework): # Using a fee rate (10 sat / byte) well above the minimum relay rate # creating a 5,000 sat transaction with change should not be possible - assert_raises_rpc_error(-4, "Transaction needs a change address, but we can't generate it. Please call keypoolrefill first.", w2.walletcreatefundedpsbt, inputs=[], outputs=[{addr.pop(): 0.00005000}], options={"subtractFeeFromOutputs": [0], "feeRate": 0.00010}) + assert_raises_rpc_error(-4, "Transaction needs a change address, but we can't generate it.", w2.walletcreatefundedpsbt, inputs=[], outputs=[{addr.pop(): 0.00005000}], options={"subtractFeeFromOutputs": [0], "feeRate": 0.00010}) # creating a 10,000 sat transaction without change, with a manual input, should still be possible res = w2.walletcreatefundedpsbt(inputs=w2.listunspent(), outputs=[{destination: 0.00010000}], options={"subtractFeeFromOutputs": [0], "feeRate": 0.00010}) diff --git a/test/functional/wallet_labels.py b/test/functional/wallet_labels.py index 2d792bac52..a571454acf 100755 --- a/test/functional/wallet_labels.py +++ b/test/functional/wallet_labels.py @@ -135,31 +135,33 @@ class WalletLabelsTest(BitcoinTestFramework): # in the label. This is a no-op. change_label(node, labels[2].addresses[0], labels[2], labels[2]) - self.log.info('Check watchonly labels') - node.createwallet(wallet_name='watch_only', disable_private_keys=True) - wallet_watch_only = node.get_wallet_rpc('watch_only') - BECH32_VALID = { - '✔️_VER15_PROG40': 'bcrt10qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqxkg7fn', - '✔️_VER16_PROG03': 'bcrt1sqqqqq8uhdgr', - '✔️_VER16_PROB02': 'bcrt1sqqqq4wstyw', - } - BECH32_INVALID = { - '❌_VER15_PROG41': 'bcrt1sqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqajlxj8', - '❌_VER16_PROB01': 'bcrt1sqq5r4036', - } - for l in BECH32_VALID: - ad = BECH32_VALID[l] - wallet_watch_only.importaddress(label=l, rescan=False, address=ad) - node.generatetoaddress(1, ad) - assert_equal(wallet_watch_only.getaddressesbylabel(label=l), {ad: {'purpose': 'receive'}}) - assert_equal(wallet_watch_only.getreceivedbylabel(label=l), 0) - for l in BECH32_INVALID: - ad = BECH32_INVALID[l] - assert_raises_rpc_error( - -5, - "Address is not valid" if self.options.descriptors else "Invalid Bitcoin address or script", - lambda: wallet_watch_only.importaddress(label=l, rescan=False, address=ad), - ) + if self.options.descriptors: + # This is a descriptor wallet test because of segwit v1+ addresses + self.log.info('Check watchonly labels') + node.createwallet(wallet_name='watch_only', disable_private_keys=True) + wallet_watch_only = node.get_wallet_rpc('watch_only') + BECH32_VALID = { + '✔️_VER15_PROG40': 'bcrt10qqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqxkg7fn', + '✔️_VER16_PROG03': 'bcrt1sqqqqq8uhdgr', + '✔️_VER16_PROB02': 'bcrt1sqqqq4wstyw', + } + BECH32_INVALID = { + '❌_VER15_PROG41': 'bcrt1sqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqqajlxj8', + '❌_VER16_PROB01': 'bcrt1sqq5r4036', + } + for l in BECH32_VALID: + ad = BECH32_VALID[l] + wallet_watch_only.importaddress(label=l, rescan=False, address=ad) + node.generatetoaddress(1, ad) + assert_equal(wallet_watch_only.getaddressesbylabel(label=l), {ad: {'purpose': 'receive'}}) + assert_equal(wallet_watch_only.getreceivedbylabel(label=l), 0) + for l in BECH32_INVALID: + ad = BECH32_INVALID[l] + 
assert_raises_rpc_error( + -5, + "Address is not valid" if self.options.descriptors else "Invalid Bitcoin address or script", + lambda: wallet_watch_only.importaddress(label=l, rescan=False, address=ad), + ) class Label: diff --git a/test/functional/wallet_listtransactions.py b/test/functional/wallet_listtransactions.py index 71573964de..8b503f5971 100755 --- a/test/functional/wallet_listtransactions.py +++ b/test/functional/wallet_listtransactions.py @@ -4,22 +4,17 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the listtransactions API.""" from decimal import Decimal -from io import BytesIO -from test_framework.messages import COIN, CTransaction +from test_framework.messages import ( + COIN, + tx_from_hex, +) from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_array_result, assert_equal, - hex_str_to_bytes, ) -def tx_from_hex(hexstring): - tx = CTransaction() - f = BytesIO(hex_str_to_bytes(hexstring)) - tx.deserialize(f) - return tx - class ListTransactionsTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 2 diff --git a/test/functional/wallet_resendwallettransactions.py b/test/functional/wallet_resendwallettransactions.py index 78d88f8aa5..37dee219e7 100755 --- a/test/functional/wallet_resendwallettransactions.py +++ b/test/functional/wallet_resendwallettransactions.py @@ -5,8 +5,10 @@ """Test that the wallet resends transactions periodically.""" import time -from test_framework.blocktools import create_block, create_coinbase -from test_framework.messages import ToHex +from test_framework.blocktools import ( + create_block, + create_coinbase, +) from test_framework.p2p import P2PTxInvStore from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal @@ -48,7 +50,7 @@ class ResendWalletTransactionsTest(BitcoinTestFramework): block = create_block(int(node.getbestblockhash(), 16), create_coinbase(node.getblockcount() + 1), block_time) block.rehash() block.solve() - node.submitblock(ToHex(block)) + node.submitblock(block.serialize().hex()) # Set correct m_best_block_time, which is used in ResendWalletTransactions node.syncwithvalidationinterfacequeue() diff --git a/test/functional/wallet_taproot.py b/test/functional/wallet_taproot.py index 65ca7bdef7..9eb204bf37 100755 --- a/test/functional/wallet_taproot.py +++ b/test/functional/wallet_taproot.py @@ -6,6 +6,7 @@ import random +from decimal import Decimal from test_framework.test_framework import BitcoinTestFramework from test_framework.util import assert_equal from test_framework.descriptors import descsum_create @@ -172,9 +173,9 @@ class WalletTaprootTest(BitcoinTestFramework): """Test generation and spending of P2TR address outputs.""" def set_test_params(self): - self.num_nodes = 2 + self.num_nodes = 3 self.setup_clean_chain = True - self.extra_args = [['-keypool=100'], ['-keypool=100']] + self.extra_args = [['-keypool=100'], ['-keypool=100'], ["-vbparams=taproot:1:1"]] self.supports_cli = False def skip_test_if_missing_module(self): @@ -225,19 +226,124 @@ class WalletTaprootTest(BitcoinTestFramework): result = self.addr_gen.importdescriptors([{"desc": desc_pub, "active": True, "timestamp": "now"}]) assert(result[0]['success']) for i in range(4): - addr_g = self.addr_gen.getnewaddress(address_type='bech32') + addr_g = self.addr_gen.getnewaddress(address_type='bech32m') if treefn is not None: addr_r = self.make_addr(treefn, keys, i) assert_equal(addr_g, addr_r) + desc_a = 
self.addr_gen.getaddressinfo(addr_g)['desc'] + if desc.startswith("tr("): + assert desc_a.startswith("tr(") + rederive = self.nodes[1].deriveaddresses(desc_a) + assert_equal(len(rederive), 1) + assert_equal(rederive[0], addr_g) + + # tr descriptors cannot be imported when Taproot is not active + result = self.privs_tr_enabled.importdescriptors([{"desc": desc, "timestamp": "now"}]) + assert(result[0]["success"]) + result = self.pubs_tr_enabled.importdescriptors([{"desc": desc_pub, "timestamp": "now"}]) + assert(result[0]["success"]) + if desc.startswith("tr"): + result = self.privs_tr_disabled.importdescriptors([{"desc": desc, "timestamp": "now"}]) + assert(not result[0]["success"]) + assert_equal(result[0]["error"]["code"], -4) + assert_equal(result[0]["error"]["message"], "Cannot import tr() descriptor when Taproot is not active") + result = self.pubs_tr_disabled.importdescriptors([{"desc": desc_pub, "timestamp": "now"}]) + assert(not result[0]["success"]) + assert_equal(result[0]["error"]["code"], -4) + assert_equal(result[0]["error"]["message"], "Cannot import tr() descriptor when Taproot is not active") + + def do_test_sendtoaddress(self, comment, pattern, privmap, treefn, keys_pay, keys_change): + self.log.info("Testing %s through sendtoaddress" % comment) + desc_pay = self.make_desc(pattern, privmap, keys_pay) + desc_change = self.make_desc(pattern, privmap, keys_change) + desc_pay_pub = self.make_desc(pattern, privmap, keys_pay, True) + desc_change_pub = self.make_desc(pattern, privmap, keys_change, True) + assert_equal(self.nodes[0].getdescriptorinfo(desc_pay)['descriptor'], desc_pay_pub) + assert_equal(self.nodes[0].getdescriptorinfo(desc_change)['descriptor'], desc_change_pub) + result = self.rpc_online.importdescriptors([{"desc": desc_pay, "active": True, "timestamp": "now"}]) + assert(result[0]['success']) + result = self.rpc_online.importdescriptors([{"desc": desc_change, "active": True, "timestamp": "now", "internal": True}]) + assert(result[0]['success']) + for i in range(4): + addr_g = self.rpc_online.getnewaddress(address_type='bech32m') + if treefn is not None: + addr_r = self.make_addr(treefn, keys_pay, i) + assert_equal(addr_g, addr_r) + boring_balance = int(self.boring.getbalance() * 100000000) + to_amnt = random.randrange(1000000, boring_balance) + self.boring.sendtoaddress(address=addr_g, amount=Decimal(to_amnt) / 100000000, subtractfeefromamount=True) + self.nodes[0].generatetoaddress(1, self.boring.getnewaddress()) + test_balance = int(self.rpc_online.getbalance() * 100000000) + ret_amnt = random.randrange(100000, test_balance) + res = self.rpc_online.sendtoaddress(address=self.boring.getnewaddress(), amount=Decimal(ret_amnt) / 100000000, subtractfeefromamount=True) + self.nodes[0].generatetoaddress(1, self.boring.getnewaddress()) + assert(self.rpc_online.gettransaction(res)["confirmations"] > 0) + + def do_test_psbt(self, comment, pattern, privmap, treefn, keys_pay, keys_change): + self.log.info("Testing %s through PSBT" % comment) + desc_pay = self.make_desc(pattern, privmap, keys_pay, False) + desc_change = self.make_desc(pattern, privmap, keys_change, False) + desc_pay_pub = self.make_desc(pattern, privmap, keys_pay, True) + desc_change_pub = self.make_desc(pattern, privmap, keys_change, True) + assert_equal(self.nodes[0].getdescriptorinfo(desc_pay)['descriptor'], desc_pay_pub) + assert_equal(self.nodes[0].getdescriptorinfo(desc_change)['descriptor'], desc_change_pub) + result = self.psbt_online.importdescriptors([{"desc": desc_pay_pub, "active": True, 
"timestamp": "now"}]) + assert(result[0]['success']) + result = self.psbt_online.importdescriptors([{"desc": desc_change_pub, "active": True, "timestamp": "now", "internal": True}]) + assert(result[0]['success']) + result = self.psbt_offline.importdescriptors([{"desc": desc_pay, "active": True, "timestamp": "now"}]) + assert(result[0]['success']) + result = self.psbt_offline.importdescriptors([{"desc": desc_change, "active": True, "timestamp": "now", "internal": True}]) + assert(result[0]['success']) + for i in range(4): + addr_g = self.psbt_online.getnewaddress(address_type='bech32m') + if treefn is not None: + addr_r = self.make_addr(treefn, keys_pay, i) + assert_equal(addr_g, addr_r) + boring_balance = int(self.boring.getbalance() * 100000000) + to_amnt = random.randrange(1000000, boring_balance) + self.boring.sendtoaddress(address=addr_g, amount=Decimal(to_amnt) / 100000000, subtractfeefromamount=True) + self.nodes[0].generatetoaddress(1, self.boring.getnewaddress()) + test_balance = int(self.psbt_online.getbalance() * 100000000) + ret_amnt = random.randrange(100000, test_balance) + psbt = self.psbt_online.walletcreatefundedpsbt([], [{self.boring.getnewaddress(): Decimal(ret_amnt) / 100000000}], None, {"subtractFeeFromOutputs":[0]})['psbt'] + res = self.psbt_offline.walletprocesspsbt(psbt) + assert(res['complete']) + rawtx = self.nodes[0].finalizepsbt(res['psbt'])['hex'] + txid = self.nodes[0].sendrawtransaction(rawtx) + self.nodes[0].generatetoaddress(1, self.boring.getnewaddress()) + assert(self.psbt_online.gettransaction(txid)['confirmations'] > 0) def do_test(self, comment, pattern, privmap, treefn, nkeys): - keys = self.rand_keys(nkeys) - self.do_test_addr(comment, pattern, privmap, treefn, keys) + keys = self.rand_keys(nkeys * 4) + self.do_test_addr(comment, pattern, privmap, treefn, keys[0:nkeys]) + self.do_test_sendtoaddress(comment, pattern, privmap, treefn, keys[0:nkeys], keys[nkeys:2*nkeys]) + self.do_test_psbt(comment, pattern, privmap, treefn, keys[2*nkeys:3*nkeys], keys[3*nkeys:4*nkeys]) def run_test(self): self.log.info("Creating wallets...") + self.nodes[0].createwallet(wallet_name="privs_tr_enabled", descriptors=True, blank=True) + self.privs_tr_enabled = self.nodes[0].get_wallet_rpc("privs_tr_enabled") + self.nodes[2].createwallet(wallet_name="privs_tr_disabled", descriptors=True, blank=True) + self.privs_tr_disabled=self.nodes[2].get_wallet_rpc("privs_tr_disabled") + self.nodes[0].createwallet(wallet_name="pubs_tr_enabled", descriptors=True, blank=True, disable_private_keys=True) + self.pubs_tr_enabled = self.nodes[0].get_wallet_rpc("pubs_tr_enabled") + self.nodes[2].createwallet(wallet_name="pubs_tr_disabled", descriptors=True, blank=True, disable_private_keys=True) + self.pubs_tr_disabled=self.nodes[2].get_wallet_rpc("pubs_tr_disabled") + self.nodes[0].createwallet(wallet_name="boring") self.nodes[0].createwallet(wallet_name="addr_gen", descriptors=True, disable_private_keys=True, blank=True) + self.nodes[0].createwallet(wallet_name="rpc_online", descriptors=True, blank=True) + self.nodes[0].createwallet(wallet_name="psbt_online", descriptors=True, disable_private_keys=True, blank=True) + self.nodes[1].createwallet(wallet_name="psbt_offline", descriptors=True, blank=True) + self.boring = self.nodes[0].get_wallet_rpc("boring") self.addr_gen = self.nodes[0].get_wallet_rpc("addr_gen") + self.rpc_online = self.nodes[0].get_wallet_rpc("rpc_online") + self.psbt_online = self.nodes[0].get_wallet_rpc("psbt_online") + self.psbt_offline = 
self.nodes[1].get_wallet_rpc("psbt_offline") + + self.log.info("Mining blocks...") + gen_addr = self.boring.getnewaddress() + self.nodes[0].generatetoaddress(101, gen_addr) self.do_test( "tr(XPRV)", @@ -254,6 +360,13 @@ class WalletTaprootTest(BitcoinTestFramework): 1 ) self.do_test( + "wpkh(XPRV)", + "wpkh($1/*)", + [True], + None, + 1 + ) + self.do_test( "tr(XPRV,{H,{H,XPUB}})", "tr($1/*,{pk($H),{pk($H),pk($2/*)}})", [True, False], @@ -261,12 +374,54 @@ class WalletTaprootTest(BitcoinTestFramework): 2 ) self.do_test( + "wsh(multi(1,XPRV,XPUB))", + "wsh(multi(1,$1/*,$2/*))", + [True, False], + None, + 2 + ) + self.do_test( + "tr(XPRV,{XPUB,XPUB})", + "tr($1/*,{pk($2/*),pk($2/*)})", + [True, False], + lambda k1, k2: (key(k1), [pk(k2), pk(k2)]), + 2 + ) + self.do_test( + "tr(XPRV,{{XPUB,H},{H,XPUB}})", + "tr($1/*,{{pk($2/*),pk($H)},{pk($H),pk($2/*)}})", + [True, False], + lambda k1, k2: (key(k1), [[pk(k2), pk(H_POINT)], [pk(H_POINT), pk(k2)]]), + 2 + ) + self.do_test( "tr(XPUB,{{H,{H,XPUB}},{H,{H,{H,XPRV}}}})", "tr($1/*,{{pk($H),{pk($H),pk($2/*)}},{pk($H),{pk($H),{pk($H),pk($3/*)}}}})", [False, False, True], lambda k1, k2, k3: (key(k1), [[pk(H_POINT), [pk(H_POINT), pk(k2)]], [pk(H_POINT), [pk(H_POINT), [pk(H_POINT), pk(k3)]]]]), 3 ) + self.do_test( + "tr(XPRV,{XPUB,{{XPUB,{H,H}},{{H,H},XPUB}}})", + "tr($1/*,{pk($2/*),{{pk($2/*),{pk($H),pk($H)}},{{pk($H),pk($H)},pk($2/*)}}})", + [True, False], + lambda k1, k2: (key(k1), [pk(k2), [[pk(k2), [pk(H_POINT), pk(H_POINT)]], [[pk(H_POINT), pk(H_POINT)], pk(k2)]]]), + 2 + ) + + self.log.info("Sending everything back...") + + txid = self.rpc_online.sendtoaddress(address=self.boring.getnewaddress(), amount=self.rpc_online.getbalance(), subtractfeefromamount=True) + self.nodes[0].generatetoaddress(1, self.boring.getnewaddress()) + assert(self.rpc_online.gettransaction(txid)["confirmations"] > 0) + + psbt = self.psbt_online.walletcreatefundedpsbt([], [{self.boring.getnewaddress(): self.psbt_online.getbalance()}], None, {"subtractFeeFromOutputs": [0]})['psbt'] + res = self.psbt_offline.walletprocesspsbt(psbt) + assert(res['complete']) + rawtx = self.nodes[0].finalizepsbt(res['psbt'])['hex'] + txid = self.nodes[0].sendrawtransaction(rawtx) + self.nodes[0].generatetoaddress(1, self.boring.getnewaddress()) + assert(self.psbt_online.gettransaction(txid)['confirmations'] > 0) if __name__ == '__main__': WalletTaprootTest().main() diff --git a/test/functional/wallet_txn_clone.py b/test/functional/wallet_txn_clone.py index 84ff9ad772..76b39201e3 100755 --- a/test/functional/wallet_txn_clone.py +++ b/test/functional/wallet_txn_clone.py @@ -4,12 +4,14 @@ # file COPYING or http://www.opensource.org/licenses/mit-license.php. """Test the wallet accounts properly when there are cloned transactions with malleated scriptsigs.""" -import io from test_framework.test_framework import BitcoinTestFramework from test_framework.util import ( assert_equal, ) -from test_framework.messages import CTransaction, COIN +from test_framework.messages import ( + COIN, + tx_from_hex, +) class TxnMallTest(BitcoinTestFramework): @@ -71,8 +73,7 @@ class TxnMallTest(BitcoinTestFramework): clone_raw = self.nodes[0].createrawtransaction(clone_inputs, clone_outputs, clone_locktime) # createrawtransaction randomizes the order of its outputs, so swap them if necessary. 
- clone_tx = CTransaction() - clone_tx.deserialize(io.BytesIO(bytes.fromhex(clone_raw))) + clone_tx = tx_from_hex(clone_raw) if (rawtx1["vout"][0]["value"] == 40 and clone_tx.vout[0].nValue != 40*COIN or rawtx1["vout"][0]["value"] != 40 and clone_tx.vout[0].nValue == 40*COIN): (clone_tx.vout[0], clone_tx.vout[1]) = (clone_tx.vout[1], clone_tx.vout[0]) diff --git a/test/lint/lint-spelling.ignore-words.txt b/test/lint/lint-spelling.ignore-words.txt index 267c06ea20..9906b15e9a 100644 --- a/test/lint/lint-spelling.ignore-words.txt +++ b/test/lint/lint-spelling.ignore-words.txt @@ -7,6 +7,7 @@ hights hist inout invokable +keypair mor nin ser diff --git a/test/util/data/bitcoin-util-test.json b/test/util/data/bitcoin-util-test.json index 0a9846b4be..a648c0287a 100644 --- a/test/util/data/bitcoin-util-test.json +++ b/test/util/data/bitcoin-util-test.json @@ -1,4 +1,34 @@ [ + { "exec": "./bitcoin-util", + "args": ["foo"], + "return_code": 1, + "error_txt": "Error parsing command line arguments: Invalid command 'foo'", + "description": "" + }, + { "exec": "./bitcoin-util", + "args": ["help"], + "return_code": 1, + "error_txt": "Error parsing command line arguments: Invalid command 'help'", + "description": "`help` raises an error. Use `-help`" + }, + { "exec": "./bitcoin-util", + "args": ["grind"], + "return_code": 1, + "error_txt": "Must specify block header to grind", + "description": "" + }, + { "exec": "./bitcoin-util", + "args": ["grind", "1", "2"], + "return_code": 1, + "error_txt": "Must specify block header to grind", + "description": "" + }, + { "exec": "./bitcoin-util", + "args": ["grind", "aa"], + "return_code": 1, + "error_txt": "Could not decode block header", + "description": "" + }, { "exec": "./bitcoin-tx", "args": ["-create", "nversion=1"], "output_cmp": "blanktxv1.hex", |
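Note: a recurring refactor in the hunks above replaces per-test deserialization boilerplate (CTransaction() plus deserialize(BytesIO(hex_str_to_bytes(...)))) with a single tx_from_hex() helper imported from test_framework.messages, and replaces the ToHex()/FromHex() wrappers with tx.serialize().hex(). Below is a minimal sketch of such a helper, inferred from the local version being removed from wallet_listtransactions.py; the actual implementation in test_framework/messages.py may differ in detail (for instance, it could delegate to a generic from_hex() routine).

    from io import BytesIO

    def tx_from_hex(hex_string):
        """Deserialize a transaction from a hex string.

        Sketch only: CTransaction here is the class defined in
        test_framework.messages; bytes.fromhex() stands in for the
        hex_str_to_bytes() utility that the hunks above drop.
        """
        tx = CTransaction()
        tx.deserialize(BytesIO(bytes.fromhex(hex_string)))
        return tx

Callers then shrink to a single line, e.g. clone_tx = tx_from_hex(clone_raw) in wallet_txn_clone.py above, and no longer need an io import or the hex_str_to_bytes() helper.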