62 files changed, 899 insertions, 357 deletions
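The first hunks below switch external-signer support from an unconditional default of yes to auto-detection, and reject `--enable-external-signer` outright for `*mingw*` hosts, because Boost Process pulls in Boost Filesystem and needs workarounds under mingw-w64. As a minimal sketch of what the resulting `ENABLE_EXTERNAL_SIGNER` define gates on the C++ side (illustrative only; `RunSigner` is a hypothetical helper, not code from this diff):

```cpp
// Illustrative sketch: Boost::Process-backed code compiles only when configure
// defined ENABLE_EXTERNAL_SIGNER, which after this change never happens for
// Windows (mingw) hosts.
#include <string>

#ifdef ENABLE_EXTERNAL_SIGNER
#include <boost/process.hpp>

static bool RunSigner(const std::string& command)
{
    boost::process::child child{command}; // launch the external signer binary
    child.wait();
    return child.exit_code() == 0;
}
#else
// Without the define, the signer path is compiled out entirely.
static bool RunSigner(const std::string&) { return false; }
#endif
```
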
diff --git a/ci/test/00_setup_env_win64.sh b/ci/test/00_setup_env_win64.sh index 44b6eb7ae3..6619852423 100755 --- a/ci/test/00_setup_env_win64.sh +++ b/ci/test/00_setup_env_win64.sh @@ -13,4 +13,4 @@ export DPKG_ADD_ARCH="i386" export PACKAGES="python3 nsis g++-mingw-w64-x86-64 wine-binfmt wine64 wine32 file" export RUN_FUNCTIONAL_TESTS=false export GOAL="deploy" -export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests --disable-external-signer" +export BITCOIN_CONFIG="--enable-reduce-exports --disable-gui-tests" diff --git a/configure.ac b/configure.ac index 5a6b54a1ae..bef3973996 100644 --- a/configure.ac +++ b/configure.ac @@ -321,7 +321,7 @@ AC_ARG_ENABLE([werror], AC_ARG_ENABLE([external-signer], [AS_HELP_STRING([--enable-external-signer],[compile external signer support (default is yes, requires Boost::Process)])], [use_external_signer=$enableval], - [use_external_signer=yes]) + [use_external_signer=auto]) AC_ARG_ENABLE([lto], [AS_HELP_STRING([--enable-lto],[build using LTO (default is no)])], @@ -1415,7 +1415,21 @@ if test "$use_boost" = "yes"; then fi if test "$use_external_signer" != "no"; then - AC_DEFINE([ENABLE_EXTERNAL_SIGNER], [], [Define if external signer support is enabled]) + case $host in + *mingw*) + dnl Boost Process uses Boost Filesystem when targeting Windows. Also, + dnl since Boost 1.71.0, Process does not work with mingw-w64 without + dnl workarounds. See 67669ab425b52a2b6be3d2f3b3b7e3939b676a2c. + if test "$use_external_signer" = "yes"; then + AC_MSG_ERROR([External signing is not supported on Windows]) + fi + use_external_signer="no"; + ;; + *) + use_external_signer="yes" + AC_DEFINE([ENABLE_EXTERNAL_SIGNER], [1], [Define if external signer support is enabled]) + ;; + esac fi AM_CONDITIONAL([ENABLE_EXTERNAL_SIGNER], [test "$use_external_signer" = "yes"]) diff --git a/contrib/guix/guix-build b/contrib/guix/guix-build index 98381f3e24..3e2542a418 100755 --- a/contrib/guix/guix-build +++ b/contrib/guix/guix-build @@ -239,7 +239,7 @@ SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(git -c log.showSignature=false log --f time-machine() { # shellcheck disable=SC2086 guix time-machine --url=https://git.savannah.gnu.org/git/guix.git \ - --commit=6ba510d76d6847065be725e958718002f3b13c7a \ + --commit=1ef7a03a148cf5f83ab1820444f6bd50d8e732d1 \ --cores="$JOBS" \ --keep-failed \ --fallback \ diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm index 3528030bec..c1950ad08e 100644 --- a/contrib/guix/manifest.scm +++ b/contrib/guix/manifest.scm @@ -397,6 +397,11 @@ thus should be able to compile on most platforms where these exist.") (string-append indent "@unittest.skip(\"Disabled by Guix\")\n" line))) + (substitute* "tests/test_validate.py" + (("^(.*)def test_revocation_mode_soft" line indent) + (string-append indent + "@unittest.skip(\"Disabled by Guix\")\n" + line))) #t)) (replace 'check (lambda _ diff --git a/depends/Makefile b/depends/Makefile index d2a3c35f1e..73e2af5501 100644 --- a/depends/Makefile +++ b/depends/Makefile @@ -137,7 +137,7 @@ include packages/packages.mk build_id:=$(shell env CC='$(build_CC)' CXX='$(build_CXX)' AR='$(build_AR)' RANLIB='$(build_RANLIB)' STRIP='$(build_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' ./gen_id '$(BUILD_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath $(GUIX_ENVIRONMENT))') $(host_arch)_$(host_os)_id:=$(shell env CC='$(host_CC)' CXX='$(host_CXX)' AR='$(host_AR)' RANLIB='$(host_RANLIB)' STRIP='$(host_STRIP)' SHA256SUM='$(build_SHA256SUM)' DEBUG='$(DEBUG)' ./gen_id '$(HOST_ID_SALT)' 'GUIX_ENVIRONMENT=$(realpath 
$(GUIX_ENVIRONMENT))') -qrencode_packages_$(NO_QR) = $(qrencode_packages) +qrencode_packages_$(NO_QR) = $(qrencode_$(host_os)_packages) qt_packages_$(NO_QT) = $(qt_packages) $(qt_$(host_os)_packages) $(qt_$(host_arch)_$(host_os)_packages) $(qrencode_packages_) diff --git a/depends/packages/packages.mk b/depends/packages/packages.mk index 77866c8e7a..4c66b3bdb9 100644 --- a/depends/packages/packages.mk +++ b/depends/packages/packages.mk @@ -1,10 +1,12 @@ packages:=boost libevent -qrencode_packages = qrencode +qrencode_linux_packages = qrencode +qrencode_android_packages = qrencode +qrencode_darwin_packages = qrencode +qrencode_mingw32_packages = qrencode qt_linux_packages:=qt expat libxcb xcb_proto libXau xproto freetype fontconfig libxkbcommon libxcb_util libxcb_util_render libxcb_util_keysyms libxcb_util_image libxcb_util_wm qt_android_packages=qt - qt_darwin_packages=qt qt_mingw32_packages=qt diff --git a/depends/packages/zeromq.mk b/depends/packages/zeromq.mk index 9798248c61..94e92431a2 100644 --- a/depends/packages/zeromq.mk +++ b/depends/packages/zeromq.mk @@ -1,9 +1,9 @@ package=zeromq -$(package)_version=4.3.1 +$(package)_version=4.3.4 $(package)_download_path=https://github.com/zeromq/libzmq/releases/download/v$($(package)_version)/ $(package)_file_name=$(package)-$($(package)_version).tar.gz -$(package)_sha256_hash=bcbabe1e2c7d0eec4ed612e10b94b112dd5f06fcefa994a0c79a45d835cd21eb -$(package)_patches=remove_libstd_link.patch +$(package)_sha256_hash=c593001a89f5a85dd2ddf564805deb860e02471171b3f204944857336295c3e5 +$(package)_patches=remove_libstd_link.patch netbsd_kevent_void.patch define $(package)_set_vars $(package)_config_opts=--without-docs --disable-shared --disable-curve --disable-curve-keygen --disable-perf @@ -17,10 +17,12 @@ endef define $(package)_preprocess_cmds patch -p1 < $($(package)_patch_dir)/remove_libstd_link.patch && \ + patch -p1 < $($(package)_patch_dir)/netbsd_kevent_void.patch && \ cp -f $(BASEDIR)/config.guess $(BASEDIR)/config.sub config endef define $(package)_config_cmds + ./autogen.sh && \ $($(package)_autoconf) endef diff --git a/depends/patches/zeromq/netbsd_kevent_void.patch b/depends/patches/zeromq/netbsd_kevent_void.patch new file mode 100644 index 0000000000..4e36a363fb --- /dev/null +++ b/depends/patches/zeromq/netbsd_kevent_void.patch @@ -0,0 +1,57 @@ +commit 129137d5182967dbfcfec66bad843df2a992a78f +Author: fanquake <fanquake@gmail.com> +Date: Mon Jan 3 20:13:33 2022 +0800 + + problem: kevent udata is now void* on NetBSD Current (10) + + solution: check for the intptr_t variant in configure. 
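The patch body follows below. Its net effect on zeromq's kqueue wrapper can be condensed into this sketch (simplified, not a verbatim excerpt; `watch_fd` is a hypothetical helper): the configure probe links a small program that passes an `intptr_t` as `udata` to `EV_SET`, and the old NetBSD typedef is kept only if that still compiles.

```cpp
// Simplified view of the outcome of the configure probe (assumption: condensed
// from src/kqueue.cpp in the patch below; only builds on kqueue platforms).
#include <cstdint>
#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>

// Older NetBSD declares (struct kevent).udata as intptr_t; NetBSD 10 and every
// other kqueue platform use void*. The probe defines
// ZMQ_NETBSD_KEVENT_UDATA_INTPTR_T only when the intptr_t form still links.
#if defined(ZMQ_HAVE_NETBSD) && defined(ZMQ_NETBSD_KEVENT_UDATA_INTPTR_T)
typedef intptr_t kevent_udata_t;
#else
typedef void* kevent_udata_t;
#endif

static void watch_fd(int kq, int fd, void* ctx)
{
    struct kevent ev;
    EV_SET(&ev, fd, EVFILT_READ, EV_ADD, 0, 0, (kevent_udata_t)ctx);
    kevent(kq, &ev, 1, nullptr, 0, nullptr);
}
```
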
+ +diff --git a/configure.ac b/configure.ac +index 1a571291..402f8b86 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -308,6 +308,27 @@ case "${host_os}" in + if test "x$libzmq_netbsd_has_atomic" = "xno"; then + AC_DEFINE(ZMQ_FORCE_MUTEXES, 1, [Force to use mutexes]) + fi ++ # NetBSD Current (to become 10) has changed the type of udata in it's ++ # kevent struct from intptr_t to void * to align with darwin and other ++ # BSDs, see upstream commit: ++ # https://github.com/NetBSD/src/commit/e5ead823eb916b56589d2c6c560dbcfe4a2d0afc ++ AC_MSG_CHECKING([whether kevent udata type is intptr_t]) ++ AC_LANG_PUSH([C++]) ++ AC_LINK_IFELSE([AC_LANG_PROGRAM( ++ [[#include <sys/types.h> ++ #include <sys/event.h> ++ #include <sys/time.h>]], ++ [[struct kevent ev; ++ intptr_t udata; ++ EV_SET(&ev, 0, 0, EV_ADD, 0, 0, udata); ++ return 0;]])], ++ [libzmq_netbsd_kevent_udata_intptr_t=yes], ++ [libzmq_netbsd_kevent_udata_intptr_t=no]) ++ AC_LANG_POP([C++]) ++ AC_MSG_RESULT([$libzmq_netbsd_kevent_udata_intptr_t]) ++ if test "x$libzmq_netbsd_kevent_udata_intptr_t" = "xyes"; then ++ AC_DEFINE(ZMQ_NETBSD_KEVENT_UDATA_INTPTR_T, 1, [kevent udata type is intptr_t]) ++ fi + ;; + *openbsd*|*bitrig*) + # Define on OpenBSD to enable all library features +diff --git a/src/kqueue.cpp b/src/kqueue.cpp +index 53d82ac4..a6a7a7f2 100644 +--- a/src/kqueue.cpp ++++ b/src/kqueue.cpp +@@ -46,9 +46,9 @@ + #include "i_poll_events.hpp" + #include "likely.hpp" + +-// NetBSD defines (struct kevent).udata as intptr_t, everyone else +-// as void *. +-#if defined ZMQ_HAVE_NETBSD ++// NetBSD up to version 9 defines (struct kevent).udata as intptr_t, ++// everyone else as void *. ++#if defined ZMQ_HAVE_NETBSD && defined(ZMQ_NETBSD_KEVENT_UDATA_INTPTR_T) + #define kevent_udata_t intptr_t + #else + #define kevent_udata_t void * diff --git a/doc/dependencies.md b/doc/dependencies.md index 490ffd3c00..63315cdcc2 100644 --- a/doc/dependencies.md +++ b/doc/dependencies.md @@ -20,7 +20,7 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct | PCRE | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) | | Python (tests) | | [3.6](https://www.python.org/downloads) | | | | | qrencode | [3.4.4](https://fukuchi.org/works/qrencode) | | No | | | -| Qt | [5.12.11](https://download.qt.io/official_releases/qt/) | [5.9.5](https://github.com/bitcoin/bitcoin/issues/20104) | No | | | +| Qt | [5.15.2](https://download.qt.io/official_releases/qt/) | [5.9.5](https://github.com/bitcoin/bitcoin/issues/20104) | No | | | | SQLite | [3.32.1](https://sqlite.org/download.html) | [3.7.17](https://github.com/bitcoin/bitcoin/pull/19077) | | | | | XCB | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) (Linux only) | | systemtap ([tracing](tracing.md))| [4.5](https://sourceware.org/systemtap/ftp/releases/) | | | | | diff --git a/doc/fuzzing.md b/doc/fuzzing.md index 73d04837f1..9abfbc9213 100644 --- a/doc/fuzzing.md +++ b/doc/fuzzing.md @@ -71,6 +71,15 @@ block^@M-^?M-^?M-^?M-^?M-^?nM-^?M-^? In this case the fuzzer managed to create a `block` message which when passed to `ProcessMessage(...)` increased coverage. +It is possible to specify `bitcoind` arguments to the `fuzz` executable. +Depending on the test, they may be ignored or consumed and alter the behavior +of the test. 
Just make sure to use double-dash to distinguish them from the +fuzzer's own arguments: + +```sh +$ FUZZ=address_deserialize_v2 src/test/fuzz/fuzz -runs=1 fuzz_seed_corpus/address_deserialize_v2 --checkaddrman=5 --printtoconsole=1 +``` + ## Fuzzing corpora The project's collection of seed corpora is found in the [`bitcoin-core/qa-assets`](https://github.com/bitcoin-core/qa-assets) repo. diff --git a/doc/release-notes-14707.md b/doc/release-notes-14707.md deleted file mode 100644 index b53204f788..0000000000 --- a/doc/release-notes-14707.md +++ /dev/null @@ -1,19 +0,0 @@ -Wallet `receivedby` RPCs now include coinbase transactions -------------- - -Previously, the following wallet RPCs excluded coinbase transactions: - -`getreceivedbyaddress` - -`getreceivedbylabel` - -`listreceivedbyaddress` - -`listreceivedbylabel` - -This release changes this behaviour and returns results accounting for received coins from coinbase outputs. - -A new option, `include_immature_coinbase` (default=`false`), determines whether to account for immature coinbase transactions. -Immature coinbase transactions are coinbase transactions that have 100 or fewer confirmations, and are not spendable. - -The previous behaviour can be restored using the configuration `-deprecatedrpc=exclude_coinbase`, but may be removed in a future release. diff --git a/doc/release-notes-23113.md b/doc/release-notes-23113.md deleted file mode 100644 index b0904c9d7b..0000000000 --- a/doc/release-notes-23113.md +++ /dev/null @@ -1,9 +0,0 @@ -Notable changes -=============== - -Updated RPCs ------------- - -- Both `createmultisig` and `addmultisigaddress` now include a `warnings` -field, which will show a warning if a non-legacy address type is requested -when using uncompressed public keys. diff --git a/doc/release-notes-gui-459.md b/doc/release-notes-gui-459.md deleted file mode 100644 index b590ac5d45..0000000000 --- a/doc/release-notes-gui-459.md +++ /dev/null @@ -1,4 +0,0 @@ -GUI changes ------------ - -- The Bech32 checkbox has been replaced with a dropdown for all address types, including the new Bech32m (BIP-350) standard for Taproot enabled wallets. diff --git a/doc/release-notes.md b/doc/release-notes.md index 17b0ef545d..7a47d76bba 100644 --- a/doc/release-notes.md +++ b/doc/release-notes.md @@ -77,13 +77,6 @@ Otherwise, please use the `rescanblockchain` RPC to trigger a rescan. (#23123) Updated RPCs ------------ -- `upgradewallet` will now automatically flush the keypool if upgrading - from a non-HD wallet to an HD wallet, to immediately start using the - newly-generated HD keys. (#23093) - -- a new RPC `newkeypool` has been added, which will flush (entirely - clear and refill) the keypool. (#23093) - - The `validateaddress` RPC now returns an `error_locations` array for invalid addresses, with the indices of invalid character locations in the address (if known). For example, this will attempt to locate up to two Bech32 errors, and @@ -106,14 +99,6 @@ Updated RPCs - `value` - `scriptPubKey` -- `listunspent` now includes `ancestorcount`, `ancestorsize`, and - `ancestorfees` for each transaction output that is still in the mempool. - (#12677) - -- `lockunspent` now optionally takes a third parameter, `persistent`, which - causes the lock to be written persistently to the wallet database. This - allows UTXOs to remain locked even after node restarts or crashes. 
(#23065) - - The top-level fee fields `fee`, `modifiedfee`, `ancestorfees` and `descendantfees` returned by RPCs `getmempoolentry`,`getrawmempool(verbose=true)`, `getmempoolancestors(verbose=true)` and `getmempooldescendants(verbose=true)` @@ -123,6 +108,10 @@ Updated RPCs fields `ancestorfees` and `descendantfees` are denominated in sats, whereas all fields in the `fees` object are denominated in BTC. (#22689) +- Both `createmultisig` and `addmultisigaddress` now include a `warnings` + field, which will show a warning if a non-legacy address type is requested + when using uncompressed public keys. (#23113) + New RPCs -------- @@ -167,12 +156,42 @@ Tools and Utilities Wallet ------ +- `upgradewallet` will now automatically flush the keypool if upgrading + from a non-HD wallet to an HD wallet, to immediately start using the + newly-generated HD keys. (#23093) + +- a new RPC `newkeypool` has been added, which will flush (entirely + clear and refill) the keypool. (#23093) + +- `listunspent` now includes `ancestorcount`, `ancestorsize`, and + `ancestorfees` for each transaction output that is still in the mempool. + (#12677) + +- `lockunspent` now optionally takes a third parameter, `persistent`, which + causes the lock to be written persistently to the wallet database. This + allows UTXOs to remain locked even after node restarts or crashes. (#23065) + +- `receivedby` RPCs now include coinbase transactions. Previously, the + following wallet RPCs excluded coinbase transactions: `getreceivedbyaddress`, + `getreceivedbylabel`, `listreceivedbyaddress`, `listreceivedbylabel`. This + release changes this behaviour and returns results accounting for received + coins from coinbase outputs. The previous behaviour can be restored using the + configuration `-deprecatedrpc=exclude_coinbase`, but may be removed in a + future release. (#14707) + +- A new option in the same `receivedby` RPCs, `include_immature_coinbase` + (default=`false`), determines whether to account for immature coinbase + transactions. Immature coinbase transactions are coinbase transactions that + have 100 or fewer confirmations, and are not spendable. (#14707) + GUI changes ----------- - UTXOs which are locked via the GUI are now stored persistently in the wallet database, so are not lost on node shutdown or crash. (#23065) +- The Bech32 checkbox has been replaced with a dropdown for all address types, including the new Bech32m (BIP-350) standard for Taproot enabled wallets. + Low-level changes ================= diff --git a/doc/tor.md b/doc/tor.md index 8dc82ca91e..d23d8a1810 100644 --- a/doc/tor.md +++ b/doc/tor.md @@ -40,9 +40,11 @@ outgoing connections, but more is possible. -onion=ip:port Set the proxy server to use for Tor onion services. You do not need to set this if it's the same as -proxy. You can use -onion=0 to explicitly disable access to onion services. + ------------------------------------------------------------------ Note: Only the -proxy option sets the proxy for DNS requests; with -onion they will not route over Tor, so use -proxy if you have privacy concerns. + ------------------------------------------------------------------ -listen When using -proxy, listening is disabled by default. 
If you want to manually configure an onion service (see section 3), you'll diff --git a/src/arith_uint256.cpp b/src/arith_uint256.cpp index 0bebb0cf54..f7f62dfc68 100644 --- a/src/arith_uint256.cpp +++ b/src/arith_uint256.cpp @@ -96,7 +96,7 @@ base_uint<BITS>& base_uint<BITS>::operator/=(const base_uint& b) while (shift >= 0) { if (num >= div) { num -= div; - pn[shift / 32] |= (1 << (shift & 31)); // set a bit of the result. + pn[shift / 32] |= (1U << (shift & 31)); // set a bit of the result. } div >>= 1; // shift back. shift--; @@ -236,7 +236,7 @@ uint32_t arith_uint256::GetCompact(bool fNegative) const nCompact >>= 8; nSize++; } - assert((nCompact & ~0x007fffff) == 0); + assert((nCompact & ~0x007fffffU) == 0); assert(nSize < 256); nCompact |= nSize << 24; nCompact |= (fNegative && (nCompact & 0x007fffff) ? 0x00800000 : 0); diff --git a/src/bench/addrman.cpp b/src/bench/addrman.cpp index 2d94e835f0..3ca58b923e 100644 --- a/src/bench/addrman.cpp +++ b/src/bench/addrman.cpp @@ -16,6 +16,9 @@ static constexpr size_t NUM_SOURCES = 64; static constexpr size_t NUM_ADDRESSES_PER_SOURCE = 256; +static const std::vector<bool> EMPTY_ASMAP; +static constexpr uint32_t ADDRMAN_CONSISTENCY_CHECK_RATIO{0}; + static std::vector<CAddress> g_sources; static std::vector<std::vector<CAddress>> g_addresses; @@ -74,14 +77,14 @@ static void AddrManAdd(benchmark::Bench& bench) CreateAddresses(); bench.run([&] { - AddrMan addrman{/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0}; + AddrMan addrman{EMPTY_ASMAP, /*deterministic=*/false, ADDRMAN_CONSISTENCY_CHECK_RATIO}; AddAddressesToAddrMan(addrman); }); } static void AddrManSelect(benchmark::Bench& bench) { - AddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0); + AddrMan addrman{EMPTY_ASMAP, /*deterministic=*/false, ADDRMAN_CONSISTENCY_CHECK_RATIO}; FillAddrMan(addrman); @@ -93,7 +96,7 @@ static void AddrManSelect(benchmark::Bench& bench) static void AddrManGetAddr(benchmark::Bench& bench) { - AddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0); + AddrMan addrman{EMPTY_ASMAP, /*deterministic=*/false, ADDRMAN_CONSISTENCY_CHECK_RATIO}; FillAddrMan(addrman); @@ -122,7 +125,7 @@ static void AddrManAddThenGood(benchmark::Bench& bench) // // This has some overhead (exactly the result of AddrManAdd benchmark), but that overhead is constant so improvements in // AddrMan::Good() will still be noticeable. - AddrMan addrman(/* asmap */ std::vector<bool>(), /* deterministic */ false, /* consistency_check_ratio */ 0); + AddrMan addrman{EMPTY_ASMAP, /*deterministic=*/false, ADDRMAN_CONSISTENCY_CHECK_RATIO}; AddAddressesToAddrMan(addrman); markSomeAsGood(addrman); diff --git a/src/bench/bench.cpp b/src/bench/bench.cpp index f696396e12..d7b4228566 100644 --- a/src/bench/bench.cpp +++ b/src/bench/bench.cpp @@ -19,6 +19,8 @@ using namespace std::chrono_literals; const std::function<void(const std::string&)> G_TEST_LOG_FUN{}; +const std::function<std::vector<const char*>()> G_TEST_COMMAND_LINE_ARGUMENTS{}; + namespace { void GenerateTemplateResults(const std::vector<ankerl::nanobench::Result>& benchmarkResults, const std::string& filename, const char* tpl) @@ -88,7 +88,7 @@ static inline auto quoted(const std::string& s) // Allow safe path append operations. 
static inline path operator+(path p1, path p2) { - p1 += std::move(p2); + p1 += static_cast<boost::filesystem::path&&>(p2); return p1; } diff --git a/src/net.cpp b/src/net.cpp index 89a4aee5d9..0260e14da7 100644 --- a/src/net.cpp +++ b/src/net.cpp @@ -112,9 +112,9 @@ static const uint64_t RANDOMIZER_ID_ADDRCACHE = 0x1cf2e4ddd306dda9ULL; // SHA256 // bool fDiscover = true; bool fListen = true; -RecursiveMutex cs_mapLocalHost; -std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(cs_mapLocalHost); -static bool vfLimited[NET_MAX] GUARDED_BY(cs_mapLocalHost) = {}; +Mutex g_maplocalhost_mutex; +std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(g_maplocalhost_mutex); +static bool vfLimited[NET_MAX] GUARDED_BY(g_maplocalhost_mutex) = {}; std::string strSubVersion; void CConnman::AddAddrFetch(const std::string& strDest) @@ -137,7 +137,7 @@ bool GetLocal(CService& addr, const CNetAddr *paddrPeer) int nBestScore = -1; int nBestReachability = -1; { - LOCK(cs_mapLocalHost); + LOCK(g_maplocalhost_mutex); for (const auto& entry : mapLocalHost) { int nScore = entry.second.nScore; @@ -193,7 +193,7 @@ CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices) static int GetnScore(const CService& addr) { - LOCK(cs_mapLocalHost); + LOCK(g_maplocalhost_mutex); const auto it = mapLocalHost.find(addr); return (it != mapLocalHost.end()) ? it->second.nScore : 0; } @@ -264,7 +264,7 @@ bool AddLocal(const CService& addr_, int nScore) LogPrintf("AddLocal(%s,%i)\n", addr.ToString(), nScore); { - LOCK(cs_mapLocalHost); + LOCK(g_maplocalhost_mutex); const auto [it, is_newly_added] = mapLocalHost.emplace(addr, LocalServiceInfo()); LocalServiceInfo &info = it->second; if (is_newly_added || nScore >= info.nScore) { @@ -283,7 +283,7 @@ bool AddLocal(const CNetAddr &addr, int nScore) void RemoveLocal(const CService& addr) { - LOCK(cs_mapLocalHost); + LOCK(g_maplocalhost_mutex); LogPrintf("RemoveLocal(%s)\n", addr.ToString()); mapLocalHost.erase(addr); } @@ -292,13 +292,13 @@ void SetReachable(enum Network net, bool reachable) { if (net == NET_UNROUTABLE || net == NET_INTERNAL) return; - LOCK(cs_mapLocalHost); + LOCK(g_maplocalhost_mutex); vfLimited[net] = !reachable; } bool IsReachable(enum Network net) { - LOCK(cs_mapLocalHost); + LOCK(g_maplocalhost_mutex); return !vfLimited[net]; } @@ -310,7 +310,7 @@ bool IsReachable(const CNetAddr &addr) /** vote for a local address */ bool SeenLocal(const CService& addr) { - LOCK(cs_mapLocalHost); + LOCK(g_maplocalhost_mutex); const auto it = mapLocalHost.find(addr); if (it == mapLocalHost.end()) return false; ++it->second.nScore; @@ -321,7 +321,7 @@ bool SeenLocal(const CService& addr) /** check whether a given address is potentially local */ bool IsLocal(const CService& addr) { - LOCK(cs_mapLocalHost); + LOCK(g_maplocalhost_mutex); return mapLocalHost.count(addr) > 0; } @@ -553,12 +553,14 @@ std::string ConnectionTypeAsString(ConnectionType conn_type) CService CNode::GetAddrLocal() const { - LOCK(cs_addrLocal); + AssertLockNotHeld(m_addr_local_mutex); + LOCK(m_addr_local_mutex); return addrLocal; } void CNode::SetAddrLocal(const CService& addrLocalIn) { - LOCK(cs_addrLocal); + AssertLockNotHeld(m_addr_local_mutex); + LOCK(m_addr_local_mutex); if (addrLocal.IsValid()) { error("Addr local already set for node: %i. 
Refusing to change from %s to %s", id, addrLocal.ToString(), addrLocalIn.ToString()); } else { @@ -595,7 +597,7 @@ void CNode::CopyStats(CNodeStats& stats) X(m_addr_name); X(nVersion); { - LOCK(cs_SubVer); + LOCK(m_subver_mutex); X(cleanSubVer); } stats.fInbound = IsInboundConn(); @@ -657,7 +659,7 @@ bool CNode::ReceiveMsgBytes(Span<const uint8_t> msg_bytes, bool& complete) // Store received bytes per message command // to prevent a memory DOS, only allow valid commands - auto i = mapRecvBytesPerMsgCmd.find(msg.m_command); + auto i = mapRecvBytesPerMsgCmd.find(msg.m_type); if (i == mapRecvBytesPerMsgCmd.end()) { i = mapRecvBytesPerMsgCmd.find(NET_MESSAGE_COMMAND_OTHER); } @@ -747,7 +749,7 @@ CNetMessage V1TransportDeserializer::GetMessage(const std::chrono::microseconds CNetMessage msg(std::move(vRecv)); // store command string, time, and sizes - msg.m_command = hdr.GetCommand(); + msg.m_type = hdr.GetCommand(); msg.m_time = time; msg.m_message_size = hdr.nMessageSize; msg.m_raw_message_size = hdr.nMessageSize + CMessageHeader::HEADER_SIZE; @@ -760,7 +762,7 @@ CNetMessage V1TransportDeserializer::GetMessage(const std::chrono::microseconds // Check checksum and header command string if (memcmp(hash.begin(), hdr.pchChecksum, CMessageHeader::CHECKSUM_SIZE) != 0) { LogPrint(BCLog::NET, "Header error: Wrong checksum (%s, %u bytes), expected %s was %s, peer=%d\n", - SanitizeString(msg.m_command), msg.m_message_size, + SanitizeString(msg.m_type), msg.m_message_size, HexStr(Span{hash}.first(CMessageHeader::CHECKSUM_SIZE)), HexStr(hdr.pchChecksum), m_node_id); @@ -1878,8 +1880,8 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) auto start = GetTime<std::chrono::microseconds>(); // Minimum time before next feeler connection (in microseconds). - auto next_feeler = PoissonNextSend(start, FEELER_INTERVAL); - auto next_extra_block_relay = PoissonNextSend(start, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); + auto next_feeler = GetExponentialRand(start, FEELER_INTERVAL); + auto next_extra_block_relay = GetExponentialRand(start, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); const bool dnsseed = gArgs.GetBoolArg("-dnsseed", DEFAULT_DNSSEED); bool add_fixed_seeds = gArgs.GetBoolArg("-fixedseeds", DEFAULT_FIXEDSEEDS); @@ -1999,7 +2001,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) // // This is similar to the logic for trying extra outbound (full-relay) // peers, except: - // - we do this all the time on a poisson timer, rather than just when + // - we do this all the time on an exponential timer, rather than just when // our tip is stale // - we potentially disconnect our next-youngest block-relay-only peer, if our // newest block-relay-only peer delivers a block more recently. @@ -2008,10 +2010,10 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect) // Because we can promote these connections to block-relay-only // connections, they do not get their own ConnectionType enum // (similar to how we deal with extra outbound peers). 
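These timers are now drawn from `GetExponentialRand()`, whose implementation lands in `src/random.cpp` further down in this diff. As an illustration of what it samples, an inverse-CDF draw from an exponential distribution with mean `average_interval`, here is an equivalent sketch using the standard library instead of the project's RNG (`NextEventTime` is a hypothetical stand-in, not code from this diff):

```cpp
// Illustration only: an exponentially distributed "time of next event".
// The real helper uses Bitcoin Core's GetRand() and -log1p(), but it samples
// the same distribution with mean `average_interval`.
#include <chrono>
#include <random>

std::chrono::microseconds NextEventTime(std::chrono::microseconds now,
                                        std::chrono::seconds average_interval)
{
    static std::mt19937_64 rng{std::random_device{}()};
    // Rate parameter is 1 / mean; the draw is in (fractional) seconds.
    std::exponential_distribution<double> delay_s{1.0 / average_interval.count()};
    return now + std::chrono::duration_cast<std::chrono::microseconds>(
                     std::chrono::duration<double>{delay_s(rng)});
}
```

Because the exponential distribution is memoryless, the timing of one feeler, addr broadcast or inv trickle reveals nothing about when the next one is due, which is the property the old `Poisson*` names were referring to.
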
- next_extra_block_relay = PoissonNextSend(now, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); + next_extra_block_relay = GetExponentialRand(now, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL); conn_type = ConnectionType::BLOCK_RELAY; } else if (now > next_feeler) { - next_feeler = PoissonNextSend(now, FEELER_INTERVAL); + next_feeler = GetExponentialRand(now, FEELER_INTERVAL); conn_type = ConnectionType::FEELER; fFeeler = true; } else { @@ -3058,23 +3060,6 @@ bool CConnman::ForNode(NodeId id, std::function<bool(CNode* pnode)> func) return found != nullptr && NodeFullyConnected(found) && func(found); } -std::chrono::microseconds CConnman::PoissonNextSendInbound(std::chrono::microseconds now, std::chrono::seconds average_interval) -{ - if (m_next_send_inv_to_incoming.load() < now) { - // If this function were called from multiple threads simultaneously - // it would possible that both update the next send variable, and return a different result to their caller. - // This is not possible in practice as only the net processing thread invokes this function. - m_next_send_inv_to_incoming = PoissonNextSend(now, average_interval); - } - return m_next_send_inv_to_incoming; -} - -std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, std::chrono::seconds average_interval) -{ - double unscaled = -log1p(GetRand(1ULL << 48) * -0.0000000000000035527136788 /* -1/2^48 */); - return now + std::chrono::duration_cast<std::chrono::microseconds>(unscaled * average_interval + 0.5us); -} - CSipHasher CConnman::GetDeterministicRandomizer(uint64_t id) const { return CSipHasher(nSeed0, nSeed1).Write(id); @@ -230,8 +230,8 @@ struct LocalServiceInfo { uint16_t nPort; }; -extern RecursiveMutex cs_mapLocalHost; -extern std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(cs_mapLocalHost); +extern Mutex g_maplocalhost_mutex; +extern std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(g_maplocalhost_mutex); extern const std::string NET_MESSAGE_COMMAND_OTHER; typedef std::map<std::string, uint64_t> mapMsgCmdSize; //command, total bytes @@ -278,7 +278,7 @@ public: /** Transport protocol agnostic message container. * Ideally it should only contain receive time, payload, - * command and size. + * type and size. */ class CNetMessage { public: @@ -286,7 +286,7 @@ public: std::chrono::microseconds m_time{0}; //!< time of message receipt uint32_t m_message_size{0}; //!< size of the payload uint32_t m_raw_message_size{0}; //!< used wire size of the message (including header/checksum) - std::string m_command; + std::string m_type; CNetMessage(CDataStream&& recv_in) : m_recv(std::move(recv_in)) {} @@ -434,12 +434,12 @@ public: //! Whether this peer is an inbound onion, i.e. connected via our Tor onion service. const bool m_inbound_onion; std::atomic<int> nVersion{0}; - RecursiveMutex cs_SubVer; + Mutex m_subver_mutex; /** * cleanSubVer is a sanitized string of the user agent byte array we read * from the wire. This cleaned string can safely be logged or displayed. */ - std::string cleanSubVer GUARDED_BY(cs_SubVer){}; + std::string cleanSubVer GUARDED_BY(m_subver_mutex){}; bool m_prefer_evict{false}; // This peer is preferred for eviction. bool HasPermission(NetPermissionFlags permission) const { return NetPermissions::HasFlag(m_permissionFlags, permission); @@ -618,9 +618,9 @@ public: return m_greatest_common_version; } - CService GetAddrLocal() const; + CService GetAddrLocal() const LOCKS_EXCLUDED(m_addr_local_mutex); //! 
May not be called more than once - void SetAddrLocal(const CService& addrLocalIn); + void SetAddrLocal(const CService& addrLocalIn) LOCKS_EXCLUDED(m_addr_local_mutex); CNode* AddRef() { @@ -693,8 +693,8 @@ private: std::list<CNetMessage> vRecvMsg; // Used only by SocketHandler thread // Our address, as reported by the peer - CService addrLocal GUARDED_BY(cs_addrLocal); - mutable RecursiveMutex cs_addrLocal; + CService addrLocal GUARDED_BY(m_addr_local_mutex); + mutable Mutex m_addr_local_mutex; mapMsgCmdSize mapSendBytesPerMsgCmd GUARDED_BY(cs_vSend); mapMsgCmdSize mapRecvBytesPerMsgCmd GUARDED_BY(cs_vRecv); @@ -936,12 +936,6 @@ public: void WakeMessageHandler(); - /** Attempts to obfuscate tx time through exponentially distributed emitting. - Works assuming that a single interval is used. - Variable intervals will result in privacy decrease. - */ - std::chrono::microseconds PoissonNextSendInbound(std::chrono::microseconds now, std::chrono::seconds average_interval); - /** Return true if we should disconnect the peer for failing an inactivity check. */ bool ShouldRunInactivityChecks(const CNode& node, std::chrono::seconds now) const; @@ -1221,8 +1215,6 @@ private: */ std::atomic_bool m_start_extra_block_relay_peers{false}; - std::atomic<std::chrono::microseconds> m_next_send_inv_to_incoming{0us}; - /** * A vector of -bind=<address>:<port>=onion arguments each of which is * an address and port that are designated for incoming Tor connections. @@ -1270,9 +1262,6 @@ private: friend struct ConnmanTestMsg; }; -/** Return a timestamp in the future (in microseconds) for exponentially distributed events. */ -std::chrono::microseconds PoissonNextSend(std::chrono::microseconds now, std::chrono::seconds average_interval); - /** Dump binary message to file, with timestamp */ void CaptureMessage(const CAddress& addr, const std::string& msg_type, const Span<const unsigned char>& data, bool is_incoming); diff --git a/src/net_processing.cpp b/src/net_processing.cpp index 5bff29c097..cc23876af5 100644 --- a/src/net_processing.cpp +++ b/src/net_processing.cpp @@ -450,6 +450,8 @@ private: */ std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex); + std::atomic<std::chrono::microseconds> m_next_inv_to_inbounds{0us}; + /** Number of nodes with fSyncStarted. */ int nSyncStarted GUARDED_BY(cs_main) = 0; @@ -524,6 +526,15 @@ private: Mutex m_recent_confirmed_transactions_mutex; CRollingBloomFilter m_recent_confirmed_transactions GUARDED_BY(m_recent_confirmed_transactions_mutex){48'000, 0.000'001}; + /** + * For sending `inv`s to inbound peers, we use a single (exponentially + * distributed) timer for all peers. If we used a separate timer for each + * peer, a spy node could make multiple inbound connections to us to + * accurately determine when we received the transaction (and potentially + * determine the transaction's origin). 
*/ + std::chrono::microseconds NextInvToInbounds(std::chrono::microseconds now, + std::chrono::seconds average_interval); + /** Have we requested this block from a peer */ bool IsBlockRequested(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main); @@ -825,6 +836,18 @@ static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUS nPreferredDownload += state->fPreferredDownload; } +std::chrono::microseconds PeerManagerImpl::NextInvToInbounds(std::chrono::microseconds now, + std::chrono::seconds average_interval) +{ + if (m_next_inv_to_inbounds.load() < now) { + // If this function were called from multiple threads simultaneously + // it would possible that both update the next send variable, and return a different result to their caller. + // This is not possible in practice as only the net processing thread invokes this function. + m_next_inv_to_inbounds = GetExponentialRand(now, average_interval); + } + return m_next_inv_to_inbounds; +} + bool PeerManagerImpl::IsBlockRequested(const uint256& hash) { return mapBlocksInFlight.find(hash) != mapBlocksInFlight.end(); @@ -2636,7 +2659,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type, pfrom.nServices = nServices; pfrom.SetAddrLocal(addrMe); { - LOCK(pfrom.cs_SubVer); + LOCK(pfrom.m_subver_mutex); pfrom.cleanSubVer = cleanSubVer; } peer->m_starting_height = starting_height; @@ -4154,32 +4177,28 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt pfrom->GetId(), pfrom->m_addr_name.c_str(), pfrom->ConnectionTypeAsString().c_str(), - msg.m_command.c_str(), + msg.m_type.c_str(), msg.m_recv.size(), msg.m_recv.data() ); if (gArgs.GetBoolArg("-capturemessages", false)) { - CaptureMessage(pfrom->addr, msg.m_command, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true); + CaptureMessage(pfrom->addr, msg.m_type, MakeUCharSpan(msg.m_recv), /*is_incoming=*/true); } msg.SetVersion(pfrom->GetCommonVersion()); - const std::string& msg_type = msg.m_command; - - // Message size - unsigned int nMessageSize = msg.m_message_size; try { - ProcessMessage(*pfrom, msg_type, msg.m_recv, msg.m_time, interruptMsgProc); + ProcessMessage(*pfrom, msg.m_type, msg.m_recv, msg.m_time, interruptMsgProc); if (interruptMsgProc) return false; { LOCK(peer->m_getdata_requests_mutex); if (!peer->m_getdata_requests.empty()) fMoreWork = true; } } catch (const std::exception& e) { - LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg_type), nMessageSize, e.what(), typeid(e).name()); + LogPrint(BCLog::NET, "%s(%s, %u bytes): Exception '%s' (%s) caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size, e.what(), typeid(e).name()); } catch (...) { - LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg_type), nMessageSize); + LogPrint(BCLog::NET, "%s(%s, %u bytes): Unknown exception caught\n", __func__, SanitizeString(msg.m_type), msg.m_message_size); } return fMoreWork; @@ -4434,13 +4453,13 @@ void PeerManagerImpl::MaybeSendAddr(CNode& node, Peer& peer, std::chrono::micros FastRandomContext insecure_rand; PushAddress(peer, *local_addr, insecure_rand); } - peer.m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); + peer.m_next_local_addr_send = GetExponentialRand(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL); } // We sent an `addr` message to this peer recently. Nothing more to do. 
if (current_time <= peer.m_next_addr_send) return; - peer.m_next_addr_send = PoissonNextSend(current_time, AVG_ADDRESS_BROADCAST_INTERVAL); + peer.m_next_addr_send = GetExponentialRand(current_time, AVG_ADDRESS_BROADCAST_INTERVAL); if (!Assume(peer.m_addrs_to_send.size() <= MAX_ADDR_TO_SEND)) { // Should be impossible since we always check size before adding to @@ -4512,7 +4531,7 @@ void PeerManagerImpl::MaybeSendFeefilter(CNode& pto, std::chrono::microseconds c m_connman.PushMessage(&pto, CNetMsgMaker(pto.GetCommonVersion()).Make(NetMsgType::FEEFILTER, filterToSend)); pto.m_tx_relay->lastSentFeeFilter = filterToSend; } - pto.m_tx_relay->m_next_send_feefilter = PoissonNextSend(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL); + pto.m_tx_relay->m_next_send_feefilter = GetExponentialRand(current_time, AVG_FEEFILTER_BROADCAST_INTERVAL); } // If the fee filter has changed substantially and it's still more than MAX_FEEFILTER_CHANGE_DELAY // until scheduled broadcast, then move the broadcast to within MAX_FEEFILTER_CHANGE_DELAY. @@ -4792,9 +4811,9 @@ bool PeerManagerImpl::SendMessages(CNode* pto) if (pto->m_tx_relay->nNextInvSend < current_time) { fSendTrickle = true; if (pto->IsInboundConn()) { - pto->m_tx_relay->nNextInvSend = m_connman.PoissonNextSendInbound(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL); + pto->m_tx_relay->nNextInvSend = NextInvToInbounds(current_time, INBOUND_INVENTORY_BROADCAST_INTERVAL); } else { - pto->m_tx_relay->nNextInvSend = PoissonNextSend(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL); + pto->m_tx_relay->nNextInvSend = GetExponentialRand(current_time, OUTBOUND_INVENTORY_BROADCAST_INTERVAL); } } diff --git a/src/policy/packages.h b/src/policy/packages.h index d2744f1265..9f274f6b7d 100644 --- a/src/policy/packages.h +++ b/src/policy/packages.h @@ -25,6 +25,7 @@ enum class PackageValidationResult { PCKG_RESULT_UNSET = 0, //!< Initial value. The package has not yet been rejected. PCKG_POLICY, //!< The package itself is invalid (e.g. too many transactions). PCKG_TX, //!< At least one tx is invalid. + PCKG_MEMPOOL_ERROR, //!< Mempool logic error. }; /** A package is an ordered list of transactions. 
The transactions cannot conflict with (spend the diff --git a/src/qt/test/test_main.cpp b/src/qt/test/test_main.cpp index 11aa61c7fc..10b7e2ffe7 100644 --- a/src/qt/test/test_main.cpp +++ b/src/qt/test/test_main.cpp @@ -22,6 +22,7 @@ #include <QApplication> #include <QObject> #include <QTest> +#include <functional> #if defined(QT_STATICPLUGIN) #include <QtPlugin> @@ -43,6 +44,8 @@ using node::NodeContext; const std::function<void(const std::string&)> G_TEST_LOG_FUN{}; +const std::function<std::vector<const char*>()> G_TEST_COMMAND_LINE_ARGUMENTS{}; + // This is all you need to run all the tests int main(int argc, char* argv[]) { diff --git a/src/random.cpp b/src/random.cpp index 6eb06c5d47..5dae80fe31 100644 --- a/src/random.cpp +++ b/src/random.cpp @@ -19,6 +19,7 @@ #include <sync.h> // for Mutex #include <util/time.h> // for GetTimeMicros() +#include <cmath> #include <stdlib.h> #include <thread> @@ -714,3 +715,9 @@ void RandomInit() ReportHardwareRand(); } + +std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval) +{ + double unscaled = -std::log1p(GetRand(uint64_t{1} << 48) * -0.0000000000000035527136788 /* -1/2^48 */); + return now + std::chrono::duration_cast<std::chrono::microseconds>(unscaled * average_interval + 0.5us); +} diff --git a/src/random.h b/src/random.h index 0c6dc24983..5174c553fb 100644 --- a/src/random.h +++ b/src/random.h @@ -10,7 +10,7 @@ #include <crypto/common.h> #include <uint256.h> -#include <chrono> // For std::chrono::microseconds +#include <chrono> #include <cstdint> #include <limits> @@ -82,6 +82,18 @@ D GetRandomDuration(typename std::common_type<D>::type max) noexcept }; constexpr auto GetRandMicros = GetRandomDuration<std::chrono::microseconds>; constexpr auto GetRandMillis = GetRandomDuration<std::chrono::milliseconds>; + +/** + * Return a timestamp in the future sampled from an exponential distribution + * (https://en.wikipedia.org/wiki/Exponential_distribution). This distribution + * is memoryless and should be used for repeated network events (e.g. sending a + * certain type of message) to minimize leaking information to observers. + * + * The probability of an event occuring before time x is 1 - e^-(x/a) where a + * is the average interval between events. + * */ +std::chrono::microseconds GetExponentialRand(std::chrono::microseconds now, std::chrono::seconds average_interval); + int GetRandInt(int nMax) noexcept; uint256 GetRandHash() noexcept; diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp index 6fe990691a..3d7c00edfc 100644 --- a/src/rpc/net.cpp +++ b/src/rpc/net.cpp @@ -648,7 +648,7 @@ static RPCHelpMan getnetworkinfo() obj.pushKV("incrementalfee", ValueFromAmount(::incrementalRelayFee.GetFeePerK())); UniValue localAddresses(UniValue::VARR); { - LOCK(cs_mapLocalHost); + LOCK(g_maplocalhost_mutex); for (const std::pair<const CNetAddr, LocalServiceInfo> &item : mapLocalHost) { UniValue rec(UniValue::VOBJ); diff --git a/src/scheduler.cpp b/src/scheduler.cpp index 378f866e09..0b2ad3c553 100644 --- a/src/scheduler.cpp +++ b/src/scheduler.cpp @@ -136,7 +136,7 @@ bool CScheduler::AreThreadsServicingQueue() const void SingleThreadedSchedulerClient::MaybeScheduleProcessQueue() { { - LOCK(m_cs_callbacks_pending); + LOCK(m_callbacks_mutex); // Try to avoid scheduling too many copies here, but if we // accidentally have two ProcessQueue's scheduled at once its // not a big deal. 
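A pattern that recurs throughout this diff (`g_maplocalhost_mutex`, `m_subver_mutex` and `m_addr_local_mutex` in net.{h,cpp}, `m_callbacks_mutex` in the scheduler hunks around this point) is replacing a `RecursiveMutex` with a plain `Mutex` and relying on Clang thread-safety annotations instead of recursion. A minimal sketch of the pattern, assuming the `LOCK`/`AssertLockNotHeld` helpers and `GUARDED_BY`/`LOCKS_EXCLUDED` annotations from `src/sync.h` and `src/threadsafety.h` (the `Example` class itself is hypothetical):

```cpp
// Minimal sketch of the RecursiveMutex -> Mutex conversion pattern, assuming
// Bitcoin Core's sync.h/threadsafety.h macros.
#include <sync.h>

class Example
{
    mutable Mutex m_value_mutex;
    int m_value GUARDED_BY(m_value_mutex){0};

public:
    // With a non-recursive Mutex, re-locking from the same thread would
    // deadlock, so the declaration advertises that the caller must not hold
    // the lock, and the body asserts it at runtime as well.
    int Get() const LOCKS_EXCLUDED(m_value_mutex)
    {
        AssertLockNotHeld(m_value_mutex);
        LOCK(m_value_mutex);
        return m_value;
    }

    void Set(int value) LOCKS_EXCLUDED(m_value_mutex)
    {
        AssertLockNotHeld(m_value_mutex);
        LOCK(m_value_mutex);
        m_value = value;
    }
};
```
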
@@ -150,7 +150,7 @@ void SingleThreadedSchedulerClient::ProcessQueue() { std::function<void()> callback; { - LOCK(m_cs_callbacks_pending); + LOCK(m_callbacks_mutex); if (m_are_callbacks_running) return; if (m_callbacks_pending.empty()) return; m_are_callbacks_running = true; @@ -167,7 +167,7 @@ void SingleThreadedSchedulerClient::ProcessQueue() ~RAIICallbacksRunning() { { - LOCK(instance->m_cs_callbacks_pending); + LOCK(instance->m_callbacks_mutex); instance->m_are_callbacks_running = false; } instance->MaybeScheduleProcessQueue(); @@ -182,7 +182,7 @@ void SingleThreadedSchedulerClient::AddToProcessQueue(std::function<void()> func assert(m_pscheduler); { - LOCK(m_cs_callbacks_pending); + LOCK(m_callbacks_mutex); m_callbacks_pending.emplace_back(std::move(func)); } MaybeScheduleProcessQueue(); @@ -194,13 +194,13 @@ void SingleThreadedSchedulerClient::EmptyQueue() bool should_continue = true; while (should_continue) { ProcessQueue(); - LOCK(m_cs_callbacks_pending); + LOCK(m_callbacks_mutex); should_continue = !m_callbacks_pending.empty(); } } size_t SingleThreadedSchedulerClient::CallbacksPending() { - LOCK(m_cs_callbacks_pending); + LOCK(m_callbacks_mutex); return m_callbacks_pending.size(); } diff --git a/src/scheduler.h b/src/scheduler.h index 5366a5989c..bb0abfbf7a 100644 --- a/src/scheduler.h +++ b/src/scheduler.h @@ -119,9 +119,9 @@ class SingleThreadedSchedulerClient private: CScheduler* m_pscheduler; - RecursiveMutex m_cs_callbacks_pending; - std::list<std::function<void()>> m_callbacks_pending GUARDED_BY(m_cs_callbacks_pending); - bool m_are_callbacks_running GUARDED_BY(m_cs_callbacks_pending) = false; + Mutex m_callbacks_mutex; + std::list<std::function<void()>> m_callbacks_pending GUARDED_BY(m_callbacks_mutex); + bool m_are_callbacks_running GUARDED_BY(m_callbacks_mutex) = false; void MaybeScheduleProcessQueue(); void ProcessQueue(); diff --git a/src/test/README.md b/src/test/README.md index d03411c3ed..90d0e7102d 100644 --- a/src/test/README.md +++ b/src/test/README.md @@ -33,19 +33,31 @@ the `src/qt/test/test_main.cpp` file. ### Running individual tests -`test_bitcoin` has some built-in command-line arguments; for -example, to run just the `getarg_tests` verbosely: +`test_bitcoin` accepts the command line arguments from the boost framework. +For example, to run just the `getarg_tests` suite of tests: - test_bitcoin --log_level=all --run_test=getarg_tests -- DEBUG_LOG_OUT +```bash +test_bitcoin --log_level=all --run_test=getarg_tests +``` `log_level` controls the verbosity of the test framework, which logs when a -test case is entered, for example. The `DEBUG_LOG_OUT` after the two dashes -redirects the debug log, which would normally go to a file in the test datadir +test case is entered, for example. `test_bitcoin` also accepts the command +line arguments accepted by `bitcoind`. Use `--` to separate both types of +arguments: + +```bash +test_bitcoin --log_level=all --run_test=getarg_tests -- -printtoconsole=1 +``` + +The `-printtoconsole=1` after the two dashes redirects the debug log, which +would normally go to a file in the test datadir (`BasicTestingSetup::m_path_root`), to the standard terminal output. ... or to run just the doubledash test: - test_bitcoin --run_test=getarg_tests/doubledash +```bash +test_bitcoin --run_test=getarg_tests/doubledash +``` Run `test_bitcoin --help` for the full list. @@ -68,7 +80,7 @@ on failure. 
For running individual tests verbosely, refer to the section To write to logs from unit tests you need to use specific message methods provided by Boost. The simplest is `BOOST_TEST_MESSAGE`. -For debugging you can launch the `test_bitcoin` executable with `gdb`or `lldb` and +For debugging you can launch the `test_bitcoin` executable with `gdb` or `lldb` and start debugging, just like you would with any other program: ```bash @@ -95,7 +107,7 @@ Running the tests and hitting a segmentation fault should now produce a file cal `/proc/sys/kernel/core_pattern`). You can then explore the core dump using -``` bash +```bash gdb src/test/test_bitcoin core (gbd) bt # produce a backtrace for where a segfault occurred diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp index 752bd0af9e..efc30b6822 100644 --- a/src/test/addrman_tests.cpp +++ b/src/test/addrman_tests.cpp @@ -21,6 +21,15 @@ #include <string> using namespace std::literals; +using node::NodeContext; + +static const std::vector<bool> EMPTY_ASMAP; +static const bool DETERMINISTIC{true}; + +static int32_t GetCheckRatio(const NodeContext& node_ctx) +{ + return std::clamp<int32_t>(node_ctx.args->GetIntArg("-checkaddrman", 100), 0, 1000000); +} static CNetAddr ResolveIP(const std::string& ip) { @@ -49,17 +58,11 @@ static std::vector<bool> FromBytes(const unsigned char* source, int vector_size) return result; } -/* Utility function to create a deterministic addrman, as used in most tests */ -static std::unique_ptr<AddrMan> TestAddrMan(std::vector<bool> asmap = std::vector<bool>()) -{ - return std::make_unique<AddrMan>(asmap, /*deterministic=*/true, /*consistency_check_ratio=*/100); -} - BOOST_FIXTURE_TEST_SUITE(addrman_tests, BasicTestingSetup) BOOST_AUTO_TEST_CASE(addrman_simple) { - auto addrman = TestAddrMan(); + auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)); CNetAddr source = ResolveIP("252.2.2.2"); @@ -93,7 +96,7 @@ BOOST_AUTO_TEST_CASE(addrman_simple) BOOST_CHECK(addrman->size() >= 1); // Test: reset addrman and test AddrMan::Add multiple addresses works as expected - addrman = TestAddrMan(); + addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)); std::vector<CAddress> vAddr; vAddr.push_back(CAddress(ResolveService("250.1.1.3", 8333), NODE_NONE)); vAddr.push_back(CAddress(ResolveService("250.1.1.4", 8333), NODE_NONE)); @@ -103,7 +106,7 @@ BOOST_AUTO_TEST_CASE(addrman_simple) BOOST_AUTO_TEST_CASE(addrman_ports) { - auto addrman = TestAddrMan(); + auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)); CNetAddr source = ResolveIP("252.2.2.2"); @@ -132,7 +135,7 @@ BOOST_AUTO_TEST_CASE(addrman_ports) BOOST_AUTO_TEST_CASE(addrman_select) { - auto addrman = TestAddrMan(); + auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)); CNetAddr source = ResolveIP("252.2.2.2"); @@ -191,7 +194,7 @@ BOOST_AUTO_TEST_CASE(addrman_select) BOOST_AUTO_TEST_CASE(addrman_new_collisions) { - auto addrman = TestAddrMan(); + auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)); CNetAddr source = ResolveIP("252.2.2.2"); @@ -220,7 +223,7 @@ BOOST_AUTO_TEST_CASE(addrman_new_collisions) BOOST_AUTO_TEST_CASE(addrman_new_multiplicity) { - auto addrman = TestAddrMan(); + auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)); CAddress addr{CAddress(ResolveService("253.3.3.3", 8333), NODE_NONE)}; int64_t 
start_time{GetAdjustedTime()}; addr.nTime = start_time; @@ -252,7 +255,7 @@ BOOST_AUTO_TEST_CASE(addrman_new_multiplicity) BOOST_AUTO_TEST_CASE(addrman_tried_collisions) { - auto addrman = TestAddrMan(); + auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)); CNetAddr source = ResolveIP("252.2.2.2"); @@ -283,7 +286,7 @@ BOOST_AUTO_TEST_CASE(addrman_tried_collisions) BOOST_AUTO_TEST_CASE(addrman_getaddr) { - auto addrman = TestAddrMan(); + auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)); // Test: Sanity check, GetAddr should never return anything if addrman // is empty. @@ -604,9 +607,11 @@ BOOST_AUTO_TEST_CASE(addrman_serialization) { std::vector<bool> asmap1 = FromBytes(asmap_raw, sizeof(asmap_raw) * 8); - auto addrman_asmap1 = TestAddrMan(asmap1); - auto addrman_asmap1_dup = TestAddrMan(asmap1); - auto addrman_noasmap = TestAddrMan(); + const auto ratio = GetCheckRatio(m_node); + auto addrman_asmap1 = std::make_unique<AddrMan>(asmap1, DETERMINISTIC, ratio); + auto addrman_asmap1_dup = std::make_unique<AddrMan>(asmap1, DETERMINISTIC, ratio); + auto addrman_noasmap = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, ratio); + CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); CAddress addr = CAddress(ResolveService("250.1.1.1"), NODE_NONE); @@ -634,8 +639,8 @@ BOOST_AUTO_TEST_CASE(addrman_serialization) BOOST_CHECK(addr_pos1.position != addr_pos3.position); // deserializing non-asmaped peers.dat to asmaped addrman - addrman_asmap1 = TestAddrMan(asmap1); - addrman_noasmap = TestAddrMan(); + addrman_asmap1 = std::make_unique<AddrMan>(asmap1, DETERMINISTIC, ratio); + addrman_noasmap = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, ratio); addrman_noasmap->Add({addr}, default_source); stream << *addrman_noasmap; stream >> *addrman_asmap1; @@ -646,8 +651,8 @@ BOOST_AUTO_TEST_CASE(addrman_serialization) BOOST_CHECK(addr_pos4 == addr_pos2); // used to map to different buckets, now maps to the same bucket. - addrman_asmap1 = TestAddrMan(asmap1); - addrman_noasmap = TestAddrMan(); + addrman_asmap1 = std::make_unique<AddrMan>(asmap1, DETERMINISTIC, ratio); + addrman_noasmap = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, ratio); CAddress addr1 = CAddress(ResolveService("250.1.1.1"), NODE_NONE); CAddress addr2 = CAddress(ResolveService("250.2.1.1"), NODE_NONE); addrman_noasmap->Add({addr, addr2}, default_source); @@ -666,7 +671,7 @@ BOOST_AUTO_TEST_CASE(remove_invalid) { // Confirm that invalid addresses are ignored in unserialization. 
- auto addrman = TestAddrMan(); + auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)); CDataStream stream(SER_NETWORK, PROTOCOL_VERSION); const CAddress new1{ResolveService("5.5.5.5"), NODE_NONE}; @@ -698,14 +703,14 @@ BOOST_AUTO_TEST_CASE(remove_invalid) BOOST_REQUIRE(pos + sizeof(tried2_raw_replacement) <= stream.size()); memcpy(stream.data() + pos, tried2_raw_replacement, sizeof(tried2_raw_replacement)); - addrman = TestAddrMan(); + addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)); stream >> *addrman; BOOST_CHECK_EQUAL(addrman->size(), 2); } BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision) { - auto addrman = TestAddrMan(); + auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)); BOOST_CHECK(addrman->size() == 0); @@ -738,7 +743,7 @@ BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision) BOOST_AUTO_TEST_CASE(addrman_noevict) { - auto addrman = TestAddrMan(); + auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)); // Add 35 addresses. CNetAddr source = ResolveIP("252.2.2.2"); @@ -790,7 +795,7 @@ BOOST_AUTO_TEST_CASE(addrman_noevict) BOOST_AUTO_TEST_CASE(addrman_evictionworks) { - auto addrman = TestAddrMan(); + auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)); BOOST_CHECK(addrman->size() == 0); @@ -860,8 +865,7 @@ static CDataStream AddrmanToStream(const AddrMan& addrman) BOOST_AUTO_TEST_CASE(load_addrman) { - AddrMan addrman{/*asmap=*/ std::vector<bool>(), /*deterministic=*/ true, - /*consistency_check_ratio=*/ 100}; + AddrMan addrman{EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)}; CService addr1, addr2, addr3; BOOST_CHECK(Lookup("250.7.1.1", addr1, 8333, false)); @@ -880,7 +884,7 @@ BOOST_AUTO_TEST_CASE(load_addrman) // Test that the de-serialization does not throw an exception. CDataStream ssPeers1 = AddrmanToStream(addrman); bool exceptionThrown = false; - AddrMan addrman1(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/100); + AddrMan addrman1{EMPTY_ASMAP, !DETERMINISTIC, GetCheckRatio(m_node)}; BOOST_CHECK(addrman1.size() == 0); try { @@ -897,7 +901,7 @@ BOOST_AUTO_TEST_CASE(load_addrman) // Test that ReadFromStream creates an addrman with the correct number of addrs. CDataStream ssPeers2 = AddrmanToStream(addrman); - AddrMan addrman2(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/100); + AddrMan addrman2{EMPTY_ASMAP, !DETERMINISTIC, GetCheckRatio(m_node)}; BOOST_CHECK(addrman2.size() == 0); ReadFromStream(addrman2, ssPeers2); BOOST_CHECK(addrman2.size() == 3); @@ -935,7 +939,7 @@ BOOST_AUTO_TEST_CASE(load_addrman_corrupted) // Test that the de-serialization of corrupted peers.dat throws an exception. 
CDataStream ssPeers1 = MakeCorruptPeersDat(); bool exceptionThrown = false; - AddrMan addrman1(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/100); + AddrMan addrman1{EMPTY_ASMAP, !DETERMINISTIC, GetCheckRatio(m_node)}; BOOST_CHECK(addrman1.size() == 0); try { unsigned char pchMsgTmp[4]; @@ -951,7 +955,7 @@ BOOST_AUTO_TEST_CASE(load_addrman_corrupted) // Test that ReadFromStream fails if peers.dat is corrupt CDataStream ssPeers2 = MakeCorruptPeersDat(); - AddrMan addrman2(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/100); + AddrMan addrman2{EMPTY_ASMAP, !DETERMINISTIC, GetCheckRatio(m_node)}; BOOST_CHECK(addrman2.size() == 0); BOOST_CHECK_THROW(ReadFromStream(addrman2, ssPeers2), std::ios_base::failure); } @@ -959,7 +963,7 @@ BOOST_AUTO_TEST_CASE(load_addrman_corrupted) BOOST_AUTO_TEST_CASE(addrman_update_address) { // Tests updating nTime via Connected() and nServices via SetServices() - auto addrman = TestAddrMan(); + auto addrman = std::make_unique<AddrMan>(EMPTY_ASMAP, DETERMINISTIC, GetCheckRatio(m_node)); CNetAddr source{ResolveIP("252.2.2.2")}; CAddress addr{CAddress(ResolveService("250.1.1.1", 8333), NODE_NONE)}; diff --git a/src/test/arith_uint256_tests.cpp b/src/test/arith_uint256_tests.cpp index a7494be882..a923d38467 100644 --- a/src/test/arith_uint256_tests.cpp +++ b/src/test/arith_uint256_tests.cpp @@ -129,11 +129,11 @@ static void shiftArrayRight(unsigned char* to, const unsigned char* from, unsign { unsigned int F = (T+bitsToShift/8); if (F < arrayLength) - to[T] = from[F] >> (bitsToShift%8); + to[T] = uint8_t(from[F] >> (bitsToShift % 8)); else to[T] = 0; if (F + 1 < arrayLength) - to[T] |= from[(F+1)] << (8-bitsToShift%8); + to[T] |= uint8_t(from[(F + 1)] << (8 - bitsToShift % 8)); } } @@ -144,9 +144,9 @@ static void shiftArrayLeft(unsigned char* to, const unsigned char* from, unsigne if (T >= bitsToShift/8) { unsigned int F = T-bitsToShift/8; - to[T] = from[F] << (bitsToShift%8); + to[T] = uint8_t(from[F] << (bitsToShift % 8)); if (T >= bitsToShift/8+1) - to[T] |= from[F-1] >> (8-bitsToShift%8); + to[T] |= uint8_t(from[F - 1] >> (8 - bitsToShift % 8)); } else { to[T] = 0; @@ -202,7 +202,7 @@ BOOST_AUTO_TEST_CASE( unaryOperators ) // ! ~ - BOOST_CHECK(~ZeroL == MaxL); unsigned char TmpArray[32]; - for (unsigned int i = 0; i < 32; ++i) { TmpArray[i] = ~R1Array[i]; } + for (unsigned int i = 0; i < 32; ++i) { TmpArray[i] = uint8_t(~R1Array[i]); } BOOST_CHECK(arith_uint256V(std::vector<unsigned char>(TmpArray,TmpArray+32)) == (~R1L)); BOOST_CHECK(-ZeroL == ZeroL); @@ -215,7 +215,7 @@ BOOST_AUTO_TEST_CASE( unaryOperators ) // ! ~ - // Check if doing _A_ _OP_ _B_ results in the same as applying _OP_ onto each // element of Aarray and Barray, and then converting the result into an arith_uint256. 
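The `uint8_t(...)` casts above, and the `1U` and `0x007fffffU` literals in the arith_uint256.cpp hunk earlier in this diff, all target the same pitfall: operands narrower than `int` are promoted to `int` before `~`, `<<` and the other bitwise operators run, so results can go negative, trip narrowing warnings, or shift into the sign bit. A small self-contained illustration (hypothetical, not taken from this diff):

```cpp
// Why the explicit casts and unsigned literals matter: operands narrower than
// int are promoted to int before ~, <<, | and friends are applied.
#include <cstdint>

int main()
{
    unsigned char a = 0xFF;
    static_assert(sizeof(~a) == sizeof(int), "~a is computed as int");
    // ~a == -256 as int; storing it back into uint8_t narrows, which stricter
    // warning levels flag. The cast makes the intended truncation explicit.
    uint8_t b = uint8_t(~a);
    // 1 << 31 shifts into the sign bit of a 32-bit int (undefined or
    // implementation-defined, depending on the language standard), while
    // 1U << 31 is always well defined.
    uint32_t word = 1U << 31;
    return (b == 0 && word == 0x80000000U) ? 0 : 1;
}
```
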
#define CHECKBITWISEOPERATOR(_A_,_B_,_OP_) \ - for (unsigned int i = 0; i < 32; ++i) { TmpArray[i] = _A_##Array[i] _OP_ _B_##Array[i]; } \ + for (unsigned int i = 0; i < 32; ++i) { TmpArray[i] = uint8_t(_A_##Array[i] _OP_ _B_##Array[i]); } \ BOOST_CHECK(arith_uint256V(std::vector<unsigned char>(TmpArray,TmpArray+32)) == (_A_##L _OP_ _B_##L)); #define CHECKASSIGNMENTOPERATOR(_A_,_B_,_OP_) \ diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp index 3921a9d2d1..922fd8e513 100644 --- a/src/test/coins_tests.cpp +++ b/src/test/coins_tests.cpp @@ -393,11 +393,11 @@ BOOST_AUTO_TEST_CASE(updatecoins_simulation_test) // Update the expected result to know about the new output coins assert(tx.vout.size() == 1); const COutPoint outpoint(tx.GetHash(), 0); - result[outpoint] = Coin(tx.vout[0], height, CTransaction(tx).IsCoinBase()); + result[outpoint] = Coin{tx.vout[0], int(height), CTransaction(tx).IsCoinBase()}; // Call UpdateCoins on the top cache CTxUndo undo; - UpdateCoins(CTransaction(tx), *(stack.back()), undo, height); + UpdateCoins(CTransaction(tx), *(stack.back()), undo, int(height)); // Update the utxo set for future spends utxoset.insert(outpoint); diff --git a/src/test/descriptor_tests.cpp b/src/test/descriptor_tests.cpp index 9b58711eb8..5a3e382c3f 100644 --- a/src/test/descriptor_tests.cpp +++ b/src/test/descriptor_tests.cpp @@ -42,6 +42,7 @@ constexpr int HARDENED = 2; // Derivation needs access to private keys constexpr int UNSOLVABLE = 4; // This descriptor is not expected to be solvable constexpr int SIGNABLE = 8; // We can sign with this descriptor (this is not true when actual BIP32 derivation is used, as that's not integrated in our signing code) constexpr int DERIVE_HARDENED = 16; // The final derivation is hardened, i.e. ends with *' or *h +constexpr int MIXED_PUBKEYS = 32; /** Compare two descriptors. If only one of them has a checksum, the checksum is ignored. 
*/ bool EqualDescriptor(std::string a, std::string b) @@ -73,6 +74,18 @@ std::string UseHInsteadOfApostrophe(const std::string& desc) return ret; } +// Count the number of times the string "xpub" appears in a descriptor string +static size_t CountXpubs(const std::string& desc) +{ + size_t count = 0; + size_t p = desc.find("xpub", 0); + while (p != std::string::npos) { + count++; + p = desc.find("xpub", p + 1); + } + return count; +} + const std::set<std::vector<uint32_t>> ONLY_EMPTY{{}}; void DoCheck(const std::string& prv, const std::string& pub, const std::string& norm_prv, const std::string& norm_pub, int flags, const std::vector<std::vector<std::string>>& scripts, const std::optional<OutputType>& type, const std::set<std::vector<uint32_t>>& paths = ONLY_EMPTY, @@ -171,7 +184,8 @@ void DoCheck(const std::string& prv, const std::string& pub, const std::string& // Check whether keys are in the cache const auto& der_xpub_cache = desc_cache.GetCachedDerivedExtPubKeys(); const auto& parent_xpub_cache = desc_cache.GetCachedParentExtPubKeys(); - if ((flags & RANGE) && !(flags & DERIVE_HARDENED)) { + const size_t num_xpubs = CountXpubs(pub1); + if ((flags & RANGE) && !(flags & (DERIVE_HARDENED))) { // For ranged, unhardened derivation, None of the keys in origins should appear in the cache but the cache should have parent keys // But we can derive one level from each of those parent keys and find them all BOOST_CHECK(der_xpub_cache.empty()); @@ -183,13 +197,22 @@ void DoCheck(const std::string& prv, const std::string& pub, const std::string& xpub.Derive(der, i); pubkeys.insert(der.pubkey); } + int count_pks = 0; for (const auto& origin_pair : script_provider_cached.origins) { const CPubKey& pk = origin_pair.second.first; - BOOST_CHECK(pubkeys.count(pk) > 0); + count_pks += pubkeys.count(pk); } - } else if (pub1.find("xpub") != std::string::npos) { + if (flags & MIXED_PUBKEYS) { + BOOST_CHECK_EQUAL(num_xpubs, count_pks); + } else { + BOOST_CHECK_EQUAL(script_provider_cached.origins.size(), count_pks); + } + } else if (num_xpubs > 0) { // For ranged, hardened derivation, or not ranged, but has an xpub, all of the keys should appear in the cache - BOOST_CHECK(der_xpub_cache.size() + parent_xpub_cache.size() == script_provider_cached.origins.size()); + BOOST_CHECK(der_xpub_cache.size() + parent_xpub_cache.size() == num_xpubs); + if (!(flags & MIXED_PUBKEYS)) { + BOOST_CHECK(num_xpubs == script_provider_cached.origins.size()); + } // Get all of the derived pubkeys std::set<CPubKey> pubkeys; for (const auto& xpub_map_pair : der_xpub_cache) { @@ -206,12 +229,18 @@ void DoCheck(const std::string& prv, const std::string& pub, const std::string& xpub.Derive(der, i); pubkeys.insert(der.pubkey); } + int count_pks = 0; for (const auto& origin_pair : script_provider_cached.origins) { const CPubKey& pk = origin_pair.second.first; - BOOST_CHECK(pubkeys.count(pk) > 0); + count_pks += pubkeys.count(pk); } - } else { - // No xpub, nothing should be cached + if (flags & MIXED_PUBKEYS) { + BOOST_CHECK_EQUAL(num_xpubs, count_pks); + } else { + BOOST_CHECK_EQUAL(script_provider_cached.origins.size(), count_pks); + } + } else if (!(flags & MIXED_PUBKEYS)) { + // Only const pubkeys, nothing should be cached BOOST_CHECK(der_xpub_cache.empty()); BOOST_CHECK(parent_xpub_cache.empty()); } @@ -333,6 +362,11 @@ BOOST_AUTO_TEST_CASE(descriptor_test) Check("wpkh([ffffffff/13']xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*)", 
"wpkh([ffffffff/13']xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)", "wpkh([ffffffff/13']xprv9vHkqa6EV4sPZHYqZznhT2NPtPCjKuDKGY38FBWLvgaDx45zo9WQRUT3dKYnjwih2yJD9mkrocEZXo1ex8G81dwSM1fwqWpWkeS3v86pgKt/1/2/*)", "wpkh([ffffffff/13']xpub69H7F5d8KSRgmmdJg2KhpAK8SR3DjMwAdkxj3ZuxV27CprR9LgpeyGmXUbC6wb7ERfvrnKZjXoUmmDznezpbZb7ap6r1D3tgFxHmwMkQTPH/1/2/*)", RANGE, {{"0014326b2249e3a25d5dc60935f044ee835d090ba859"},{"0014af0bd98abc2f2cae66e36896a39ffe2d32984fb7"},{"00141fa798efd1cbf95cebf912c031b8a4a6e9fb9f27"}}, OutputType::BECH32, {{0x8000000DUL, 1, 2, 0}, {0x8000000DUL, 1, 2, 1}, {0x8000000DUL, 1, 2, 2}}); Check("sh(wpkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "sh(wpkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", "sh(wpkh(xprv9s21ZrQH143K3QTDL4LXw2F7HEK3wJUD2nW2nRk4stbPy6cq3jPPqjiChkVvvNKmPGJxWUtg6LnF5kejMRNNU3TGtRBeJgk33yuGBxrMPHi/10/20/30/40/*'))", "sh(wpkh(xpub661MyMwAqRbcFtXgS5sYJABqqG9YLmC4Q1Rdap9gSE8NqtwybGhePY2gZ29ESFjqJoCu1Rupje8YtGqsefD265TMg7usUDFdp6W1EGMcet8/10/20/30/40/*'))", RANGE | HARDENED | DERIVE_HARDENED, {{"a9149a4d9901d6af519b2a23d4a2f51650fcba87ce7b87"},{"a914bed59fc0024fae941d6e20a3b44a109ae740129287"},{"a9148483aa1116eb9c05c482a72bada4b1db24af654387"}}, OutputType::P2SH_SEGWIT, {{10, 20, 30, 40, 0x80000000UL}, {10, 20, 30, 40, 0x80000001UL}, {10, 20, 30, 40, 0x80000002UL}}); Check("combo(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*)", "combo(xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*)", "combo(xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*)", "combo(xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*)", RANGE, {{"2102df12b7035bdac8e3bab862a3a83d06ea6b17b6753d52edecba9be46f5d09e076ac","76a914f90e3178ca25f2c808dc76624032d352fdbdfaf288ac","0014f90e3178ca25f2c808dc76624032d352fdbdfaf2","a91408f3ea8c68d4a7585bf9e8bda226723f70e445f087"},{"21032869a233c9adff9a994e4966e5b821fd5bac066da6c3112488dc52383b4a98ecac","76a914a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b788ac","0014a8409d1b6dfb1ed2a3e8aa5e0ef2ff26b15b75b7","a91473e39884cb71ae4e5ac9739e9225026c99763e6687"}}, std::nullopt, {{0}, {1}}); + // Mixed xpubs and const pubkeys + Check("wsh(multi(1,xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/0,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))","wsh(multi(1,xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/0,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))","wsh(multi(1,xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/0,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1))","wsh(multi(1,xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/0,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd))", MIXED_PUBKEYS, {{"0020cb155486048b23a6da976d4c6fe071a2dbc8a7b57aaf225b8955f2e2a27b5f00"}},OutputType::BECH32,{{0},{}}); + // Mixed range xpubs and const pubkeys + 
Check("multi(1,xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)","multi(1,xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)","multi(1,xprvA2JDeKCSNNZky6uBCviVfJSKyQ1mDYahRjijr5idH2WwLsEd4Hsb2Tyh8RfQMuPh7f7RtyzTtdrbdqqsunu5Mm3wDvUAKRHSC34sJ7in334/*,L4rK1yDtCWekvXuE6oXD9jCYfFNV2cWRpVuPLBcCU2z8TrisoyY1)","multi(1,xpub6FHa3pjLCk84BayeJxFW2SP4XRrFd1JYnxeLeU8EqN3vDfZmbqBqaGJAyiLjTAwm6ZLRQUMv1ZACTj37sR62cfN7fe5JnJ7dh8zL4fiyLHV/*,03a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd)", RANGE | MIXED_PUBKEYS, {{"512102df12b7035bdac8e3bab862a3a83d06ea6b17b6753d52edecba9be46f5d09e0762103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd52ae"},{"5121032869a233c9adff9a994e4966e5b821fd5bac066da6c3112488dc52383b4a98ec2103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd52ae"},{"5121035d30b6c66dc1e036c45369da8287518cf7e0d6ed1e2b905171c605708f14ca032103a34b99f22c790c4e36b2b3c2c35a36db06226e41c692fc82b8b56ac1c540c5bd52ae"}}, std::nullopt,{{2},{1},{0},{}}); + CheckUnparsable("combo([012345678]xprvA1RpRA33e1JQ7ifknakTFpgNXPmW2YvmhqLQYMmrj4xJXXWYpDPS3xz7iAxn8L39njGVyuoseXzU6rcxFLJ8HFsTjSyQbLYnMpCqE2VbFWc)", "combo([012345678]xpub6ERApfZwUNrhLCkDtcHTcxd75RbzS1ed54G1LkBUHQVHQKqhMkhgbmJbZRkrgZw4koxb5JaHWkY4ALHY2grBGRjaDMzQLcgJvLJuZZvRcEL)", "Fingerprint is not 4 bytes (9 characters instead of 8 characters)"); // Too long key fingerprint CheckUnparsable("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/2147483648)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/2147483648)", "Key path value 2147483648 is out of range"); // BIP 32 path element overflow CheckUnparsable("pkh(xprv9s21ZrQH143K31xYSDQpPDxsXRTUcvj2iNHm5NUtrGiGG5e2DtALGdso3pGz6ssrdK4PFmM8NSpSBHNqPqm55Qn3LqFtT2emdEXVYsCzC2U/1aa)", "pkh(xpub661MyMwAqRbcFW31YEwpkMuc5THy2PSt5bDMsktWQcFF8syAmRUapSCGu8ED9W6oDMSgv6Zz8idoc4a6mr8BDzTJY47LJhkJ8UB7WEGuduB/1aa)", "Key path value '1aa' is not a valid uint32"); // Path is not valid uint diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp index 9c85c20e2b..3699abb597 100644 --- a/src/test/fuzz/addrman.cpp +++ b/src/test/fuzz/addrman.cpp @@ -11,8 +11,10 @@ #include <test/fuzz/FuzzedDataProvider.h> #include <test/fuzz/fuzz.h> #include <test/fuzz/util.h> +#include <test/util/setup_common.h> #include <time.h> #include <util/asmap.h> +#include <util/system.h> #include <cassert> #include <cstdint> @@ -20,16 +22,26 @@ #include <string> #include <vector> +namespace { +const BasicTestingSetup* g_setup; + +int32_t GetCheckRatio() +{ + return std::clamp<int32_t>(g_setup->m_node.args->GetIntArg("-checkaddrman", 0), 0, 1000000); +} +} // namespace + void initialize_addrman() { - SelectParams(CBaseChainParams::REGTEST); + static const auto testing_setup = MakeNoLogFileContext<>(CBaseChainParams::REGTEST); + g_setup = testing_setup.get(); } FUZZ_TARGET_INIT(data_stream_addr_man, initialize_addrman) { FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; CDataStream data_stream = ConsumeDataStream(fuzzed_data_provider); - AddrMan addr_man(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/0); + AddrMan addr_man{/*asmap=*/std::vector<bool>(), 
/*deterministic=*/false, GetCheckRatio()}; try { ReadFromStream(addr_man, data_stream); } catch (const std::exception&) { @@ -113,7 +125,7 @@ class AddrManDeterministic : public AddrMan { public: explicit AddrManDeterministic(std::vector<bool> asmap, FuzzedDataProvider& fuzzed_data_provider) - : AddrMan(std::move(asmap), /*deterministic=*/true, /*consistency_check_ratio=*/0) + : AddrMan{std::move(asmap), /*deterministic=*/true, GetCheckRatio()} { WITH_LOCK(m_impl->cs, m_impl->insecure_rand = FastRandomContext{ConsumeUInt256(fuzzed_data_provider)}); } diff --git a/src/test/fuzz/connman.cpp b/src/test/fuzz/connman.cpp index f87b6f1503..a14d28f4ef 100644 --- a/src/test/fuzz/connman.cpp +++ b/src/test/fuzz/connman.cpp @@ -12,21 +12,29 @@ #include <test/fuzz/fuzz.h> #include <test/fuzz/util.h> #include <test/util/setup_common.h> +#include <util/system.h> #include <util/translation.h> #include <cstdint> #include <vector> +namespace { +const BasicTestingSetup* g_setup; +} // namespace + void initialize_connman() { static const auto testing_setup = MakeNoLogFileContext<>(); + g_setup = testing_setup.get(); } FUZZ_TARGET_INIT(connman, initialize_connman) { FuzzedDataProvider fuzzed_data_provider{buffer.data(), buffer.size()}; SetMockTime(ConsumeTime(fuzzed_data_provider)); - AddrMan addrman(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/0); + AddrMan addrman(/*asmap=*/std::vector<bool>(), + /*deterministic=*/false, + g_setup->m_node.args->GetIntArg("-checkaddrman", 0)); CConnman connman{fuzzed_data_provider.ConsumeIntegral<uint64_t>(), fuzzed_data_provider.ConsumeIntegral<uint64_t>(), addrman, fuzzed_data_provider.ConsumeBool()}; CNetAddr random_netaddr; CNode random_node = ConsumeNode(fuzzed_data_provider); @@ -90,12 +98,6 @@ FUZZ_TARGET_INIT(connman, initialize_connman) (void)connman.OutboundTargetReached(fuzzed_data_provider.ConsumeBool()); }, [&] { - // Limit now to int32_t to avoid signed integer overflow - (void)connman.PoissonNextSendInbound( - std::chrono::microseconds{fuzzed_data_provider.ConsumeIntegral<int32_t>()}, - std::chrono::seconds{fuzzed_data_provider.ConsumeIntegral<int>()}); - }, - [&] { CSerializedNetMsg serialized_net_msg; serialized_net_msg.m_type = fuzzed_data_provider.ConsumeRandomLengthString(CMessageHeader::COMMAND_SIZE); serialized_net_msg.data = ConsumeRandomLengthByteVector(fuzzed_data_provider); diff --git a/src/test/fuzz/deserialize.cpp b/src/test/fuzz/deserialize.cpp index 8b4faf2f5f..ed6f172a2a 100644 --- a/src/test/fuzz/deserialize.cpp +++ b/src/test/fuzz/deserialize.cpp @@ -22,7 +22,9 @@ #include <pubkey.h> #include <script/keyorigin.h> #include <streams.h> +#include <test/util/setup_common.h> #include <undo.h> +#include <util/system.h> #include <version.h> #include <exception> @@ -35,8 +37,15 @@ using node::SnapshotMetadata; +namespace { +const BasicTestingSetup* g_setup; +} // namespace + void initialize_deserialize() { + static const auto testing_setup = MakeNoLogFileContext<>(); + g_setup = testing_setup.get(); + // Fuzzers using pubkey must hold an ECCVerifyHandle. 
static const ECCVerifyHandle verify_handle; } @@ -191,7 +200,9 @@ FUZZ_TARGET_DESERIALIZE(blockmerkleroot, { BlockMerkleRoot(block, &mutated); }) FUZZ_TARGET_DESERIALIZE(addrman_deserialize, { - AddrMan am(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/0); + AddrMan am(/*asmap=*/std::vector<bool>(), + /*deterministic=*/false, + g_setup->m_node.args->GetIntArg("-checkaddrman", 0)); DeserializeFromFuzzingInput(buffer, am); }) FUZZ_TARGET_DESERIALIZE(blockheader_deserialize, { diff --git a/src/test/fuzz/fuzz.cpp b/src/test/fuzz/fuzz.cpp index a33297e0ed..e9debd8c45 100644 --- a/src/test/fuzz/fuzz.cpp +++ b/src/test/fuzz/fuzz.cpp @@ -12,6 +12,7 @@ #include <cstdint> #include <exception> +#include <functional> #include <memory> #include <string> #include <unistd.h> @@ -19,6 +20,29 @@ const std::function<void(const std::string&)> G_TEST_LOG_FUN{}; +/** + * A copy of the command line arguments that start with `--`. + * First `LLVMFuzzerInitialize()` is called, which saves the arguments to `g_args`. + * Later, depending on the fuzz test, `G_TEST_COMMAND_LINE_ARGUMENTS()` may be + * called by `BasicTestingSetup` constructor to fetch those arguments and store + * them in `BasicTestingSetup::m_node::args`. + */ +static std::vector<const char*> g_args; + +static void SetArgs(int argc, char** argv) { + for (int i = 1; i < argc; ++i) { + // Only take into account arguments that start with `--`. The others are for the fuzz engine: + // `fuzz -runs=1 fuzz_seed_corpus/address_deserialize_v2 --checkaddrman=5` + if (strlen(argv[i]) > 2 && argv[i][0] == '-' && argv[i][1] == '-') { + g_args.push_back(argv[i]); + } + } +} + +const std::function<std::vector<const char*>()> G_TEST_COMMAND_LINE_ARGUMENTS = []() { + return g_args; +}; + std::map<std::string_view, std::tuple<TypeTestOneInput, TypeInitialize, TypeHidden>>& FuzzTargets() { static std::map<std::string_view, std::tuple<TypeTestOneInput, TypeInitialize, TypeHidden>> g_fuzz_targets; @@ -95,6 +119,7 @@ extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) // This function is used by libFuzzer extern "C" int LLVMFuzzerInitialize(int* argc, char*** argv) { + SetArgs(*argc, *argv); initialize(); return 0; } diff --git a/src/test/fuzz/p2p_transport_serialization.cpp b/src/test/fuzz/p2p_transport_serialization.cpp index a7b2b8bfc1..88c22ca305 100644 --- a/src/test/fuzz/p2p_transport_serialization.cpp +++ b/src/test/fuzz/p2p_transport_serialization.cpp @@ -70,13 +70,13 @@ FUZZ_TARGET_INIT(p2p_transport_serialization, initialize_p2p_transport_serializa const std::chrono::microseconds m_time{std::numeric_limits<int64_t>::max()}; bool reject_message{false}; CNetMessage msg = deserializer.GetMessage(m_time, reject_message); - assert(msg.m_command.size() <= CMessageHeader::COMMAND_SIZE); + assert(msg.m_type.size() <= CMessageHeader::COMMAND_SIZE); assert(msg.m_raw_message_size <= mutable_msg_bytes.size()); assert(msg.m_raw_message_size == CMessageHeader::HEADER_SIZE + msg.m_message_size); assert(msg.m_time == m_time); std::vector<unsigned char> header; - auto msg2 = CNetMsgMaker{msg.m_recv.GetVersion()}.Make(msg.m_command, MakeUCharSpan(msg.m_recv)); + auto msg2 = CNetMsgMaker{msg.m_recv.GetVersion()}.Make(msg.m_type, MakeUCharSpan(msg.m_recv)); serializer.prepareForTransport(msg2, header); } } diff --git a/src/test/fuzz/util.cpp b/src/test/fuzz/util.cpp index f89b597eed..47c2be3faa 100644 --- a/src/test/fuzz/util.cpp +++ b/src/test/fuzz/util.cpp @@ -281,8 +281,8 @@ CAmount ConsumeMoney(FuzzedDataProvider& 
fuzzed_data_provider, const std::option int64_t ConsumeTime(FuzzedDataProvider& fuzzed_data_provider, const std::optional<int64_t>& min, const std::optional<int64_t>& max) noexcept { // Avoid t=0 (1970-01-01T00:00:00Z) since SetMockTime(0) disables mocktime. - static const int64_t time_min = ParseISO8601DateTime("1970-01-01T00:00:01Z"); - static const int64_t time_max = ParseISO8601DateTime("9999-12-31T23:59:59Z"); + static const int64_t time_min{ParseISO8601DateTime("2000-01-01T00:00:01Z")}; + static const int64_t time_max{ParseISO8601DateTime("2100-12-31T23:59:59Z")}; return fuzzed_data_provider.ConsumeIntegralInRange<int64_t>(min.value_or(time_min), max.value_or(time_max)); } diff --git a/src/test/main.cpp b/src/test/main.cpp index 5885564074..1ad8fcce3a 100644 --- a/src/test/main.cpp +++ b/src/test/main.cpp @@ -11,6 +11,7 @@ #include <test/util/setup_common.h> +#include <functional> #include <iostream> /** Redirect debug log to unit_test.log files */ @@ -24,3 +25,17 @@ const std::function<void(const std::string&)> G_TEST_LOG_FUN = [](const std::str if (!should_log) return; std::cout << s; }; + +/** + * Retrieve the command line arguments from boost. + * Allows usage like: + * `test_bitcoin --run_test="net_tests/cnode_listen_port" -- -checkaddrman=1 -printtoconsole=1` + * which would return `["-checkaddrman=1", "-printtoconsole=1"]`. + */ +const std::function<std::vector<const char*>()> G_TEST_COMMAND_LINE_ARGUMENTS = []() { + std::vector<const char*> args; + for (int i = 1; i < boost::unit_test::framework::master_test_suite().argc; ++i) { + args.push_back(boost::unit_test::framework::master_test_suite().argv[i]); + } + return args; +}; diff --git a/src/test/net_tests.cpp b/src/test/net_tests.cpp index 86786af450..b0befe2f58 100644 --- a/src/test/net_tests.cpp +++ b/src/test/net_tests.cpp @@ -593,7 +593,7 @@ BOOST_AUTO_TEST_CASE(ipv4_peer_with_ipv6_addrMe_test) // that a normal IPv4 address is among the entries, but if this address is // !IsRoutable the undefined behavior is easier to trigger deterministically { - LOCK(cs_mapLocalHost); + LOCK(g_maplocalhost_mutex); in_addr ipv4AddrLocal; ipv4AddrLocal.s_addr = 0x0100007f; CNetAddr addr = CNetAddr(ipv4AddrLocal); diff --git a/src/test/pow_tests.cpp b/src/test/pow_tests.cpp index d5a4d3fd80..2f43ae52f7 100644 --- a/src/test/pow_tests.cpp +++ b/src/test/pow_tests.cpp @@ -73,7 +73,7 @@ BOOST_AUTO_TEST_CASE(CheckProofOfWork_test_overflow_target) { const auto consensus = CreateChainParams(*m_node.args, CBaseChainParams::MAIN)->GetConsensus(); uint256 hash; - unsigned int nBits = ~0x00800000; + unsigned int nBits{~0x00800000U}; hash.SetHex("0x1"); BOOST_CHECK(!CheckProofOfWork(hash, nBits, consensus)); } diff --git a/src/test/prevector_tests.cpp b/src/test/prevector_tests.cpp index 12c5848eaf..89814748fe 100644 --- a/src/test/prevector_tests.cpp +++ b/src/test/prevector_tests.cpp @@ -220,7 +220,7 @@ BOOST_AUTO_TEST_CASE(PrevectorTestInt) prevector_tester<8, int> test; for (int i = 0; i < 2048; i++) { if (InsecureRandBits(2) == 0) { - test.insert(InsecureRandRange(test.size() + 1), InsecureRand32()); + test.insert(InsecureRandRange(test.size() + 1), int(InsecureRand32())); } if (test.size() > 0 && InsecureRandBits(2) == 1) { test.erase(InsecureRandRange(test.size())); @@ -230,7 +230,7 @@ BOOST_AUTO_TEST_CASE(PrevectorTestInt) test.resize(new_size); } if (InsecureRandBits(3) == 3) { - test.insert(InsecureRandRange(test.size() + 1), 1 + InsecureRandBool(), InsecureRand32()); + test.insert(InsecureRandRange(test.size() + 1), 1 + 
InsecureRandBool(), int(InsecureRand32())); } if (InsecureRandBits(3) == 4) { int del = std::min<int>(test.size(), 1 + (InsecureRandBool())); @@ -238,7 +238,7 @@ BOOST_AUTO_TEST_CASE(PrevectorTestInt) test.erase(beg, beg + del); } if (InsecureRandBits(4) == 5) { - test.push_back(InsecureRand32()); + test.push_back(int(InsecureRand32())); } if (test.size() > 0 && InsecureRandBits(4) == 6) { test.pop_back(); @@ -247,7 +247,7 @@ BOOST_AUTO_TEST_CASE(PrevectorTestInt) int values[4]; int num = 1 + (InsecureRandBits(2)); for (int k = 0; k < num; k++) { - values[k] = InsecureRand32(); + values[k] = int(InsecureRand32()); } test.insert_range(InsecureRandRange(test.size() + 1), values, values + num); } @@ -263,13 +263,13 @@ BOOST_AUTO_TEST_CASE(PrevectorTestInt) test.shrink_to_fit(); } if (test.size() > 0) { - test.update(InsecureRandRange(test.size()), InsecureRand32()); + test.update(InsecureRandRange(test.size()), int(InsecureRand32())); } if (InsecureRandBits(10) == 11) { test.clear(); } if (InsecureRandBits(9) == 12) { - test.assign(InsecureRandBits(5), InsecureRand32()); + test.assign(InsecureRandBits(5), int(InsecureRand32())); } if (InsecureRandBits(3) == 3) { test.swap(); @@ -283,8 +283,8 @@ BOOST_AUTO_TEST_CASE(PrevectorTestInt) if (InsecureRandBits(5) == 19) { unsigned int num = 1 + (InsecureRandBits(4)); std::vector<int> values(num); - for (auto &v : values) { - v = InsecureRand32(); + for (int& v : values) { + v = int(InsecureRand32()); } test.resize_uninitialized(values); } diff --git a/src/test/sighash_tests.cpp b/src/test/sighash_tests.cpp index c16087bc1b..1601b02356 100644 --- a/src/test/sighash_tests.cpp +++ b/src/test/sighash_tests.cpp @@ -91,8 +91,9 @@ void static RandomScript(CScript &script) { script << oplist[InsecureRandRange(std::size(oplist))]; } -void static RandomTransaction(CMutableTransaction &tx, bool fSingle) { - tx.nVersion = InsecureRand32(); +void static RandomTransaction(CMutableTransaction& tx, bool fSingle) +{ + tx.nVersion = int(InsecureRand32()); tx.vin.clear(); tx.vout.clear(); tx.nLockTime = (InsecureRandBool()) ? 
InsecureRand32() : 0; @@ -126,7 +127,7 @@ BOOST_AUTO_TEST_CASE(sighash_test) int nRandomTests = 50000; #endif for (int i=0; i<nRandomTests; i++) { - int nHashType = InsecureRand32(); + int nHashType{int(InsecureRand32())}; CMutableTransaction txTo; RandomTransaction(txTo, (nHashType & 0x1f) == SIGHASH_SINGLE); CScript scriptCode; diff --git a/src/test/skiplist_tests.cpp b/src/test/skiplist_tests.cpp index 7ede79279f..6dadf09176 100644 --- a/src/test/skiplist_tests.cpp +++ b/src/test/skiplist_tests.cpp @@ -114,8 +114,8 @@ BOOST_AUTO_TEST_CASE(findearliestatleast_test) } else { // randomly choose something in the range [MTP, MTP*2] int64_t medianTimePast = vBlocksMain[i].GetMedianTimePast(); - int r = InsecureRandRange(medianTimePast); - vBlocksMain[i].nTime = r + medianTimePast; + int r{int(InsecureRandRange(medianTimePast))}; + vBlocksMain[i].nTime = uint32_t(r + medianTimePast); vBlocksMain[i].nTimeMax = std::max(vBlocksMain[i].nTime, vBlocksMain[i-1].nTimeMax); } } diff --git a/src/test/transaction_tests.cpp b/src/test/transaction_tests.cpp index 1fe51fadd4..4fb7f9c405 100644 --- a/src/test/transaction_tests.cpp +++ b/src/test/transaction_tests.cpp @@ -220,7 +220,7 @@ BOOST_AUTO_TEST_CASE(tx_valid) fValid = false; break; } - COutPoint outpoint(uint256S(vinput[0].get_str()), vinput[1].get_int()); + COutPoint outpoint{uint256S(vinput[0].get_str()), uint32_t(vinput[1].get_int())}; mapprevOutScriptPubKeys[outpoint] = ParseScript(vinput[2].get_str()); if (vinput.size() >= 4) { @@ -308,7 +308,7 @@ BOOST_AUTO_TEST_CASE(tx_invalid) fValid = false; break; } - COutPoint outpoint(uint256S(vinput[0].get_str()), vinput[1].get_int()); + COutPoint outpoint{uint256S(vinput[0].get_str()), uint32_t(vinput[1].get_int())}; mapprevOutScriptPubKeys[outpoint] = ParseScript(vinput[2].get_str()); if (vinput.size() >= 4) { diff --git a/src/test/txpackage_tests.cpp b/src/test/txpackage_tests.cpp index 6f78b43826..560efb6b42 100644 --- a/src/test/txpackage_tests.cpp +++ b/src/test/txpackage_tests.cpp @@ -327,4 +327,236 @@ BOOST_FIXTURE_TEST_CASE(package_submission_tests, TestChain100Setup) BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(tx_child->GetHash()))); } } + +// Tests for packages containing transactions that have same-txid-different-witness equivalents in +// the mempool. +BOOST_FIXTURE_TEST_CASE(package_witness_swap_tests, TestChain100Setup) +{ + // Mine blocks to mature coinbases. + mineBlocks(5); + LOCK(cs_main); + + // Transactions with a same-txid-different-witness transaction in the mempool should be ignored, + // and the mempool entry's wtxid returned. + CScript witnessScript = CScript() << OP_DROP << OP_TRUE; + CScript scriptPubKey = GetScriptForDestination(WitnessV0ScriptHash(witnessScript)); + auto mtx_parent = CreateValidMempoolTransaction(/*input_transaction=*/ m_coinbase_txns[0], /*vout=*/ 0, + /*input_height=*/ 0, /*input_signing_key=*/ coinbaseKey, + /*output_destination=*/ scriptPubKey, + /*output_amount=*/ CAmount(49 * COIN), /*submit=*/ false); + CTransactionRef ptx_parent = MakeTransactionRef(mtx_parent); + + // Make two children with the same txid but different witnesses. 
+ CScriptWitness witness1; + witness1.stack.push_back(std::vector<unsigned char>(1)); + witness1.stack.push_back(std::vector<unsigned char>(witnessScript.begin(), witnessScript.end())); + + CScriptWitness witness2(witness1); + witness2.stack.push_back(std::vector<unsigned char>(2)); + witness2.stack.push_back(std::vector<unsigned char>(witnessScript.begin(), witnessScript.end())); + + CKey child_key; + child_key.MakeNewKey(true); + CScript child_locking_script = GetScriptForDestination(WitnessV0KeyHash(child_key.GetPubKey())); + CMutableTransaction mtx_child1; + mtx_child1.nVersion = 1; + mtx_child1.vin.resize(1); + mtx_child1.vin[0].prevout.hash = ptx_parent->GetHash(); + mtx_child1.vin[0].prevout.n = 0; + mtx_child1.vin[0].scriptSig = CScript(); + mtx_child1.vin[0].scriptWitness = witness1; + mtx_child1.vout.resize(1); + mtx_child1.vout[0].nValue = CAmount(48 * COIN); + mtx_child1.vout[0].scriptPubKey = child_locking_script; + + CMutableTransaction mtx_child2{mtx_child1}; + mtx_child2.vin[0].scriptWitness = witness2; + + CTransactionRef ptx_child1 = MakeTransactionRef(mtx_child1); + CTransactionRef ptx_child2 = MakeTransactionRef(mtx_child2); + + // child1 and child2 have the same txid + BOOST_CHECK_EQUAL(ptx_child1->GetHash(), ptx_child2->GetHash()); + // child1 and child2 have different wtxids + BOOST_CHECK(ptx_child1->GetWitnessHash() != ptx_child2->GetWitnessHash()); + + // Try submitting Package1{parent, child1} and Package2{parent, child2} where the children are + // same-txid-different-witness. + { + const auto submit_witness1 = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, + {ptx_parent, ptx_child1}, /*test_accept=*/ false); + BOOST_CHECK_MESSAGE(submit_witness1.m_state.IsValid(), + "Package validation unexpectedly failed: " << submit_witness1.m_state.GetRejectReason()); + auto it_parent1 = submit_witness1.m_tx_results.find(ptx_parent->GetWitnessHash()); + auto it_child1 = submit_witness1.m_tx_results.find(ptx_child1->GetWitnessHash()); + BOOST_CHECK(it_parent1 != submit_witness1.m_tx_results.end()); + BOOST_CHECK_MESSAGE(it_parent1->second.m_state.IsValid(), + "Transaction unexpectedly failed: " << it_parent1->second.m_state.GetRejectReason()); + BOOST_CHECK(it_child1 != submit_witness1.m_tx_results.end()); + BOOST_CHECK_MESSAGE(it_child1->second.m_state.IsValid(), + "Transaction unexpectedly failed: " << it_child1->second.m_state.GetRejectReason()); + + BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_parent->GetHash()))); + BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_child1->GetHash()))); + + const auto submit_witness2 = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, + {ptx_parent, ptx_child2}, /*test_accept=*/ false); + BOOST_CHECK_MESSAGE(submit_witness2.m_state.IsValid(), + "Package validation unexpectedly failed: " << submit_witness2.m_state.GetRejectReason()); + auto it_parent2_deduped = submit_witness2.m_tx_results.find(ptx_parent->GetWitnessHash()); + auto it_child2 = submit_witness2.m_tx_results.find(ptx_child2->GetWitnessHash()); + BOOST_CHECK(it_parent2_deduped != submit_witness2.m_tx_results.end()); + BOOST_CHECK(it_parent2_deduped->second.m_result_type == MempoolAcceptResult::ResultType::MEMPOOL_ENTRY); + BOOST_CHECK(it_child2 != submit_witness2.m_tx_results.end()); + BOOST_CHECK(it_child2->second.m_result_type == MempoolAcceptResult::ResultType::DIFFERENT_WITNESS); + BOOST_CHECK_EQUAL(ptx_child1->GetWitnessHash(), it_child2->second.m_other_wtxid.value()); + + 
BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_child2->GetHash()))); + BOOST_CHECK(!m_node.mempool->exists(GenTxid::Wtxid(ptx_child2->GetWitnessHash()))); + } + + // Try submitting Package1{child2, grandchild} where child2 is same-txid-different-witness as + // the in-mempool transaction, child1. Since child1 exists in the mempool and its outputs are + // available, child2 should be ignored and grandchild should be accepted. + // + // This tests a potential censorship vector in which an attacker broadcasts a competing package + // where a parent's witness is mutated. The honest package should be accepted despite the fact + // that we don't allow witness replacement. + CKey grandchild_key; + grandchild_key.MakeNewKey(true); + CScript grandchild_locking_script = GetScriptForDestination(WitnessV0KeyHash(grandchild_key.GetPubKey())); + auto mtx_grandchild = CreateValidMempoolTransaction(/*input_transaction=*/ ptx_child2, /* vout=*/ 0, + /*input_height=*/ 0, /*input_signing_key=*/ child_key, + /*output_destination=*/ grandchild_locking_script, + /*output_amount=*/ CAmount(47 * COIN), /*submit=*/ false); + CTransactionRef ptx_grandchild = MakeTransactionRef(mtx_grandchild); + + // We already submitted child1 above. + { + const auto submit_spend_ignored = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, + {ptx_child2, ptx_grandchild}, /*test_accept=*/ false); + BOOST_CHECK_MESSAGE(submit_spend_ignored.m_state.IsValid(), + "Package validation unexpectedly failed: " << submit_spend_ignored.m_state.GetRejectReason()); + auto it_child2_ignored = submit_spend_ignored.m_tx_results.find(ptx_child2->GetWitnessHash()); + auto it_grandchild = submit_spend_ignored.m_tx_results.find(ptx_grandchild->GetWitnessHash()); + BOOST_CHECK(it_child2_ignored != submit_spend_ignored.m_tx_results.end()); + BOOST_CHECK(it_child2_ignored->second.m_result_type == MempoolAcceptResult::ResultType::DIFFERENT_WITNESS); + BOOST_CHECK(it_grandchild != submit_spend_ignored.m_tx_results.end()); + BOOST_CHECK(it_grandchild->second.m_result_type == MempoolAcceptResult::ResultType::VALID); + + BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_child2->GetHash()))); + BOOST_CHECK(!m_node.mempool->exists(GenTxid::Wtxid(ptx_child2->GetWitnessHash()))); + BOOST_CHECK(m_node.mempool->exists(GenTxid::Wtxid(ptx_grandchild->GetWitnessHash()))); + } + + // A package Package{parent1, parent2, parent3, child} where the parents are a mixture of + // identical-tx-in-mempool, same-txid-different-witness-in-mempool, and new transactions. + Package package_mixed; + + // Give all the parents anyone-can-spend scripts so we don't have to deal with signing the child. 
+ CScript acs_script = CScript() << OP_TRUE; + CScript acs_spk = GetScriptForDestination(WitnessV0ScriptHash(acs_script)); + CScriptWitness acs_witness; + acs_witness.stack.push_back(std::vector<unsigned char>(acs_script.begin(), acs_script.end())); + + // parent1 will already be in the mempool + auto mtx_parent1 = CreateValidMempoolTransaction(/*input_transaction=*/ m_coinbase_txns[1], /*vout=*/ 0, + /*input_height=*/ 0, /*input_signing_key=*/ coinbaseKey, + /*output_destination=*/ acs_spk, + /*output_amount=*/ CAmount(49 * COIN), /*submit=*/ true); + CTransactionRef ptx_parent1 = MakeTransactionRef(mtx_parent1); + package_mixed.push_back(ptx_parent1); + + // parent2 will have a same-txid-different-witness tx already in the mempool + CScript grandparent2_script = CScript() << OP_DROP << OP_TRUE; + CScript grandparent2_spk = GetScriptForDestination(WitnessV0ScriptHash(grandparent2_script)); + CScriptWitness parent2_witness1; + parent2_witness1.stack.push_back(std::vector<unsigned char>(1)); + parent2_witness1.stack.push_back(std::vector<unsigned char>(grandparent2_script.begin(), grandparent2_script.end())); + CScriptWitness parent2_witness2; + parent2_witness2.stack.push_back(std::vector<unsigned char>(2)); + parent2_witness2.stack.push_back(std::vector<unsigned char>(grandparent2_script.begin(), grandparent2_script.end())); + + // Create grandparent2 creating an output with multiple spending paths. Submit to mempool. + auto mtx_grandparent2 = CreateValidMempoolTransaction(/*input_transaction=*/ m_coinbase_txns[2], /* vout=*/ 0, + /*input_height=*/ 0, /*input_signing_key=*/ coinbaseKey, + /*output_destination=*/ grandparent2_spk, + /*output_amount=*/ CAmount(49 * COIN), /*submit=*/ true); + CTransactionRef ptx_grandparent2 = MakeTransactionRef(mtx_grandparent2); + + CMutableTransaction mtx_parent2_v1; + mtx_parent2_v1.nVersion = 1; + mtx_parent2_v1.vin.resize(1); + mtx_parent2_v1.vin[0].prevout.hash = ptx_grandparent2->GetHash(); + mtx_parent2_v1.vin[0].prevout.n = 0; + mtx_parent2_v1.vin[0].scriptSig = CScript(); + mtx_parent2_v1.vin[0].scriptWitness = parent2_witness1; + mtx_parent2_v1.vout.resize(1); + mtx_parent2_v1.vout[0].nValue = CAmount(48 * COIN); + mtx_parent2_v1.vout[0].scriptPubKey = acs_spk; + + CMutableTransaction mtx_parent2_v2{mtx_parent2_v1}; + mtx_parent2_v2.vin[0].scriptWitness = parent2_witness2; + + CTransactionRef ptx_parent2_v1 = MakeTransactionRef(mtx_parent2_v1); + CTransactionRef ptx_parent2_v2 = MakeTransactionRef(mtx_parent2_v2); + // Put parent2_v1 in the package, submit parent2_v2 to the mempool. 
+ const MempoolAcceptResult parent2_v2_result = m_node.chainman->ProcessTransaction(ptx_parent2_v2); + BOOST_CHECK(parent2_v2_result.m_result_type == MempoolAcceptResult::ResultType::VALID); + package_mixed.push_back(ptx_parent2_v1); + + // parent3 will be a new transaction + auto mtx_parent3 = CreateValidMempoolTransaction(/*input_transaction=*/ m_coinbase_txns[3], /*vout=*/ 0, + /*input_height=*/ 0, /*input_signing_key=*/ coinbaseKey, + /*output_destination=*/ acs_spk, + /*output_amount=*/ CAmount(49 * COIN), /*submit=*/ false); + CTransactionRef ptx_parent3 = MakeTransactionRef(mtx_parent3); + package_mixed.push_back(ptx_parent3); + + // child spends parent1, parent2, and parent3 + CKey mixed_grandchild_key; + mixed_grandchild_key.MakeNewKey(true); + CScript mixed_child_spk = GetScriptForDestination(WitnessV0KeyHash(mixed_grandchild_key.GetPubKey())); + + CMutableTransaction mtx_mixed_child; + mtx_mixed_child.vin.push_back(CTxIn(COutPoint(ptx_parent1->GetHash(), 0))); + mtx_mixed_child.vin.push_back(CTxIn(COutPoint(ptx_parent2_v1->GetHash(), 0))); + mtx_mixed_child.vin.push_back(CTxIn(COutPoint(ptx_parent3->GetHash(), 0))); + mtx_mixed_child.vin[0].scriptWitness = acs_witness; + mtx_mixed_child.vin[1].scriptWitness = acs_witness; + mtx_mixed_child.vin[2].scriptWitness = acs_witness; + mtx_mixed_child.vout.push_back(CTxOut(145 * COIN, mixed_child_spk)); + CTransactionRef ptx_mixed_child = MakeTransactionRef(mtx_mixed_child); + package_mixed.push_back(ptx_mixed_child); + + // Submit package: + // parent1 should be ignored + // parent2_v1 should be ignored (and v2 wtxid returned) + // parent3 should be accepted + // child should be accepted + { + const auto mixed_result = ProcessNewPackage(m_node.chainman->ActiveChainstate(), *m_node.mempool, package_mixed, false); + BOOST_CHECK_MESSAGE(mixed_result.m_state.IsValid(), mixed_result.m_state.GetRejectReason()); + auto it_parent1 = mixed_result.m_tx_results.find(ptx_parent1->GetWitnessHash()); + auto it_parent2 = mixed_result.m_tx_results.find(ptx_parent2_v1->GetWitnessHash()); + auto it_parent3 = mixed_result.m_tx_results.find(ptx_parent3->GetWitnessHash()); + auto it_child = mixed_result.m_tx_results.find(ptx_mixed_child->GetWitnessHash()); + BOOST_CHECK(it_parent1 != mixed_result.m_tx_results.end()); + BOOST_CHECK(it_parent2 != mixed_result.m_tx_results.end()); + BOOST_CHECK(it_parent3 != mixed_result.m_tx_results.end()); + BOOST_CHECK(it_child != mixed_result.m_tx_results.end()); + + BOOST_CHECK(it_parent1->second.m_result_type == MempoolAcceptResult::ResultType::MEMPOOL_ENTRY); + BOOST_CHECK(it_parent2->second.m_result_type == MempoolAcceptResult::ResultType::DIFFERENT_WITNESS); + BOOST_CHECK(it_parent3->second.m_result_type == MempoolAcceptResult::ResultType::VALID); + BOOST_CHECK(it_child->second.m_result_type == MempoolAcceptResult::ResultType::VALID); + BOOST_CHECK_EQUAL(ptx_parent2_v2->GetWitnessHash(), it_parent2->second.m_other_wtxid.value()); + + BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_parent1->GetHash()))); + BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_parent2_v1->GetHash()))); + BOOST_CHECK(!m_node.mempool->exists(GenTxid::Wtxid(ptx_parent2_v1->GetWitnessHash()))); + BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_parent3->GetHash()))); + BOOST_CHECK(m_node.mempool->exists(GenTxid::Txid(ptx_mixed_child->GetHash()))); + } +} BOOST_AUTO_TEST_SUITE_END() diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp index 87546f45f2..c968e4d124 100644 --- a/src/test/util/setup_common.cpp +++ 
b/src/test/util/setup_common.cpp @@ -42,6 +42,7 @@ #include <walletinitinterface.h> #include <functional> +#include <stdexcept> using node::BlockAssembler; using node::CalculateCacheSizes; @@ -88,7 +89,7 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve m_args{} { m_node.args = &gArgs; - const std::vector<const char*> arguments = Cat( + std::vector<const char*> arguments = Cat( { "dummy", "-printtoconsole=0", @@ -100,6 +101,9 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve "-debugexclude=leveldb", }, extra_args); + if (G_TEST_COMMAND_LINE_ARGUMENTS) { + arguments = Cat(arguments, G_TEST_COMMAND_LINE_ARGUMENTS()); + } util::ThreadRename("test"); fs::create_directories(m_path_root); m_args.ForceSetArg("-datadir", fs::PathToString(m_path_root)); @@ -108,9 +112,10 @@ BasicTestingSetup::BasicTestingSetup(const std::string& chainName, const std::ve { SetupServerArgs(*m_node.args); std::string error; - const bool success{m_node.args->ParseParameters(arguments.size(), arguments.data(), error)}; - assert(success); - assert(error.empty()); + if (!m_node.args->ParseParameters(arguments.size(), arguments.data(), error)) { + m_node.args->ClearArgs(); + throw std::runtime_error{error}; + } } SelectParams(chainName); SeedInsecureRand(); @@ -218,7 +223,9 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const throw std::runtime_error(strprintf("ActivateBestChain failed. (%s)", state.ToString())); } - m_node.addrman = std::make_unique<AddrMan>(/*asmap=*/std::vector<bool>(), /*deterministic=*/false, /*consistency_check_ratio=*/0); + m_node.addrman = std::make_unique<AddrMan>(/*asmap=*/std::vector<bool>(), + /*deterministic=*/false, + m_node.args->GetIntArg("-checkaddrman", 0)); m_node.banman = std::make_unique<BanMan>(m_args.GetDataDirBase() / "banlist", nullptr, DEFAULT_MISBEHAVING_BANTIME); m_node.connman = std::make_unique<CConnman>(0x1337, 0x1337, *m_node.addrman); // Deterministic randomness for tests. m_node.peerman = PeerManager::make(chainparams, *m_node.connman, *m_node.addrman, diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h index 58ffd77995..a1b7525cf4 100644 --- a/src/test/util/setup_common.h +++ b/src/test/util/setup_common.h @@ -19,12 +19,16 @@ #include <util/string.h> #include <util/vector.h> +#include <functional> #include <type_traits> #include <vector> /** This is connected to the logger. Can be used to redirect logs to any other log */ extern const std::function<void(const std::string&)> G_TEST_LOG_FUN; +/** Retrieve the command line arguments. */ +extern const std::function<std::vector<const char*>()> G_TEST_COMMAND_LINE_ARGUMENTS; + // Enable BOOST_CHECK_EQUAL for enum class types namespace std { template <typename T> diff --git a/src/txmempool.cpp b/src/txmempool.cpp index dc2769b81e..fb5652d0a0 100644 --- a/src/txmempool.cpp +++ b/src/txmempool.cpp @@ -116,10 +116,9 @@ size_t CTxMemPoolEntry::GetTxSize() const return GetVirtualTransactionSize(nTxWeight, sigOpCost); } -// Update the given tx for any in-mempool descendants. -// Assumes that CTxMemPool::m_children is correct for the given tx and all -// descendants. 
-void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendants, const std::set<uint256> &setExclude) +void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendants, + const std::set<uint256>& setExclude, std::set<uint256>& descendants_to_remove, + uint64_t ancestor_size_limit, uint64_t ancestor_count_limit) { CTxMemPoolEntry::Children stageEntries, descendants; stageEntries = updateIt->GetMemPoolChildrenConst(); @@ -156,17 +155,18 @@ void CTxMemPool::UpdateForDescendants(txiter updateIt, cacheMap &cachedDescendan cachedDescendants[updateIt].insert(mapTx.iterator_to(descendant)); // Update ancestor state for each descendant mapTx.modify(mapTx.iterator_to(descendant), update_ancestor_state(updateIt->GetTxSize(), updateIt->GetModifiedFee(), 1, updateIt->GetSigOpCost())); + // Don't directly remove the transaction here -- doing so would + // invalidate iterators in cachedDescendants. Mark it for removal + // by inserting into descendants_to_remove. + if (descendant.GetCountWithAncestors() > ancestor_count_limit || descendant.GetSizeWithAncestors() > ancestor_size_limit) { + descendants_to_remove.insert(descendant.GetTx().GetHash()); + } } } mapTx.modify(updateIt, update_descendant_state(modifySize, modifyFee, modifyCount)); } -// vHashesToUpdate is the set of transaction hashes from a disconnected block -// which has been re-added to the mempool. -// for each entry, look for descendants that are outside vHashesToUpdate, and -// add fee/size information for such descendants to the parent. -// for each such descendant, also update the ancestor state to include the parent. -void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashesToUpdate) +void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashesToUpdate, uint64_t ancestor_size_limit, uint64_t ancestor_count_limit) { AssertLockHeld(cs); // For each entry in vHashesToUpdate, store the set of in-mempool, but not @@ -178,6 +178,8 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes // accounted for in the state of their ancestors) std::set<uint256> setAlreadyIncluded(vHashesToUpdate.begin(), vHashesToUpdate.end()); + std::set<uint256> descendants_to_remove; + // Iterate in reverse, so that whenever we are looking at a transaction // we are sure that all in-mempool descendants have already been processed. // This maximizes the benefit of the descendant cache and guarantees that @@ -207,7 +209,15 @@ void CTxMemPool::UpdateTransactionsFromBlock(const std::vector<uint256> &vHashes } } } // release epoch guard for UpdateForDescendants - UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded); + UpdateForDescendants(it, mapMemPoolDescendantsToUpdate, setAlreadyIncluded, descendants_to_remove, ancestor_size_limit, ancestor_count_limit); + } + + for (const auto& txid : descendants_to_remove) { + // This txid may have been removed already in a prior call to removeRecursive. + // Therefore, we check that it is still in the mempool before removing it.
+ if (const std::optional<txiter> txiter = GetIter(txid)) { + removeRecursive((*txiter)->GetTx(), MemPoolRemovalReason::SIZELIMIT); + } } } diff --git a/src/txmempool.h b/src/txmempool.h index b8c508fd90..e7e5a3c402 100644 --- a/src/txmempool.h +++ b/src/txmempool.h @@ -312,16 +312,6 @@ public: } }; -struct update_lock_points -{ - explicit update_lock_points(const LockPoints& _lp) : lp(_lp) { } - - void operator() (CTxMemPoolEntry &e) { e.UpdateLockPoints(lp); } - -private: - const LockPoints& lp; -}; - // Multi_index tag names struct descendant_score {}; struct entry_time {}; @@ -599,10 +589,14 @@ public: void addUnchecked(const CTxMemPoolEntry& entry, setEntries& setAncestors, bool validFeeEstimate = true) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main); void removeRecursive(const CTransaction& tx, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs); - /** After reorg, check if mempool entries are now non-final, premature coinbase spends, or have - * invalid lockpoints. Update lockpoints and remove entries (and descendants of entries) that - * are no longer valid. */ - void removeForReorg(CChain& chain, std::function<bool(txiter)> check_final_and_mature) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main); + /** After reorg, filter the entries that would no longer be valid in the next block, and update + * the entries' cached LockPoints if needed. The mempool does not have any knowledge of + * consensus rules. It just applies the callable function and removes the ones for which it + * returns true. + * @param[in] filter_final_and_mature Predicate that checks the relevant validation rules + * and updates an entry's LockPoints. + * */ + void removeForReorg(CChain& chain, std::function<bool(txiter)> filter_final_and_mature) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main); void removeConflicts(const CTransaction& tx) EXCLUSIVE_LOCKS_REQUIRED(cs); void removeForBlock(const std::vector<CTransactionRef>& vtx, unsigned int nBlockHeight) EXCLUSIVE_LOCKS_REQUIRED(cs); @@ -642,16 +636,25 @@ public: */ void RemoveStaged(setEntries& stage, bool updateDescendants, MemPoolRemovalReason reason) EXCLUSIVE_LOCKS_REQUIRED(cs); - /** When adding transactions from a disconnected block back to the mempool, - * new mempool entries may have children in the mempool (which is generally - * not the case when otherwise adding transactions). - * UpdateTransactionsFromBlock() will find child transactions and update the - * descendant state for each transaction in vHashesToUpdate (excluding any - * child transactions present in vHashesToUpdate, which are already accounted - * for). Note: vHashesToUpdate should be the set of transactions from the - * disconnected block that have been accepted back into the mempool. + /** UpdateTransactionsFromBlock is called when adding transactions from a + * disconnected block back to the mempool; new mempool entries may have + * children in the mempool (which is generally not the case when otherwise + * adding transactions). + * @post updated descendant state for descendants of each transaction in + * vHashesToUpdate (excluding any child transactions present in + * vHashesToUpdate, which are already accounted for). Updated state + * includes adding fee/size information for such descendants to the + * parent and updating ancestor state to include the parent. + * + * @param[in] vHashesToUpdate The set of txids from the + * disconnected block that have been accepted back into the mempool.
+ * @param[in] ancestor_size_limit The maximum allowed size in virtual + * bytes of an entry and its ancestors + * @param[in] ancestor_count_limit The maximum allowed number of + * transactions including the entry and its ancestors. */ - void UpdateTransactionsFromBlock(const std::vector<uint256>& vHashesToUpdate) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main) LOCKS_EXCLUDED(m_epoch); + void UpdateTransactionsFromBlock(const std::vector<uint256>& vHashesToUpdate, + uint64_t ancestor_size_limit, uint64_t ancestor_count_limit) EXCLUSIVE_LOCKS_REQUIRED(cs, cs_main) LOCKS_EXCLUDED(m_epoch); /** Try to calculate all in-mempool ancestors of entry. * (these are all calculated including the tx itself) @@ -800,19 +803,38 @@ private: /** UpdateForDescendants is used by UpdateTransactionsFromBlock to update * the descendants for a single transaction that has been added to the * mempool but may have child transactions in the mempool, eg during a - * chain reorg. setExclude is the set of descendant transactions in the - * mempool that must not be accounted for (because any descendants in - * setExclude were added to the mempool after the transaction being - * updated and hence their state is already reflected in the parent - * state). + * chain reorg. + * + * @pre CTxMemPool::m_children is correct for the given tx and all + * descendants. + * @pre cachedDescendants is an accurate cache where each entry has all + * descendants of the corresponding key, including those that should + * be removed for violation of ancestor limits. + * @post if updateIt has any non-excluded descendants, cachedDescendants has + * a new cache line for updateIt. + * @post descendants_to_remove has a new entry for any descendant which exceeded + * ancestor limits relative to updateIt. * - * cachedDescendants will be updated with the descendants of the transaction - * being updated, so that future invocations don't need to walk the - * same transaction again, if encountered in another transaction chain. + * @param[in] updateIt the entry to update for its descendants + * @param[in,out] cachedDescendants a cache where each line corresponds to all + * descendants. It will be updated with the descendants of the transaction + * being updated, so that future invocations don't need to walk the same + * transaction again, if encountered in another transaction chain. + * @param[in] setExclude the set of descendant transactions in the mempool + * that must not be accounted for (because any descendants in setExclude + * were added to the mempool after the transaction being updated and hence + * their state is already reflected in the parent state). + * @param[out] descendants_to_remove Populated with the txids of entries that + * exceed ancestor limits. It's the responsibility of the caller to + * removeRecursive them. + * @param[in] ancestor_size_limit the max number of ancestral bytes allowed + * for any descendant + * @param[in] ancestor_count_limit the max number of ancestor transactions + * allowed for any descendant */ - void UpdateForDescendants(txiter updateIt, - cacheMap &cachedDescendants, - const std::set<uint256> &setExclude) EXCLUSIVE_LOCKS_REQUIRED(cs); + void UpdateForDescendants(txiter updateIt, cacheMap& cachedDescendants, + const std::set<uint256>& setExclude, std::set<uint256>& descendants_to_remove, + uint64_t ancestor_size_limit, uint64_t ancestor_count_limit) EXCLUSIVE_LOCKS_REQUIRED(cs); /** Update ancestors of hash to add/remove it as a descendant transaction. 
*/ void UpdateAncestorsOf(bool add, txiter hash, setEntries &setAncestors) EXCLUSIVE_LOCKS_REQUIRED(cs); /** Set ancestor state for an entry */ diff --git a/src/util/settings.cpp b/src/util/settings.cpp index 442a55ffb0..683b7ae652 100644 --- a/src/util/settings.cpp +++ b/src/util/settings.cpp @@ -112,7 +112,7 @@ bool WriteSettings(const fs::path& path, errors.emplace_back(strprintf("Error: Unable to open settings file %s for writing", fs::PathToString(path))); return false; } - file << out.write(/* prettyIndent= */ 1, /* indentLevel= */ 4) << std::endl; + file << out.write(/* prettyIndent= */ 4, /* indentLevel= */ 1) << std::endl; file.close(); return true; } diff --git a/src/util/system.cpp b/src/util/system.cpp index 8f35b7b6c6..e34cdc7fb9 100644 --- a/src/util/system.cpp +++ b/src/util/system.cpp @@ -6,11 +6,6 @@ #include <util/system.h> #ifdef ENABLE_EXTERNAL_SIGNER -#if defined(WIN32) && !defined(__kernel_entry) -// A workaround for boost 1.71 incompatibility with mingw-w64 compiler. -// For details see https://github.com/bitcoin/bitcoin/pull/22348. -#define __kernel_entry -#endif #include <boost/process.hpp> #endif // ENABLE_EXTERNAL_SIGNER diff --git a/src/validation.cpp b/src/validation.cpp index 5deb44699a..1b1d01a4c2 100644 --- a/src/validation.cpp +++ b/src/validation.cpp @@ -347,43 +347,59 @@ void CChainState::MaybeUpdateMempoolForReorg( // previously-confirmed transactions back to the mempool. // UpdateTransactionsFromBlock finds descendants of any transactions in // the disconnectpool that were added back and cleans up the mempool state. - m_mempool->UpdateTransactionsFromBlock(vHashUpdate); - - const auto check_final_and_mature = [this, flags=STANDARD_LOCKTIME_VERIFY_FLAGS](CTxMemPool::txiter it) + const uint64_t ancestor_count_limit = gArgs.GetIntArg("-limitancestorcount", DEFAULT_ANCESTOR_LIMIT); + const uint64_t ancestor_size_limit = gArgs.GetIntArg("-limitancestorsize", DEFAULT_ANCESTOR_SIZE_LIMIT) * 1000; + m_mempool->UpdateTransactionsFromBlock(vHashUpdate, ancestor_size_limit, ancestor_count_limit); + + // Predicate to use for filtering transactions in removeForReorg. + // Checks whether the transaction is still final and, if it spends a coinbase output, mature. + // Also updates valid entries' cached LockPoints if needed. + // If false, the tx is still valid and its lockpoints are updated. + // If true, the tx would be invalid in the next block; remove this entry and all of its descendants. + const auto filter_final_and_mature = [this, flags=STANDARD_LOCKTIME_VERIFY_FLAGS](CTxMemPool::txiter it) EXCLUSIVE_LOCKS_REQUIRED(m_mempool->cs, ::cs_main) { - bool should_remove = false; AssertLockHeld(m_mempool->cs); AssertLockHeld(::cs_main); const CTransaction& tx = it->GetTx(); + + // The transaction must be final. + if (!CheckFinalTx(m_chain.Tip(), tx, flags)) return true; LockPoints lp = it->GetLockPoints(); const bool validLP{TestLockPointValidity(m_chain, lp)}; CCoinsViewMemPool view_mempool(&CoinsTip(), *m_mempool); - if (!CheckFinalTx(m_chain.Tip(), tx, flags) - || !CheckSequenceLocks(m_chain.Tip(), view_mempool, tx, flags, &lp, validLP)) { - // Note if CheckSequenceLocks fails the LockPoints may still be invalid - // So it's critical that we remove the tx and not depend on the LockPoints. - should_remove = true; - } else if (it->GetSpendsCoinbase()) { + // CheckSequenceLocks checks if the transaction will be final in the next block to be + // created on top of the new chain. 
We use useExistingLockPoints=false so that, instead of + // using the information in lp (which might now refer to a block that no longer exists in + // the chain), it will update lp to contain LockPoints relevant to the new chain. + if (!CheckSequenceLocks(m_chain.Tip(), view_mempool, tx, flags, &lp, validLP)) { + // If CheckSequenceLocks fails, remove the tx and don't depend on the LockPoints. + return true; + } else if (!validLP) { + // If CheckSequenceLocks succeeded, it also updated the LockPoints. + // Now update the mempool entry lockpoints as well. + m_mempool->mapTx.modify(it, [&lp](CTxMemPoolEntry& e) { e.UpdateLockPoints(lp); }); + } + + // If the transaction spends any coinbase outputs, it must be mature. + if (it->GetSpendsCoinbase()) { for (const CTxIn& txin : tx.vin) { auto it2 = m_mempool->mapTx.find(txin.prevout.hash); if (it2 != m_mempool->mapTx.end()) continue; - const Coin &coin = CoinsTip().AccessCoin(txin.prevout); + const Coin& coin{CoinsTip().AccessCoin(txin.prevout)}; assert(!coin.IsSpent()); const auto mempool_spend_height{m_chain.Tip()->nHeight + 1}; - if (coin.IsSpent() || (coin.IsCoinBase() && mempool_spend_height - coin.nHeight < COINBASE_MATURITY)) { - should_remove = true; - break; + if (coin.IsCoinBase() && mempool_spend_height - coin.nHeight < COINBASE_MATURITY) { + return true; } } } - // CheckSequenceLocks updates lp. Update the mempool entry LockPoints. - if (!validLP) m_mempool->mapTx.modify(it, update_lock_points(lp)); - return should_remove; + // Transaction is still valid and cached LockPoints are updated. + return false; }; // We also need to remove any now-immature transactions - m_mempool->removeForReorg(m_chain, check_final_and_mature); + m_mempool->removeForReorg(m_chain, filter_final_and_mature); // Re-limit mempool size, in case we added any transactions LimitMempoolSize( *m_mempool, @@ -605,10 +621,10 @@ private: // Submit all transactions to the mempool and call ConsensusScriptChecks to add to the script // cache - should only be called after successful validation of all transactions in the package. - // The package may end up partially-submitted after size limitting; returns true if all + // The package may end up partially-submitted after size limiting; returns true if all // transactions are successfully added to the mempool, false otherwise. - bool FinalizePackage(const ATMPArgs& args, std::vector<Workspace>& workspaces, PackageValidationState& package_state, - std::map<const uint256, const MempoolAcceptResult>& results) + bool SubmitPackage(const ATMPArgs& args, std::vector<Workspace>& workspaces, PackageValidationState& package_state, + std::map<const uint256, const MempoolAcceptResult>& results) EXCLUSIVE_LOCKS_REQUIRED(cs_main, m_pool.cs); // Compare a package's feerate against minimum allowed. @@ -1047,12 +1063,17 @@ bool MemPoolAccept::Finalize(const ATMPArgs& args, Workspace& ws) return true; } -bool MemPoolAccept::FinalizePackage(const ATMPArgs& args, std::vector<Workspace>& workspaces, - PackageValidationState& package_state, - std::map<const uint256, const MempoolAcceptResult>& results) +bool MemPoolAccept::SubmitPackage(const ATMPArgs& args, std::vector<Workspace>& workspaces, + PackageValidationState& package_state, + std::map<const uint256, const MempoolAcceptResult>& results) { AssertLockHeld(cs_main); AssertLockHeld(m_pool.cs); + // Sanity check: none of the transactions should be in the mempool, and none of the transactions + // should have a same-txid-different-witness equivalent in the mempool. 
+ assert(std::all_of(workspaces.cbegin(), workspaces.cend(), [this](const auto& ws){ + return !m_pool.exists(GenTxid::Txid(ws.m_ptx->GetHash())); })); + bool all_submitted = true; // ConsensusScriptChecks adds to the script cache and is therefore consensus-critical; // CheckInputsFromMempoolAndCache asserts that transactions only spend coins available from the @@ -1062,18 +1083,24 @@ bool MemPoolAccept::FinalizePackage(const ATMPArgs& args, std::vector<Workspace> if (!ConsensusScriptChecks(args, ws)) { results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state)); // Since PolicyScriptChecks() passed, this should never fail. - all_submitted = Assume(false); + all_submitted = false; + package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR, + strprintf("BUG! PolicyScriptChecks succeeded but ConsensusScriptChecks failed: %s", + ws.m_ptx->GetHash().ToString())); } // Re-calculate mempool ancestors to call addUnchecked(). They may have changed since the // last calculation done in PreChecks, since package ancestors have already been submitted. - std::string err_string; + std::string unused_err_string; if(!m_pool.CalculateMemPoolAncestors(*ws.m_entry, ws.m_ancestors, m_limit_ancestors, m_limit_ancestor_size, m_limit_descendants, - m_limit_descendant_size, err_string)) { + m_limit_descendant_size, unused_err_string)) { results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state)); // Since PreChecks() and PackageMempoolChecks() both enforce limits, this should never fail. - all_submitted = Assume(false); + all_submitted = false; + package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR, + strprintf("BUG! Mempool ancestors or descendants were underestimated: %s", + ws.m_ptx->GetHash().ToString())); } // If we call LimitMempoolSize() for each individual Finalize(), the mempool will not take // the transaction's descendant feerate into account because it hasn't seen them yet. Also, @@ -1083,7 +1110,9 @@ bool MemPoolAccept::FinalizePackage(const ATMPArgs& args, std::vector<Workspace> if (!Finalize(args, ws)) { results.emplace(ws.m_ptx->GetWitnessHash(), MempoolAcceptResult::Failure(ws.m_state)); // Since LimitMempoolSize() won't be called, this should never fail. - all_submitted = Assume(false); + all_submitted = false; + package_state.Invalid(PackageValidationResult::PCKG_MEMPOOL_ERROR, + strprintf("BUG! Adding to mempool failed: %s", ws.m_ptx->GetHash().ToString())); } } @@ -1092,7 +1121,6 @@ bool MemPoolAccept::FinalizePackage(const ATMPArgs& args, std::vector<Workspace> LimitMempoolSize(m_pool, m_active_chainstate.CoinsTip(), gArgs.GetIntArg("-maxmempool", DEFAULT_MAX_MEMPOOL_SIZE) * 1000000, std::chrono::hours{gArgs.GetIntArg("-mempoolexpiry", DEFAULT_MEMPOOL_EXPIRY)}); - if (!all_submitted) return false; // Find the wtxids of the transactions that made it into the mempool. Allow partial submission, // but don't report success unless they all made it into the mempool. @@ -1197,8 +1225,8 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptMultipleTransactions(const std:: if (args.m_test_accept) return PackageMempoolAcceptResult(package_state, std::move(results)); - if (!FinalizePackage(args, workspaces, package_state, results)) { - package_state.Invalid(PackageValidationResult::PCKG_TX, "submission failed"); + if (!SubmitPackage(args, workspaces, package_state, results)) { + // PackageValidationState filled in by SubmitPackage(). 
return PackageMempoolAcceptResult(package_state, std::move(results)); } @@ -1223,11 +1251,13 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package, return PackageMempoolAcceptResult(package_state, {}); } - const auto& child = package[package.size() - 1]; + // IsChildWithParents() guarantees the package is > 1 transactions. + assert(package.size() > 1); // The package must be 1 child with all of its unconfirmed parents. The package is expected to // be sorted, so the last transaction is the child. + const auto& child = package.back(); std::unordered_set<uint256, SaltedTxidHasher> unconfirmed_parent_txids; - std::transform(package.cbegin(), package.end() - 1, + std::transform(package.cbegin(), package.cend() - 1, std::inserter(unconfirmed_parent_txids, unconfirmed_parent_txids.end()), [](const auto& tx) { return tx->GetHash(); }); @@ -1259,10 +1289,14 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package, LOCK(m_pool.cs); std::map<const uint256, const MempoolAcceptResult> results; - // As node operators are free to set their mempool policies however they please, it's possible - // for package transaction(s) to already be in the mempool, and we don't want to reject the - // entire package in that case (as that could be a censorship vector). Filter the transactions - // that are already in mempool and add their information to results, since we already have them. + // Node operators are free to set their mempool policies however they please, nodes may receive + // transactions in different orders, and malicious counterparties may try to take advantage of + // policy differences to pin or delay propagation of transactions. As such, it's possible for + // some package transaction(s) to already be in the mempool, and we don't want to reject the + // entire package in that case (as that could be a censorship vector). De-duplicate the + // transactions that are already in the mempool, and only call AcceptMultipleTransactions() with + // the new transactions. This ensures we don't double-count transaction counts and sizes when + // checking ancestor/descendant limits, or double-count transaction fees for fee-related policy. std::vector<CTransactionRef> txns_new; for (const auto& tx : package) { const auto& wtxid = tx->GetWitnessHash(); @@ -1283,9 +1317,10 @@ PackageMempoolAcceptResult MemPoolAccept::AcceptPackage(const Package& package, // transaction for the mempool one. Note that we are ignoring the validity of the // package transaction passed in. // TODO: allow witness replacement in packages. - auto iter = m_pool.GetIter(wtxid); + auto iter = m_pool.GetIter(txid); assert(iter != std::nullopt); - results.emplace(txid, MempoolAcceptResult::MempoolTx(iter.value()->GetTxSize(), iter.value()->GetFee())); + // Provide the wtxid of the mempool tx so that the caller can look it up in the mempool. + results.emplace(wtxid, MempoolAcceptResult::MempoolTxDifferentWitness(iter.value()->GetTx().GetWitnessHash())); } else { // Transaction does not already exist in the mempool. txns_new.push_back(tx); @@ -1352,12 +1387,12 @@ PackageMempoolAcceptResult ProcessNewPackage(CChainState& active_chainstate, CTx }(); // Uncache coins pertaining to transactions that were not submitted to the mempool. - // Ensure the coins cache is still within limits. if (test_accept || result.m_state.IsInvalid()) { for (const COutPoint& hashTx : coins_to_uncache) { active_chainstate.CoinsTip().Uncache(hashTx); } } + // Ensure the coins cache is still within limits. 
BlockValidationState state_dummy; active_chainstate.FlushStateToDisk(state_dummy, FlushStateMode::PERIODIC); return result; @@ -3528,7 +3563,10 @@ bool ChainstateManager::ProcessNewBlockHeaders(const std::vector<CBlockHeader>& } if (NotifyHeaderTip(ActiveChainstate())) { if (ActiveChainstate().IsInitialBlockDownload() && ppindex && *ppindex) { - LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", (*ppindex)->nHeight, 100.0/((*ppindex)->nHeight+(GetAdjustedTime() - (*ppindex)->GetBlockTime()) / Params().GetConsensus().nPowTargetSpacing) * (*ppindex)->nHeight); + const CBlockIndex& last_accepted{**ppindex}; + const int64_t blocks_left{(GetTime() - last_accepted.GetBlockTime()) / chainparams.GetConsensus().nPowTargetSpacing}; + const double progress{100.0 * last_accepted.nHeight / (last_accepted.nHeight + blocks_left)}; + LogPrintf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", last_accepted.nHeight, progress); } } return true; diff --git a/src/validation.h b/src/validation.h index edbd68f783..6e9912cada 100644 --- a/src/validation.h +++ b/src/validation.h @@ -163,8 +163,12 @@ struct MempoolAcceptResult { VALID, //!> Fully validated, valid. INVALID, //!> Invalid. MEMPOOL_ENTRY, //!> Valid, transaction was already in the mempool. + DIFFERENT_WITNESS, //!> Not validated. A same-txid-different-witness tx (see m_other_wtxid) already exists in the mempool and was not replaced. }; + /** Result type. Present in all MempoolAcceptResults. */ const ResultType m_result_type; + + /** Contains information about why the transaction failed. */ const TxValidationState m_state; // The following fields are only present when m_result_type = ResultType::VALID or MEMPOOL_ENTRY @@ -175,6 +179,10 @@ struct MempoolAcceptResult { /** Raw base fees in satoshis. */ const std::optional<CAmount> m_base_fees; + // The following field is only present when m_result_type = ResultType::DIFFERENT_WITNESS + /** The wtxid of the transaction in the mempool which has the same txid but different witness. */ + const std::optional<uint256> m_other_wtxid; + static MempoolAcceptResult Failure(TxValidationState state) { return MempoolAcceptResult(state); } @@ -187,6 +195,10 @@ struct MempoolAcceptResult { return MempoolAcceptResult(vsize, fees); } + static MempoolAcceptResult MempoolTxDifferentWitness(const uint256& other_wtxid) { + return MempoolAcceptResult(other_wtxid); + } + // Private constructors. Use static methods MempoolAcceptResult::Success, etc. to construct. private: /** Constructor for failure case */ @@ -203,6 +215,10 @@ private: /** Constructor for already-in-mempool case. It wouldn't replace any transactions. */ explicit MempoolAcceptResult(int64_t vsize, CAmount fees) : m_result_type(ResultType::MEMPOOL_ENTRY), m_vsize{vsize}, m_base_fees(fees) {} + + /** Constructor for witness-swapped case. */ + explicit MempoolAcceptResult(const uint256& other_wtxid) + : m_result_type(ResultType::DIFFERENT_WITNESS), m_other_wtxid(other_wtxid) {} }; /** @@ -212,7 +228,7 @@ struct PackageMempoolAcceptResult { const PackageValidationState m_state; /** - * Map from (w)txid to finished MempoolAcceptResults. The client is responsible + * Map from wtxid to finished MempoolAcceptResults. The client is responsible * for keeping track of the transaction objects themselves. If a result is not * present, it means validation was unfinished for that transaction. If there * was a package-wide error (see result in m_state), m_tx_results will be empty. 
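For context on the new DIFFERENT_WITNESS result type added to validation.h above, here is a minimal caller-side sketch. It is not part of this patch: the helper name HandleResult and the log text are hypothetical, and it assumes compilation inside the Bitcoin Core tree so that MempoolAcceptResult and LogPrintf are available. It only shows how code consuming m_tx_results might branch on the result type and read m_other_wtxid.

```cpp
#include <logging.h>
#include <validation.h>

// Hypothetical helper, not part of the patch: inspect one entry of
// PackageMempoolAcceptResult::m_tx_results.
void HandleResult(const MempoolAcceptResult& result)
{
    switch (result.m_result_type) {
    case MempoolAcceptResult::ResultType::VALID:
    case MempoolAcceptResult::ResultType::MEMPOOL_ENTRY:
        // vsize and base fees are only populated for these two result types.
        LogPrintf("accepted or already in mempool: vsize=%d fee=%d\n",
                  *result.m_vsize, *result.m_base_fees);
        break;
    case MempoolAcceptResult::ResultType::DIFFERENT_WITNESS:
        // A same-txid-different-witness transaction is already in the mempool;
        // m_other_wtxid lets the caller look that entry up.
        LogPrintf("same txid with different witness already in mempool: wtxid=%s\n",
                  result.m_other_wtxid->ToString());
        break;
    case MempoolAcceptResult::ResultType::INVALID:
        LogPrintf("rejected: %s\n", result.m_state.ToString());
        break;
    }
}
```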
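The ProcessNewBlockHeaders hunk above replaces the one-line progress expression with named intermediates (blocks_left, progress). A standalone sketch of the same arithmetic, using purely made-up numbers, shows what the log line ends up printing:

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    // All values below are hypothetical, chosen only to illustrate the arithmetic.
    const int64_t now = 1700000000;        // current time (GetTime())
    const int64_t tip_time = 1698800000;   // timestamp of the last accepted header
    const int64_t spacing = 600;           // nPowTargetSpacing: one block per 10 minutes
    const int height = 760000;             // height of the last accepted header

    // Roughly how many headers are still expected, given the time gap.
    const int64_t blocks_left = (now - tip_time) / spacing;          // 1200000 / 600 = 2000
    const double progress = 100.0 * height / (height + blocks_left); // ~99.74%
    std::printf("Synchronizing blockheaders, height: %d (~%.2f%%)\n", height, progress);
}
```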
diff --git a/src/wallet/load.cpp b/src/wallet/load.cpp index 2d47673705..e6f96074d5 100644 --- a/src/wallet/load.cpp +++ b/src/wallet/load.cpp @@ -29,7 +29,7 @@ bool VerifyWallets(WalletContext& context) fs::path wallet_dir = fs::PathFromString(args.GetArg("-walletdir", "")); boost::system::error_code error; // The canonical path cleans the path, preventing >1 Berkeley environment instances for the same directory - fs::path canonical_wallet_dir = fs::canonical(wallet_dir, error); + fs::path canonical_wallet_dir = fs::canonical(wallet_dir, error).remove_trailing_separator(); if (error || !fs::exists(wallet_dir)) { chain.initError(strprintf(_("Specified -walletdir \"%s\" does not exist"), fs::PathToString(wallet_dir))); return false; diff --git a/test/functional/interface_rest.py b/test/functional/interface_rest.py index 2842b2534d..06aa5608bb 100755 --- a/test/functional/interface_rest.py +++ b/test/functional/interface_rest.py @@ -22,6 +22,10 @@ from test_framework.util import ( from test_framework.messages import BLOCK_HEADER_SIZE +INVALID_PARAM = "abc" +UNKNOWN_PARAM = "0000000000000000000000000000000000000000000000000000000000000000" + + class ReqType(Enum): JSON = 1 BIN = 2 @@ -103,6 +107,12 @@ class RESTTest (BitcoinTestFramework): n, = filter_output_indices_by_value(json_obj['vout'], Decimal('0.1')) spending = (txid, n) + # Test /tx with an invalid and an unknown txid + resp = self.test_rest_request(uri=f"/tx/{INVALID_PARAM}", ret_type=RetType.OBJ, status=400) + assert_equal(resp.read().decode('utf-8').rstrip(), f"Invalid hash: {INVALID_PARAM}") + resp = self.test_rest_request(uri=f"/tx/{UNKNOWN_PARAM}", ret_type=RetType.OBJ, status=404) + assert_equal(resp.read().decode('utf-8').rstrip(), f"{UNKNOWN_PARAM} not found") + self.log.info("Query an unspent TXO using the /getutxos URI") self.generatetoaddress(self.nodes[1], 1, not_related_address) @@ -205,8 +215,8 @@ class RESTTest (BitcoinTestFramework): bb_hash = self.nodes[0].getbestblockhash() # Check result if block does not exists - assert_equal(self.test_rest_request('/headers/1/0000000000000000000000000000000000000000000000000000000000000000'), []) - self.test_rest_request('/block/0000000000000000000000000000000000000000000000000000000000000000', status=404, ret_type=RetType.OBJ) + assert_equal(self.test_rest_request(f"/headers/1/{UNKNOWN_PARAM}"), []) + self.test_rest_request(f"/block/{UNKNOWN_PARAM}", status=404, ret_type=RetType.OBJ) # Check result if block is not in the active chain self.nodes[0].invalidateblock(bb_hash) @@ -250,8 +260,8 @@ class RESTTest (BitcoinTestFramework): assert_equal(blockhash, bb_hash) # Check invalid blockhashbyheight requests - resp = self.test_rest_request("/blockhashbyheight/abc", ret_type=RetType.OBJ, status=400) - assert_equal(resp.read().decode('utf-8').rstrip(), "Invalid height: abc") + resp = self.test_rest_request(f"/blockhashbyheight/{INVALID_PARAM}", ret_type=RetType.OBJ, status=400) + assert_equal(resp.read().decode('utf-8').rstrip(), f"Invalid height: {INVALID_PARAM}") resp = self.test_rest_request("/blockhashbyheight/1000000", ret_type=RetType.OBJ, status=404) assert_equal(resp.read().decode('utf-8').rstrip(), "Block height out of range") resp = self.test_rest_request("/blockhashbyheight/-1", ret_type=RetType.OBJ, status=400) diff --git a/test/functional/mempool_updatefromblock.py b/test/functional/mempool_updatefromblock.py index 16c15e3f74..51de582ce0 100755 --- a/test/functional/mempool_updatefromblock.py +++ b/test/functional/mempool_updatefromblock.py @@ -17,7 +17,7 @@ from 
test_framework.util import assert_equal class MempoolUpdateFromBlockTest(BitcoinTestFramework): def set_test_params(self): self.num_nodes = 1 - self.extra_args = [['-limitdescendantsize=1000', '-limitancestorsize=1000']] + self.extra_args = [['-limitdescendantsize=1000', '-limitancestorsize=1000', '-limitancestorcount=100']] def skip_test_if_missing_module(self): self.skip_if_no_wallet() diff --git a/test/functional/wallet_multiwallet.py b/test/functional/wallet_multiwallet.py index 0b868dde6c..317121eb68 100755 --- a/test/functional/wallet_multiwallet.py +++ b/test/functional/wallet_multiwallet.py @@ -141,7 +141,7 @@ class MultiWalletTest(BitcoinTestFramework): # should raise rpc error if wallet path can't be created err_code = -4 if self.options.descriptors else -1 - assert_raises_rpc_error(err_code, "boost::filesystem::create_directory:", self.nodes[0].createwallet, "w8/bad") + assert_raises_rpc_error(err_code, "boost::filesystem::create_director", self.nodes[0].createwallet, "w8/bad") # check that all requested wallets were created self.stop_node(0) diff --git a/test/lint/commit-script-check.sh b/test/lint/commit-script-check.sh index 6a8a15d05c..9449b393f1 100755 --- a/test/lint/commit-script-check.sh +++ b/test/lint/commit-script-check.sh @@ -17,6 +17,11 @@ if test -z "$1"; then exit 1 fi +if ! sed --help 2>&1 | grep -q 'GNU'; then + echo "Error: the installed sed package is not compatible. Please make sure you have GNU sed installed in your system."; + exit 1; +fi + RET=0 PREV_BRANCH=$(git name-rev --name-only HEAD) PREV_HEAD=$(git rev-parse HEAD) diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan index 6e4ac80927..393278bd6a 100644 --- a/test/sanitizer_suppressions/ubsan +++ b/test/sanitizer_suppressions/ubsan @@ -62,7 +62,6 @@ unsigned-integer-overflow:txmempool.cpp unsigned-integer-overflow:util/strencodings.cpp unsigned-integer-overflow:validation.cpp implicit-integer-sign-change:addrman.h -implicit-integer-sign-change:arith_uint256.cpp implicit-integer-sign-change:bech32.cpp implicit-integer-sign-change:common/bloom.cpp implicit-integer-sign-change:chain.cpp @@ -78,28 +77,17 @@ implicit-integer-sign-change:prevector.h implicit-integer-sign-change:script/bitcoinconsensus.cpp implicit-integer-sign-change:script/interpreter.cpp implicit-integer-sign-change:serialize.h -implicit-integer-sign-change:test/arith_uint256_tests.cpp -implicit-integer-sign-change:test/coins_tests.cpp -implicit-integer-sign-change:test/pow_tests.cpp -implicit-integer-sign-change:test/prevector_tests.cpp -implicit-integer-sign-change:test/sighash_tests.cpp -implicit-integer-sign-change:test/skiplist_tests.cpp implicit-integer-sign-change:test/streams_tests.cpp -implicit-integer-sign-change:test/transaction_tests.cpp implicit-integer-sign-change:txmempool.cpp implicit-integer-sign-change:zmq/zmqpublishnotifier.cpp implicit-signed-integer-truncation,implicit-integer-sign-change:chain.h -implicit-signed-integer-truncation,implicit-integer-sign-change:test/skiplist_tests.cpp implicit-signed-integer-truncation:addrman.cpp implicit-signed-integer-truncation:addrman.h implicit-signed-integer-truncation:chain.h implicit-signed-integer-truncation:crypto/ implicit-signed-integer-truncation:node/miner.cpp implicit-signed-integer-truncation:net.cpp -implicit-signed-integer-truncation:net_processing.cpp implicit-signed-integer-truncation:streams.h -implicit-signed-integer-truncation:test/arith_uint256_tests.cpp -implicit-signed-integer-truncation:test/skiplist_tests.cpp 
implicit-signed-integer-truncation:torcontrol.cpp implicit-unsigned-integer-truncation:crypto/ shift-base:arith_uint256.cpp
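The wallet/load.cpp hunk above appends remove_trailing_separator() to the canonicalized -walletdir path. A small standalone Boost.Filesystem sketch (not part of the patch; the directory names are hypothetical) shows why that normalization keeps two spellings of the same directory from being treated as two distinct Berkeley DB environments:

```cpp
#include <boost/filesystem.hpp>
#include <iostream>

int main()
{
    namespace fs = boost::filesystem;
    // Two spellings of the same (hypothetical) wallet directory.
    fs::path with_slash{"/home/user/.bitcoin/wallets/"};
    fs::path without_slash{"/home/user/.bitcoin/wallets"};

    // Lexicographic path comparison treats the trailing separator as significant,
    // so the two spellings compare unequal...
    std::cout << (with_slash == without_slash) << '\n';  // prints 0

    // ...but after stripping the trailing separator they collapse to one key,
    // so only one database environment would be opened for the directory.
    std::cout << (with_slash.remove_trailing_separator() == without_slash) << '\n';  // prints 1
}
```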