-rw-r--r--  .cirrus.yml | 6
-rw-r--r--  CONTRIBUTING.md | 4
-rwxr-xr-x  ci/test/00_setup_env_arm.sh | 2
-rwxr-xr-x  ci/test/00_setup_env_native_nowallet.sh | 2
-rwxr-xr-x  ci/test/00_setup_env_native_qt5.sh | 2
-rwxr-xr-x  ci/test/00_setup_env_native_valgrind.sh | 3
-rwxr-xr-x  ci/test/00_setup_env_s390x.sh | 4
-rw-r--r--  configure.ac | 33
-rw-r--r--  contrib/guix/INSTALL.md | 2
-rw-r--r--  contrib/guix/README.md | 2
-rw-r--r--  doc/build-unix.md | 2
-rw-r--r--  doc/build-windows.md | 40
-rw-r--r--  doc/dependencies.md | 1
-rw-r--r--  doc/developer-notes.md | 2
-rw-r--r--  doc/release-notes-12677.md | 8
-rw-r--r--  doc/release-notes.md | 4
-rw-r--r--  doc/tor.md | 9
-rw-r--r--  src/Makefile.am | 11
-rw-r--r--  src/addrman.cpp | 26
-rw-r--r--  src/bitcoin-cli.cpp | 31
-rw-r--r--  src/compat/glibc_compat.cpp | 62
-rw-r--r--  src/index/base.h | 3
-rw-r--r--  src/index/txindex.cpp | 163
-rw-r--r--  src/index/txindex.h | 5
-rw-r--r--  src/init.cpp | 4
-rw-r--r--  src/interfaces/chain.h | 2
-rw-r--r--  src/key.h | 1
-rw-r--r--  src/miner.cpp | 2
-rw-r--r--  src/net.cpp | 22
-rw-r--r--  src/net_processing.cpp | 8
-rw-r--r--  src/node/blockstorage.cpp | 10
-rw-r--r--  src/node/interfaces.cpp | 4
-rw-r--r--  src/rpc/client.cpp | 1
-rw-r--r--  src/rpc/net.cpp | 14
-rw-r--r--  src/test/README.md | 26
-rw-r--r--  src/test/data/README.md | 2
-rw-r--r--  src/test/fuzz/addrman.cpp | 25
-rw-r--r--  src/txdb.cpp | 23
-rw-r--r--  src/txdb.h | 11
-rw-r--r--  src/txmempool.cpp | 6
-rw-r--r--  src/txmempool.h | 4
-rw-r--r--  src/validation.cpp | 2
-rw-r--r--  src/validation.h | 8
-rw-r--r--  src/wallet/rpcwallet.cpp | 13
-rw-r--r--  src/wallet/transaction.h | 29
-rw-r--r--  test/functional/README.md | 2
-rwxr-xr-x  test/functional/feature_addrman.py | 52
-rwxr-xr-x  test/functional/feature_asmap.py | 29
-rwxr-xr-x  test/functional/feature_bip68_sequence.py | 5
-rwxr-xr-x  test/functional/feature_coinstatsindex.py | 12
-rwxr-xr-x  test/functional/feature_csv_activation.py | 1
-rwxr-xr-x  test/functional/feature_maxuploadtarget.py | 1
-rwxr-xr-x  test/functional/feature_rbf.py | 2
-rwxr-xr-x  test/functional/interface_bitcoin_cli.py | 11
-rwxr-xr-x  test/functional/mempool_packages.py | 20
-rwxr-xr-x  test/functional/mempool_reorg.py | 2
-rwxr-xr-x  test/functional/mempool_spend_coinbase.py | 8
-rwxr-xr-x  test/functional/p2p_blocksonly.py | 24
-rwxr-xr-x  test/functional/rpc_blockchain.py | 2
-rwxr-xr-x  test/functional/rpc_net.py | 35
-rwxr-xr-x  test/functional/test_framework/p2p.py | 2
-rw-r--r--  test/functional/test_framework/util.py | 5
-rw-r--r--  test/functional/test_framework/wallet.py | 13
63 files changed, 382 insertions, 488 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 20ff61049d..f15afca005 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -75,7 +75,7 @@ task:
<< : *CIRRUS_EPHEMERAL_WORKER_TEMPLATE_ENV
task:
- name: "Win64 native [unit tests, no functional tests] [msvc]"
+ name: "Win64 native [msvc]"
<< : *FILTER_TEMPLATE
windows_container:
cpu: 4
@@ -93,6 +93,7 @@ task:
QTBASEDIR: 'C:\Qt5.12.11_x64_static_vs2019_160900'
x64_NATIVE_TOOLS: '"C:\Program Files (x86)\Microsoft Visual Studio\2019\BuildTools\VC\Auxiliary\Build\vcvars64.bat"'
IgnoreWarnIntDirInTempDetected: 'true'
+ EXCLUDE_TESTS: 'feature_addrman.py,feature_bip68_sequence.py,feature_fee_estimation.py,mining_prioritisetransaction.py,p2p_getaddr_caching.py,p2p_invalid_locator.py,p2p_invalid_tx.py,rpc_misc.py,rpc_net.py,wallet_avoidreuse.py,wallet_descriptor.py,wallet_groups.py,wallet_keypool.py'
merge_script:
- git config --global user.email "ci@ci.ci"
- git config --global user.name "ci"
@@ -146,7 +147,8 @@ task:
- python test\util\test_runner.py
- python test\util\rpcauth-test.py
functional_tests_script:
- - python test\functional\test_runner.py --ci --quiet --combinedlogslen=4000 --jobs=4 --timeout-factor=8 rpc_help feature_config_args rpc_signer feature_presegwit_node_upgrade "tool_wallet.py --descriptors" --failfast # TODO enable '--extended' and remove cherry-picked test list
+ # TODO enable '--extended' and drop '--exclude'.
+ - python test\functional\test_runner.py --nocleanup --ci --quiet --combinedlogslen=4000 --jobs=4 --timeout-factor=8 --exclude %EXCLUDE_TESTS% --failfast
task:
name: 'ARM [unit tests, no functional tests] [bullseye]'
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index e0d3671b07..acf5cc08d1 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -60,8 +60,8 @@ Most communication about Bitcoin Core development happens on IRC, in the
`#bitcoin-core-dev` channel on Libera Chat. The easiest way to participate on IRC is
with the web client, [web.libera.chat](https://web.libera.chat/#bitcoin-core-dev). Chat
history logs can be found
-on [http://www.erisian.com.au/bitcoin-core-dev/](http://www.erisian.com.au/bitcoin-core-dev/)
-and [http://gnusha.org/bitcoin-core-dev/](http://gnusha.org/bitcoin-core-dev/).
+on [https://www.erisian.com.au/bitcoin-core-dev/](https://www.erisian.com.au/bitcoin-core-dev/)
+and [https://gnusha.org/bitcoin-core-dev/](https://gnusha.org/bitcoin-core-dev/).
Discussion about codebase improvements happens in GitHub issues and pull
requests.
diff --git a/ci/test/00_setup_env_arm.sh b/ci/test/00_setup_env_arm.sh
index 93f8017d98..f18052fe37 100755
--- a/ci/test/00_setup_env_arm.sh
+++ b/ci/test/00_setup_env_arm.sh
@@ -25,4 +25,4 @@ export RUN_FUNCTIONAL_TESTS=false
export GOAL="install"
# -Wno-psabi is to disable ABI warnings: "note: parameter passing for argument of type ... changed in GCC 7.1"
# This could be removed once the ABI change warning does not show up by default
-export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CXXFLAGS=-Wno-psabi"
+export BITCOIN_CONFIG="--enable-reduce-exports CXXFLAGS=-Wno-psabi"
diff --git a/ci/test/00_setup_env_native_nowallet.sh b/ci/test/00_setup_env_native_nowallet.sh
index d167c9198a..5edc6ae2da 100755
--- a/ci/test/00_setup_env_native_nowallet.sh
+++ b/ci/test/00_setup_env_native_nowallet.sh
@@ -11,4 +11,4 @@ export DOCKER_NAME_TAG=ubuntu:18.04 # Use bionic to have one config run the tes
export PACKAGES="python3-zmq clang-5.0 llvm-5.0" # Use clang-5 to test C++17 compatibility, see doc/dependencies.md
export DEP_OPTS="NO_WALLET=1"
export GOAL="install"
-export BITCOIN_CONFIG="--enable-glibc-back-compat --enable-reduce-exports CC=clang-5.0 CXX=clang++-5.0"
+export BITCOIN_CONFIG="--enable-reduce-exports CC=clang-5.0 CXX=clang++-5.0"
diff --git a/ci/test/00_setup_env_native_qt5.sh b/ci/test/00_setup_env_native_qt5.sh
index b3e967c898..7d73f2d0b2 100755
--- a/ci/test/00_setup_env_native_qt5.sh
+++ b/ci/test/00_setup_env_native_qt5.sh
@@ -15,5 +15,5 @@ export RUN_UNIT_TESTS_SEQUENTIAL="true"
export RUN_UNIT_TESTS="false"
export GOAL="install"
export PREVIOUS_RELEASES_TO_DOWNLOAD="v0.15.2 v0.16.3 v0.17.2 v0.18.1 v0.19.1 v0.20.1"
-export BITCOIN_CONFIG="--enable-zmq --with-libs=no --with-gui=qt5 --enable-glibc-back-compat --enable-reduce-exports
+export BITCOIN_CONFIG="--enable-zmq --with-libs=no --with-gui=qt5 --enable-reduce-exports
--enable-debug --disable-fuzz-binary CFLAGS=\"-g0 -O2 -funsigned-char\" CXXFLAGS=\"-g0 -O2 -funsigned-char\""
diff --git a/ci/test/00_setup_env_native_valgrind.sh b/ci/test/00_setup_env_native_valgrind.sh
index e079a7057c..78af869e70 100755
--- a/ci/test/00_setup_env_native_valgrind.sh
+++ b/ci/test/00_setup_env_native_valgrind.sh
@@ -6,10 +6,11 @@
export LC_ALL=C.UTF-8
+export DOCKER_NAME_TAG="ubuntu:20.04"
export CONTAINER_NAME=ci_native_valgrind
export PACKAGES="valgrind clang llvm python3-zmq libevent-dev bsdmainutils libboost-dev libboost-system-dev libboost-filesystem-dev libboost-test-dev libdb5.3++-dev libminiupnpc-dev libnatpmp-dev libzmq3-dev libsqlite3-dev"
export USE_VALGRIND=1
export NO_DEPENDS=1
-export TEST_RUNNER_EXTRA="--exclude rpc_bind" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547
+export TEST_RUNNER_EXTRA="--exclude rpc_bind,feature_bind_extra" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547
export GOAL="install"
export BITCOIN_CONFIG="--enable-zmq --with-incompatible-bdb --with-gui=no CC=clang CXX=clang++" # TODO enable GUI
diff --git a/ci/test/00_setup_env_s390x.sh b/ci/test/00_setup_env_s390x.sh
index 51a0fd9117..fd253123e6 100755
--- a/ci/test/00_setup_env_s390x.sh
+++ b/ci/test/00_setup_env_s390x.sh
@@ -18,9 +18,9 @@ if [ -n "$QEMU_USER_CMD" ]; then
fi
# Use debian to avoid 404 apt errors
export CONTAINER_NAME=ci_s390x
-export DOCKER_NAME_TAG="debian:buster"
-export RUN_UNIT_TESTS=true
+export DOCKER_NAME_TAG="debian:bookworm"
export TEST_RUNNER_ENV="LC_ALL=C"
+export TEST_RUNNER_EXTRA="--exclude rpc_bind,feature_bind_extra" # Excluded for now, see https://github.com/bitcoin/bitcoin/issues/17765#issuecomment-602068547
export RUN_FUNCTIONAL_TESTS=true
export GOAL="install"
export BITCOIN_CONFIG="--enable-reduce-exports --with-incompatible-bdb"
diff --git a/configure.ac b/configure.ac
index e50d53f6dd..0dc480e6c1 100644
--- a/configure.ac
+++ b/configure.ac
@@ -240,15 +240,9 @@ AC_ARG_ENABLE([lcov-branch-coverage],
[use_lcov_branch=yes],
[use_lcov_branch=no])
-AC_ARG_ENABLE([glibc-back-compat],
- [AS_HELP_STRING([--enable-glibc-back-compat],
- [enable backwards compatibility with glibc])],
- [use_glibc_compat=$enableval],
- [use_glibc_compat=no])
-
AC_ARG_ENABLE([threadlocal],
[AS_HELP_STRING([--enable-threadlocal],
- [enable features that depend on the c++ thread_local keyword (currently just thread names in debug logs). (default is to enabled if there is platform support and glibc-back-compat is not enabled)])],
+ [enable features that depend on the c++ thread_local keyword (currently just thread names in debug logs). (default is to enable if there is platform support)])],
[use_thread_local=$enableval],
[use_thread_local=auto])
@@ -863,17 +857,7 @@ if test x$ac_cv_sys_large_files != x &&
CPPFLAGS="$CPPFLAGS -D_LARGE_FILES=$ac_cv_sys_large_files"
fi
-if test x$use_glibc_compat != xno; then
- AX_CHECK_LINK_FLAG([[-Wl,--wrap=__divmoddi4]], [COMPAT_LDFLAGS="$COMPAT_LDFLAGS -Wl,--wrap=__divmoddi4"])
- AX_CHECK_LINK_FLAG([[-Wl,--wrap=log2f]], [COMPAT_LDFLAGS="$COMPAT_LDFLAGS -Wl,--wrap=log2f"])
- case $host in
- powerpc64* | ppc64*)
- AX_CHECK_LINK_FLAG([[-Wl,--no-tls-get-addr-optimize]], [COMPAT_LDFLAGS="$COMPAT_LDFLAGS -Wl,--no-tls-get-addr-optimize"])
- ;;
- esac
-else
- AC_SEARCH_LIBS([clock_gettime],[rt])
-fi
+AC_SEARCH_LIBS([clock_gettime],[rt])
if test "x$enable_gprof" = xyes; then
dnl -pg is incompatible with -pie. Since hardening and profiling together doesn't make sense,
@@ -1065,7 +1049,7 @@ AC_COMPILE_IFELSE([AC_LANG_SOURCE([
dnl thread_local is currently disabled when building with glibc back compat.
dnl Our minimum supported glibc is 2.17, however support for thread_local
dnl did not arrive in glibc until 2.18.
-if test "x$use_thread_local" = xyes || { test "x$use_thread_local" = xauto && test "x$use_glibc_compat" = xno; }; then
+if test "x$use_thread_local" = xyes || test "x$use_thread_local" = xauto; then
TEMP_LDFLAGS="$LDFLAGS"
LDFLAGS="$TEMP_LDFLAGS $PTHREAD_CFLAGS"
AC_MSG_CHECKING([for thread_local support])
@@ -1245,13 +1229,14 @@ AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[
[ AC_MSG_RESULT(no); HAVE_WEAK_GETAUXVAL=0 ]
)
+have_any_system=no
AC_MSG_CHECKING([for std::system])
AC_LINK_IFELSE(
[ AC_LANG_PROGRAM(
[[ #include <cstdlib> ]],
[[ int nErr = std::system(""); ]]
)],
- [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_STD__SYSTEM, 1, Define to 1 if std::system is available.)],
+ [ AC_MSG_RESULT(yes); have_any_system=yes],
[ AC_MSG_RESULT(no) ]
)
@@ -1261,11 +1246,13 @@ AC_LINK_IFELSE(
[[ ]],
[[ int nErr = ::_wsystem(""); ]]
)],
- [ AC_MSG_RESULT(yes); AC_DEFINE(HAVE_WSYSTEM, 1, Define to 1 if ::wsystem is available.)],
+ [ AC_MSG_RESULT(yes); have_any_system=yes],
[ AC_MSG_RESULT(no) ]
)
-AC_DEFINE([HAVE_SYSTEM], [HAVE_STD__SYSTEM || HAVE_WSYSTEM], [std::system or ::wsystem])
+if test "x$have_any_system" != "xno"; then
+ AC_DEFINE(HAVE_SYSTEM, 1, Define to 1 if std::system or ::wsystem is available.)
+fi
LEVELDB_CPPFLAGS=
LIBLEVELDB=
@@ -1806,7 +1793,6 @@ AM_CONDITIONAL([ENABLE_BENCH],[test x$use_bench = xyes])
AM_CONDITIONAL([USE_QRCODE], [test x$use_qr = xyes])
AM_CONDITIONAL([USE_LCOV],[test x$use_lcov = xyes])
AM_CONDITIONAL([USE_LIBEVENT],[test x$use_libevent = xyes])
-AM_CONDITIONAL([GLIBC_BACK_COMPAT],[test x$use_glibc_compat = xyes])
AM_CONDITIONAL([HARDEN],[test x$use_hardening = xyes])
AM_CONDITIONAL([ENABLE_SSE42],[test x$enable_sse42 = xyes])
AM_CONDITIONAL([ENABLE_SSE41],[test x$enable_sse41 = xyes])
@@ -1849,7 +1835,6 @@ AC_SUBST(DEBUG_CPPFLAGS)
AC_SUBST(WARN_CXXFLAGS)
AC_SUBST(NOWARN_CXXFLAGS)
AC_SUBST(DEBUG_CXXFLAGS)
-AC_SUBST(COMPAT_LDFLAGS)
AC_SUBST(ERROR_CXXFLAGS)
AC_SUBST(GPROF_CXXFLAGS)
AC_SUBST(GPROF_LDFLAGS)
diff --git a/contrib/guix/INSTALL.md b/contrib/guix/INSTALL.md
index 63aa3e02b2..68aae18731 100644
--- a/contrib/guix/INSTALL.md
+++ b/contrib/guix/INSTALL.md
@@ -358,7 +358,7 @@ This is especially notable because Ubuntu Focal packages `libgit2 v0.28.4`, and
Should you be in this situation, you need to build both `libgit2 v1.1.x` and
`guile-git` from source.
-Source: http://logs.guix.gnu.org/guix/2020-11-12.log#232527
+Source: https://logs.guix.gnu.org/guix/2020-11-12.log#232527
##### `{scheme,guile}-bytestructures` v1.0.8 and v1.0.9 are broken for Guile v2.2
diff --git a/contrib/guix/README.md b/contrib/guix/README.md
index f34043da58..51a034c26e 100644
--- a/contrib/guix/README.md
+++ b/contrib/guix/README.md
@@ -467,7 +467,7 @@ start over.
- `/root/.cache/guix/`
- `/root/.guix-profile/`
-[b17e]: http://bootstrappable.org/
+[b17e]: https://bootstrappable.org/
[r12e/source-date-epoch]: https://reproducible-builds.org/docs/source-date-epoch/
[guix/install.sh]: https://git.savannah.gnu.org/cgit/guix.git/plain/etc/guix-install.sh
diff --git a/doc/build-unix.md b/doc/build-unix.md
index 44b6ad5968..02c36eea7c 100644
--- a/doc/build-unix.md
+++ b/doc/build-unix.md
@@ -348,7 +348,7 @@ To build executables for ARM:
make HOST=arm-linux-gnueabihf NO_QT=1
cd ..
./autogen.sh
- CONFIG_SITE=$PWD/depends/arm-linux-gnueabihf/share/config.site ./configure --enable-glibc-back-compat --enable-reduce-exports LDFLAGS=-static-libstdc++
+ CONFIG_SITE=$PWD/depends/arm-linux-gnueabihf/share/config.site ./configure --enable-reduce-exports LDFLAGS=-static-libstdc++
make
diff --git a/doc/build-windows.md b/doc/build-windows.md
index f88b9739de..0b895eadfb 100644
--- a/doc/build-windows.md
+++ b/doc/build-windows.md
@@ -5,11 +5,9 @@ Below are some notes on how to build Bitcoin Core for Windows.
The options known to work for building Bitcoin Core on Windows are:
-* On Linux, using the [Mingw-w64](https://mingw-w64.org/doku.php) cross compiler tool chain. Ubuntu Bionic 18.04 is required
-and is the platform used to build the Bitcoin Core Windows release binaries.
-* On Windows, using [Windows
-Subsystem for Linux (WSL)](https://docs.microsoft.com/windows/wsl/about) and the Mingw-w64 cross compiler tool chain.
-* On Windows, using a native compiler tool chain such as [Visual Studio](https://www.visualstudio.com). See [README.md](/build_msvc/README.md).
+* On Linux, using the [Mingw-w64](https://www.mingw-w64.org/) cross compiler tool chain.
+* On Windows, using [Windows Subsystem for Linux (WSL)](https://docs.microsoft.com/windows/wsl/about) and Mingw-w64.
+* On Windows, using [Microsoft Visual Studio](https://www.visualstudio.com). See [README.md](/build_msvc/README.md).
Other options which may work, but which have not been extensively tested are (please contribute instructions):
@@ -18,40 +16,12 @@ Other options which may work, but which have not been extensively tested are (pl
Installing Windows Subsystem for Linux
---------------------------------------
-With Windows 10, Microsoft has released a new feature named the [Windows
-Subsystem for Linux (WSL)](https://docs.microsoft.com/windows/wsl/about). This
-feature allows you to run a bash shell directly on Windows in an Ubuntu-based
-environment. Within this environment you can cross compile for Windows without
-the need for a separate Linux VM or server. Note that while WSL can be installed with
-other Linux variants, such as OpenSUSE, the following instructions have only been
-tested with Ubuntu.
-
-This feature is not supported in versions of Windows prior to Windows 10 or on
-Windows Server SKUs. In addition, it is available [only for 64-bit versions of
-Windows](https://docs.microsoft.com/windows/wsl/install-win10).
-
-Full instructions to install WSL are available on the above link.
-To install WSL on Windows 10 with Fall Creators Update installed (version >= 16215.0) do the following:
-
-1. Enable the Windows Subsystem for Linux feature
- * Open the Windows Features dialog (`OptionalFeatures.exe`)
- * Enable 'Windows Subsystem for Linux'
- * Click 'OK' and restart if necessary
-2. Install Ubuntu
- * Open Microsoft Store and search for "Ubuntu 18.04" or use [this link](https://www.microsoft.com/store/productId/9N9TNGVNDL3Q)
- * Click Install
-3. Complete Installation
- * Open a cmd prompt and type "Ubuntu1804"
- * Create a new UNIX user account (this is a separate account from your Windows account)
-
-After the bash shell is active, you can follow the instructions below, starting
-with the "Cross-compilation" section. Compiling the 64-bit version is
-recommended, but it is possible to compile the 32-bit version.
+Follow the upstream installation instructions, available [here](https://docs.microsoft.com/windows/wsl/install-win10).
Cross-compilation for Ubuntu and Windows Subsystem for Linux
------------------------------------------------------------
-The steps below can be performed on Ubuntu (including in a VM) or WSL. The depends system
+The steps below can be performed on Ubuntu or WSL. The depends system
will also work on other Linux distributions, however the commands for
installing the toolchain will be different.
diff --git a/doc/dependencies.md b/doc/dependencies.md
index b7634718e8..abdbeee3ce 100644
--- a/doc/dependencies.md
+++ b/doc/dependencies.md
@@ -12,6 +12,7 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct
| fontconfig | [2.12.1](https://www.freedesktop.org/software/fontconfig/release/) | | No | Yes | |
| FreeType | [2.7.1](https://download.savannah.gnu.org/releases/freetype) | | No | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) (Android only) |
| GCC | | [7+](https://gcc.gnu.org/) (C++17 support) | | | |
+| glibc | | [2.17](https://www.gnu.org/software/libc/) | | | |
| HarfBuzz-NG | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) |
| libevent | [2.1.12-stable](https://github.com/libevent/libevent/releases) | [2.0.21](https://github.com/bitcoin/bitcoin/pull/18676) | No | | |
| libnatpmp | git commit [4536032...](https://github.com/miniupnp/libnatpmp/tree/4536032ae32268a45c073a4d5e91bbab4534773a) | | No | | |
diff --git a/doc/developer-notes.md b/doc/developer-notes.md
index 3e13adeec0..ffb6632e21 100644
--- a/doc/developer-notes.md
+++ b/doc/developer-notes.md
@@ -349,7 +349,7 @@ make cov
Profiling is a good way to get a precise idea of where time is being spent in
code. One tool for doing profiling on Linux platforms is called
-[`perf`](http://www.brendangregg.com/perf.html), and has been integrated into
+[`perf`](https://www.brendangregg.com/perf.html), and has been integrated into
the functional test framework. Perf can observe a running process and sample
(at some frequency) where its execution is.
diff --git a/doc/release-notes-12677.md b/doc/release-notes-12677.md
new file mode 100644
index 0000000000..d6fea9eae7
--- /dev/null
+++ b/doc/release-notes-12677.md
@@ -0,0 +1,8 @@
+Notable changes
+===============
+
+Updated RPCs
+------------
+
+- `listunspent` now includes `ancestorcount`, `ancestorsize`, and
+`ancestorfees` for each transaction output that is still in the mempool.
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 82dfc5c701..915bda2ea3 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -91,6 +91,10 @@ Tools and Utilities
- Update `-getinfo` to return data in a user-friendly format that also reduces vertical space. (#21832)
+- CLI `-addrinfo` now returns a single field for the number of `onion` addresses
+ known to the node instead of separate `torv2` and `torv3` fields, as support
+ for Tor V2 addresses was removed from Bitcoin Core in 22.0. (#22544)
+
Wallet
------
diff --git a/doc/tor.md b/doc/tor.md
index a3ec1987aa..8dc82ca91e 100644
--- a/doc/tor.md
+++ b/doc/tor.md
@@ -23,10 +23,9 @@ There are several ways to see your local onion address in Bitcoin Core:
You may set the `-debug=tor` config logging option to have additional
information in the debug log about your Tor configuration.
-CLI `-addrinfo` returns the number of addresses known to your node per network
-type, including Tor v2 and v3. This is useful to see how many onion addresses
-are known to your node for `-onlynet=onion` and how many Tor v3 addresses it
-knows when upgrading to Bitcoin Core v22.0 and up that supports Tor v3 only.
+CLI `-addrinfo` returns the number of addresses known to your node per
+network. This can be useful to see how many onion peers your node knows,
+e.g. for `-onlynet=onion`.
## 1. Run Bitcoin Core behind a Tor proxy
@@ -134,7 +133,7 @@ You can also check the group of the cookie file. On most Linux systems, the Tor
auth cookie will usually be `/run/tor/control.authcookie`:
```
-stat -c '%G' /run/tor/control.authcookie
+TORGROUP=$(stat -c '%G' /run/tor/control.authcookie)
```
Once you have determined the `${TORGROUP}` and selected the `${USER}` that will
diff --git a/src/Makefile.am b/src/Makefile.am
index eea98c7f22..52c8b85357 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -571,7 +571,7 @@ libbitcoin_common_a_SOURCES = \
# util: shared between all executables.
# This library *must* be included to make sure that the glibc
-# backward-compatibility objects and their sanity checks are linked.
+# sanity checks are linked.
libbitcoin_util_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
libbitcoin_util_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
libbitcoin_util_a_SOURCES = \
@@ -619,11 +619,6 @@ if USE_LIBEVENT
libbitcoin_util_a_SOURCES += util/url.cpp
endif
-if GLIBC_BACK_COMPAT
-libbitcoin_util_a_SOURCES += compat/glibc_compat.cpp
-AM_LDFLAGS += $(COMPAT_LDFLAGS)
-endif
-
# cli: shared between bitcoin-cli and bitcoin-qt
libbitcoin_cli_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES)
libbitcoin_cli_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
@@ -751,10 +746,6 @@ if BUILD_BITCOIN_LIBS
include_HEADERS = script/bitcoinconsensus.h
libbitcoinconsensus_la_SOURCES = support/cleanse.cpp $(crypto_libbitcoin_crypto_base_a_SOURCES) $(libbitcoin_consensus_a_SOURCES)
-if GLIBC_BACK_COMPAT
- libbitcoinconsensus_la_SOURCES += compat/glibc_compat.cpp
-endif
-
libbitcoinconsensus_la_LDFLAGS = $(AM_LDFLAGS) -no-undefined $(RELDFLAGS)
libbitcoinconsensus_la_LIBADD = $(LIBSECP256K1)
libbitcoinconsensus_la_CPPFLAGS = $(AM_CPPFLAGS) -I$(builddir)/obj -I$(srcdir)/secp256k1/include -DBUILD_BITCOIN_INTERNAL
diff --git a/src/addrman.cpp b/src/addrman.cpp
index 772c34ae77..a1e8cb1bf1 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -11,6 +11,7 @@
#include <netaddress.h>
#include <serialize.h>
#include <streams.h>
+#include <util/check.h>
#include <cmath>
#include <optional>
@@ -488,11 +489,14 @@ void CAddrMan::MakeTried(CAddrInfo& info, int nId)
AssertLockHeld(cs);
// remove the entry from all new buckets
- for (int bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; bucket++) {
- int pos = info.GetBucketPosition(nKey, true, bucket);
+ const int start_bucket{info.GetNewBucket(nKey, m_asmap)};
+ for (int n = 0; n < ADDRMAN_NEW_BUCKET_COUNT; ++n) {
+ const int bucket{(start_bucket + n) % ADDRMAN_NEW_BUCKET_COUNT};
+ const int pos{info.GetBucketPosition(nKey, true, bucket)};
if (vvNew[bucket][pos] == nId) {
vvNew[bucket][pos] = -1;
info.nRefCount--;
+ if (info.nRefCount == 0) break;
}
}
nNew--;
@@ -564,22 +568,10 @@ void CAddrMan::Good_(const CService& addr, bool test_before_evict, int64_t nTime
if (info.fInTried)
return;
- // find a bucket it is in now
- int nRnd = insecure_rand.randrange(ADDRMAN_NEW_BUCKET_COUNT);
- int nUBucket = -1;
- for (unsigned int n = 0; n < ADDRMAN_NEW_BUCKET_COUNT; n++) {
- int nB = (n + nRnd) % ADDRMAN_NEW_BUCKET_COUNT;
- int nBpos = info.GetBucketPosition(nKey, true, nB);
- if (vvNew[nB][nBpos] == nId) {
- nUBucket = nB;
- break;
- }
- }
-
- // if no bucket is found, something bad happened;
- // TODO: maybe re-add the node, but for now, just bail out
- if (nUBucket == -1)
+ // if it is not in new, something bad happened
+ if (!Assume(info.nRefCount > 0)) {
return;
+ }
// which tried bucket to move the entry to
int tried_bucket = info.GetTriedBucket(nKey, m_asmap);
diff --git a/src/bitcoin-cli.cpp b/src/bitcoin-cli.cpp
index e75ba81b54..d6e7298fd0 100644
--- a/src/bitcoin-cli.cpp
+++ b/src/bitcoin-cli.cpp
@@ -242,7 +242,7 @@ public:
class AddrinfoRequestHandler : public BaseRequestHandler
{
private:
- static constexpr std::array m_networks{"ipv4", "ipv6", "torv2", "torv3", "i2p"};
+ static constexpr std::array m_networks{"ipv4", "ipv6", "onion", "i2p"};
int8_t NetworkStringToId(const std::string& str) const
{
for (size_t i = 0; i < m_networks.size(); ++i) {
@@ -268,13 +268,10 @@ public:
if (!nodes.empty() && nodes.at(0)["network"].isNull()) {
throw std::runtime_error("-addrinfo requires bitcoind server to be running v22.0 and up");
}
- // Count the number of peers we know by network, including torv2 versus torv3.
+ // Count the number of peers known to our node, by network.
std::array<uint64_t, m_networks.size()> counts{{}};
for (const UniValue& node : nodes) {
std::string network_name{node["network"].get_str()};
- if (network_name == "onion") {
- network_name = node["address"].get_str().size() > 22 ? "torv3" : "torv2";
- }
const int8_t network_id{NetworkStringToId(network_name)};
if (network_id == UNKNOWN_NETWORK) continue;
++counts.at(network_id);
@@ -340,7 +337,7 @@ public:
connections.pushKV("total", batch[ID_NETWORKINFO]["result"]["connections"]);
result.pushKV("connections", connections);
- result.pushKV("proxy", batch[ID_NETWORKINFO]["result"]["networks"][0]["proxy"]);
+ result.pushKV("networks", batch[ID_NETWORKINFO]["result"]["networks"]);
result.pushKV("difficulty", batch[ID_BLOCKCHAININFO]["result"]["difficulty"]);
result.pushKV("chain", UniValue(batch[ID_BLOCKCHAININFO]["result"]["chain"]));
if (!batch[ID_WALLETINFO]["result"].isNull()) {
@@ -989,8 +986,26 @@ static void ParseGetInfoResult(UniValue& result)
RESET);
result_string += strprintf("Version: %s\n", result["version"].getValStr());
result_string += strprintf("Time offset (s): %s\n", result["timeoffset"].getValStr());
- const std::string proxy = result["proxy"].getValStr();
- result_string += strprintf("Proxy: %s\n", proxy.empty() ? "N/A" : proxy);
+
+ // proxies
+ std::map<std::string, std::vector<std::string>> proxy_networks;
+ std::vector<std::string> ordered_proxies;
+
+ for (const UniValue& network : result["networks"].getValues()) {
+ const std::string proxy = network["proxy"].getValStr();
+ if (proxy.empty()) continue;
+ // Add proxy to ordered_proxies if it has not been processed yet
+ if (proxy_networks.find(proxy) == proxy_networks.end()) ordered_proxies.push_back(proxy);
+
+ proxy_networks[proxy].push_back(network["name"].getValStr());
+ }
+
+ std::vector<std::string> formatted_proxies;
+ for (const std::string& proxy : ordered_proxies) {
+ formatted_proxies.emplace_back(strprintf("%s (%s)", proxy, Join(proxy_networks.find(proxy)->second, ", ")));
+ }
+ result_string += strprintf("Proxies: %s\n", formatted_proxies.empty() ? "n/a" : Join(formatted_proxies, ", "));
+
result_string += strprintf("Min tx relay fee rate (%s/kvB): %s\n\n", CURRENCY_UNIT, result["relayfee"].getValStr());
if (!result["has_wallet"].isNull()) {
diff --git a/src/compat/glibc_compat.cpp b/src/compat/glibc_compat.cpp
deleted file mode 100644
index ff581d4a9e..0000000000
--- a/src/compat/glibc_compat.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (c) 2009-2020 The Bitcoin Core developers
-// Distributed under the MIT software license, see the accompanying
-// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-
-#if defined(HAVE_CONFIG_H)
-#include <config/bitcoin-config.h>
-#endif
-
-#include <cstddef>
-#include <cstdint>
-
-#if defined(__i386__) || defined(__arm__)
-
-extern "C" int64_t __udivmoddi4(uint64_t u, uint64_t v, uint64_t* rp);
-
-extern "C" int64_t __wrap___divmoddi4(int64_t u, int64_t v, int64_t* rp)
-{
- int32_t c1 = 0, c2 = 0;
- int64_t uu = u, vv = v;
- int64_t w;
- int64_t r;
-
- if (uu < 0) {
- c1 = ~c1, c2 = ~c2, uu = -uu;
- }
- if (vv < 0) {
- c1 = ~c1, vv = -vv;
- }
-
- w = __udivmoddi4(uu, vv, (uint64_t*)&r);
- if (c1)
- w = -w;
- if (c2)
- r = -r;
-
- *rp = r;
- return w;
-}
-#endif
-
-extern "C" float log2f_old(float x);
-#ifdef __i386__
-__asm(".symver log2f_old,log2f@GLIBC_2.1");
-#elif defined(__amd64__)
-__asm(".symver log2f_old,log2f@GLIBC_2.2.5");
-#elif defined(__arm__)
-__asm(".symver log2f_old,log2f@GLIBC_2.4");
-#elif defined(__aarch64__)
-__asm(".symver log2f_old,log2f@GLIBC_2.17");
-#elif defined(__powerpc64__)
-# ifdef WORDS_BIGENDIAN
-__asm(".symver log2f_old,log2f@GLIBC_2.3");
-# else
-__asm(".symver log2f_old,log2f@GLIBC_2.17");
-# endif
-#elif defined(__riscv)
-__asm(".symver log2f_old,log2f@GLIBC_2.27");
-#endif
-extern "C" float __wrap_log2f(float x)
-{
- return log2f_old(x);
-}
diff --git a/src/index/base.h b/src/index/base.h
index df4bdff1ea..1390e3e570 100644
--- a/src/index/base.h
+++ b/src/index/base.h
@@ -6,11 +6,10 @@
#define BITCOIN_INDEX_BASE_H
#include <dbwrapper.h>
-#include <primitives/block.h>
-#include <primitives/transaction.h>
#include <threadinterrupt.h>
#include <validationinterface.h>
+class CBlock;
class CBlockIndex;
class CChainState;
diff --git a/src/index/txindex.cpp b/src/index/txindex.cpp
index cde9821f3d..209785d487 100644
--- a/src/index/txindex.cpp
+++ b/src/index/txindex.cpp
@@ -2,18 +2,14 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
-#include <index/disktxpos.h>
#include <index/txindex.h>
+
+#include <index/disktxpos.h>
#include <node/blockstorage.h>
-#include <node/ui_interface.h>
-#include <shutdown.h>
#include <util/system.h>
-#include <util/translation.h>
#include <validation.h>
-constexpr uint8_t DB_BEST_BLOCK{'B'};
constexpr uint8_t DB_TXINDEX{'t'};
-constexpr uint8_t DB_TXINDEX_BLOCK{'T'};
std::unique_ptr<TxIndex> g_txindex;
@@ -30,10 +26,6 @@ public:
/// Write a batch of transaction positions to the DB.
bool WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_pos);
-
- /// Migrate txindex data from the block tree DB, where it may be for older nodes that have not
- /// been upgraded yet to the new database.
- bool MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator& best_locator);
};
TxIndex::DB::DB(size_t n_cache_size, bool f_memory, bool f_wipe) :
@@ -54,163 +46,12 @@ bool TxIndex::DB::WriteTxs(const std::vector<std::pair<uint256, CDiskTxPos>>& v_
return WriteBatch(batch);
}
-/*
- * Safely persist a transfer of data from the old txindex database to the new one, and compact the
- * range of keys updated. This is used internally by MigrateData.
- */
-static void WriteTxIndexMigrationBatches(CDBWrapper& newdb, CDBWrapper& olddb,
- CDBBatch& batch_newdb, CDBBatch& batch_olddb,
- const std::pair<uint8_t, uint256>& begin_key,
- const std::pair<uint8_t, uint256>& end_key)
-{
- // Sync new DB changes to disk before deleting from old DB.
- newdb.WriteBatch(batch_newdb, /*fSync=*/ true);
- olddb.WriteBatch(batch_olddb);
- olddb.CompactRange(begin_key, end_key);
-
- batch_newdb.Clear();
- batch_olddb.Clear();
-}
-
-bool TxIndex::DB::MigrateData(CBlockTreeDB& block_tree_db, const CBlockLocator& best_locator)
-{
- // The prior implementation of txindex was always in sync with block index
- // and presence was indicated with a boolean DB flag. If the flag is set,
- // this means the txindex from a previous version is valid and in sync with
- // the chain tip. The first step of the migration is to unset the flag and
- // write the chain hash to a separate key, DB_TXINDEX_BLOCK. After that, the
- // index entries are copied over in batches to the new database. Finally,
- // DB_TXINDEX_BLOCK is erased from the old database and the block hash is
- // written to the new database.
- //
- // Unsetting the boolean flag ensures that if the node is downgraded to a
- // previous version, it will not see a corrupted, partially migrated index
- // -- it will see that the txindex is disabled. When the node is upgraded
- // again, the migration will pick up where it left off and sync to the block
- // with hash DB_TXINDEX_BLOCK.
- bool f_legacy_flag = false;
- block_tree_db.ReadFlag("txindex", f_legacy_flag);
- if (f_legacy_flag) {
- if (!block_tree_db.Write(DB_TXINDEX_BLOCK, best_locator)) {
- return error("%s: cannot write block indicator", __func__);
- }
- if (!block_tree_db.WriteFlag("txindex", false)) {
- return error("%s: cannot write block index db flag", __func__);
- }
- }
-
- CBlockLocator locator;
- if (!block_tree_db.Read(DB_TXINDEX_BLOCK, locator)) {
- return true;
- }
-
- int64_t count = 0;
- LogPrintf("Upgrading txindex database... [0%%]\n");
- uiInterface.ShowProgress(_("Upgrading txindex database").translated, 0, true);
- int report_done = 0;
- const size_t batch_size = 1 << 24; // 16 MiB
-
- CDBBatch batch_newdb(*this);
- CDBBatch batch_olddb(block_tree_db);
-
- std::pair<uint8_t, uint256> key;
- std::pair<uint8_t, uint256> begin_key{DB_TXINDEX, uint256()};
- std::pair<uint8_t, uint256> prev_key = begin_key;
-
- bool interrupted = false;
- std::unique_ptr<CDBIterator> cursor(block_tree_db.NewIterator());
- for (cursor->Seek(begin_key); cursor->Valid(); cursor->Next()) {
- if (ShutdownRequested()) {
- interrupted = true;
- break;
- }
-
- if (!cursor->GetKey(key)) {
- return error("%s: cannot get key from valid cursor", __func__);
- }
- if (key.first != DB_TXINDEX) {
- break;
- }
-
- // Log progress every 10%.
- if (++count % 256 == 0) {
- // Since txids are uniformly random and traversed in increasing order, the high 16 bits
- // of the hash can be used to estimate the current progress.
- const uint256& txid = key.second;
- uint32_t high_nibble =
- (static_cast<uint32_t>(*(txid.begin() + 0)) << 8) +
- (static_cast<uint32_t>(*(txid.begin() + 1)) << 0);
- int percentage_done = (int)(high_nibble * 100.0 / 65536.0 + 0.5);
-
- uiInterface.ShowProgress(_("Upgrading txindex database").translated, percentage_done, true);
- if (report_done < percentage_done/10) {
- LogPrintf("Upgrading txindex database... [%d%%]\n", percentage_done);
- report_done = percentage_done/10;
- }
- }
-
- CDiskTxPos value;
- if (!cursor->GetValue(value)) {
- return error("%s: cannot parse txindex record", __func__);
- }
- batch_newdb.Write(key, value);
- batch_olddb.Erase(key);
-
- if (batch_newdb.SizeEstimate() > batch_size || batch_olddb.SizeEstimate() > batch_size) {
- // NOTE: it's OK to delete the key pointed at by the current DB cursor while iterating
- // because LevelDB iterators are guaranteed to provide a consistent view of the
- // underlying data, like a lightweight snapshot.
- WriteTxIndexMigrationBatches(*this, block_tree_db,
- batch_newdb, batch_olddb,
- prev_key, key);
- prev_key = key;
- }
- }
-
- // If these final DB batches complete the migration, write the best block
- // hash marker to the new database and delete from the old one. This signals
- // that the former is fully caught up to that point in the blockchain and
- // that all txindex entries have been removed from the latter.
- if (!interrupted) {
- batch_olddb.Erase(DB_TXINDEX_BLOCK);
- batch_newdb.Write(DB_BEST_BLOCK, locator);
- }
-
- WriteTxIndexMigrationBatches(*this, block_tree_db,
- batch_newdb, batch_olddb,
- begin_key, key);
-
- if (interrupted) {
- LogPrintf("[CANCELLED].\n");
- return false;
- }
-
- uiInterface.ShowProgress("", 100, false);
-
- LogPrintf("[DONE].\n");
- return true;
-}
-
TxIndex::TxIndex(size_t n_cache_size, bool f_memory, bool f_wipe)
: m_db(std::make_unique<TxIndex::DB>(n_cache_size, f_memory, f_wipe))
{}
TxIndex::~TxIndex() {}
-bool TxIndex::Init()
-{
- LOCK(cs_main);
-
- // Attempt to migrate txindex from the old database to the new one. Even if
- // chain_tip is null, the node could be reindexing and we still want to
- // delete txindex records in the old database.
- if (!m_db->MigrateData(*m_chainstate->m_blockman.m_block_tree_db, m_chainstate->m_chain.GetLocator())) {
- return false;
- }
-
- return BaseIndex::Init();
-}
-
bool TxIndex::WriteBlock(const CBlock& block, const CBlockIndex* pindex)
{
// Exclude genesis block transaction because outputs are not spendable.
diff --git a/src/index/txindex.h b/src/index/txindex.h
index 8202c3c951..59375bc204 100644
--- a/src/index/txindex.h
+++ b/src/index/txindex.h
@@ -5,9 +5,7 @@
#ifndef BITCOIN_INDEX_TXINDEX_H
#define BITCOIN_INDEX_TXINDEX_H
-#include <chain.h>
#include <index/base.h>
-#include <txdb.h>
/**
* TxIndex is used to look up transactions included in the blockchain by hash.
@@ -23,9 +21,6 @@ private:
const std::unique_ptr<DB> m_db;
protected:
- /// Override base class init to migrate from old database.
- bool Init() override;
-
bool WriteBlock(const CBlock& block, const CBlockIndex* pindex) override;
BaseIndex::DB& GetDB() const override;
diff --git a/src/init.cpp b/src/init.cpp
index 636b089cda..ff36ec805c 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1567,6 +1567,10 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
// ********************************************************* Step 8: start indexers
if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
+ if (const auto error{CheckLegacyTxindex(*Assert(chainman.m_blockman.m_block_tree_db))}) {
+ return InitError(*error);
+ }
+
g_txindex = std::make_unique<TxIndex>(nTxIndexCache, false, fReindex);
if (!g_txindex->Start(chainman.ActiveChainstate())) {
return false;
diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h
index eceede3c8f..9a97cad1f8 100644
--- a/src/interfaces/chain.h
+++ b/src/interfaces/chain.h
@@ -177,7 +177,7 @@ public:
std::string& err_string) = 0;
//! Calculate mempool ancestor and descendant counts for the given transaction.
- virtual void getTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants) = 0;
+ virtual void getTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants, size_t* ancestorsize = nullptr, CAmount* ancestorfees = nullptr) = 0;
//! Get the node's package limits.
//! Currently only returns the ancestor and descendant count limits, but could be enhanced to
diff --git a/src/key.h b/src/key.h
index 92cbc1e899..9b94baa026 100644
--- a/src/key.h
+++ b/src/key.h
@@ -17,7 +17,6 @@
/**
- * secure_allocator is defined in allocators.h
* CPrivKey is a serialized private key, with all parameters included
* (SIZE bytes)
*/
diff --git a/src/miner.cpp b/src/miner.cpp
index 168ade5507..38c7b4b8cc 100644
--- a/src/miner.cpp
+++ b/src/miner.cpp
@@ -237,7 +237,7 @@ void BlockAssembler::AddToBlock(CTxMemPool::txiter iter)
bool fPrintPriority = gArgs.GetBoolArg("-printpriority", DEFAULT_PRINTPRIORITY);
if (fPrintPriority) {
- LogPrintf("fee %s txid %s\n",
+ LogPrintf("fee rate %s txid %s\n",
CFeeRate(iter->GetModifiedFee(), iter->GetTxSize()).ToString(),
iter->GetTx().GetHash().ToString());
}
diff --git a/src/net.cpp b/src/net.cpp
index c72cd75ba7..cc8f4c4316 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -192,8 +192,8 @@ CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices)
static int GetnScore(const CService& addr)
{
LOCK(cs_mapLocalHost);
- if (mapLocalHost.count(addr) == 0) return 0;
- return mapLocalHost[addr].nScore;
+ const auto it = mapLocalHost.find(addr);
+ return (it != mapLocalHost.end()) ? it->second.nScore : 0;
}
// Is our peer's addrLocal potentially useful as an external IP source?
@@ -245,10 +245,10 @@ bool AddLocal(const CService& addr, int nScore)
{
LOCK(cs_mapLocalHost);
- bool fAlready = mapLocalHost.count(addr) > 0;
- LocalServiceInfo &info = mapLocalHost[addr];
- if (!fAlready || nScore >= info.nScore) {
- info.nScore = nScore + (fAlready ? 1 : 0);
+ const auto [it, is_newly_added] = mapLocalHost.emplace(addr, LocalServiceInfo());
+ LocalServiceInfo &info = it->second;
+ if (is_newly_added || nScore >= info.nScore) {
+ info.nScore = nScore + (is_newly_added ? 0 : 1);
info.nPort = addr.GetPort();
}
}
@@ -290,12 +290,10 @@ bool IsReachable(const CNetAddr &addr)
/** vote for a local address */
bool SeenLocal(const CService& addr)
{
- {
- LOCK(cs_mapLocalHost);
- if (mapLocalHost.count(addr) == 0)
- return false;
- mapLocalHost[addr].nScore++;
- }
+ LOCK(cs_mapLocalHost);
+ const auto it = mapLocalHost.find(addr);
+ if (it == mapLocalHost.end()) return false;
+ ++it->second.nScore;
return true;
}
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 3ad34e83ba..80655c61e7 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -2909,13 +2909,13 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
return;
}
- // We won't accept tx inv's if we're in blocks-only mode, or this is a
+ // Reject tx INVs when the -blocksonly setting is enabled, or this is a
// block-relay-only peer
- bool fBlocksOnly = m_ignore_incoming_txs || (pfrom.m_tx_relay == nullptr);
+ bool reject_tx_invs{m_ignore_incoming_txs || (pfrom.m_tx_relay == nullptr)};
// Allow peers with relay permission to send data other than blocks in blocks only mode
if (pfrom.HasPermission(NetPermissionFlags::Relay)) {
- fBlocksOnly = false;
+ reject_tx_invs = false;
}
LOCK(cs_main);
@@ -2954,7 +2954,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
LogPrint(BCLog::NET, "got inv: %s %s peer=%d\n", inv.ToString(), fAlreadyHave ? "have" : "new", pfrom.GetId());
pfrom.AddKnownTx(inv.hash);
- if (fBlocksOnly) {
+ if (reject_tx_invs) {
LogPrint(BCLog::NET, "transaction (%s) inv sent in violation of protocol, disconnecting peer=%d\n", inv.hash.ToString(), pfrom.GetId());
pfrom.fDisconnect = true;
return;
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index 90f7ba191d..5ddcf95c84 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -394,18 +394,14 @@ bool ReadBlockFromDisk(CBlock& block, const FlatFilePos& pos, const Consensus::P
bool ReadBlockFromDisk(CBlock& block, const CBlockIndex* pindex, const Consensus::Params& consensusParams)
{
- FlatFilePos blockPos;
- {
- LOCK(cs_main);
- blockPos = pindex->GetBlockPos();
- }
+ const FlatFilePos block_pos{WITH_LOCK(cs_main, return pindex->GetBlockPos())};
- if (!ReadBlockFromDisk(block, blockPos, consensusParams)) {
+ if (!ReadBlockFromDisk(block, block_pos, consensusParams)) {
return false;
}
if (block.GetHash() != pindex->GetBlockHash()) {
return error("ReadBlockFromDisk(CBlock&, CBlockIndex*): GetHash() doesn't match index for %s at %s",
- pindex->ToString(), pindex->GetBlockPos().ToString());
+ pindex->ToString(), block_pos.ToString());
}
return true;
}
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index c62d7e5d0b..d7860f0115 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -575,11 +575,11 @@ public:
// that Chain clients do not need to know about.
return TransactionError::OK == err;
}
- void getTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants) override
+ void getTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants, size_t* ancestorsize, CAmount* ancestorfees) override
{
ancestors = descendants = 0;
if (!m_node.mempool) return;
- m_node.mempool->GetTransactionAncestry(txid, ancestors, descendants);
+ m_node.mempool->GetTransactionAncestry(txid, ancestors, descendants, ancestorsize, ancestorfees);
}
void getPackageLimits(unsigned int& limit_ancestor_count, unsigned int& limit_descendant_count) override
{
diff --git a/src/rpc/client.cpp b/src/rpc/client.cpp
index 4357ab2bb3..d6943e066a 100644
--- a/src/rpc/client.cpp
+++ b/src/rpc/client.cpp
@@ -192,6 +192,7 @@ static const CRPCConvertParam vRPCConvertParams[] =
{ "unloadwallet", 1, "load_on_startup"},
{ "getnodeaddresses", 0, "count"},
{ "addpeeraddress", 1, "port"},
+ { "addpeeraddress", 2, "tried"},
{ "stop", 0, "wait" },
};
// clang-format on
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index 0f554ec5e7..227eec722f 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -921,6 +921,7 @@ static RPCHelpMan addpeeraddress()
{
{"address", RPCArg::Type::STR, RPCArg::Optional::NO, "The IP address of the peer"},
{"port", RPCArg::Type::NUM, RPCArg::Optional::NO, "The port of the peer"},
+ {"tried", RPCArg::Type::BOOL, RPCArg::Default{false}, "If true, attempt to add the peer to the tried addresses table"},
},
RPCResult{
RPCResult::Type::OBJ, "", "",
@@ -929,8 +930,8 @@ static RPCHelpMan addpeeraddress()
},
},
RPCExamples{
- HelpExampleCli("addpeeraddress", "\"1.2.3.4\" 8333")
- + HelpExampleRpc("addpeeraddress", "\"1.2.3.4\", 8333")
+ HelpExampleCli("addpeeraddress", "\"1.2.3.4\" 8333 true")
+ + HelpExampleRpc("addpeeraddress", "\"1.2.3.4\", 8333, true")
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
@@ -941,6 +942,7 @@ static RPCHelpMan addpeeraddress()
const std::string& addr_string{request.params[0].get_str()};
const uint16_t port{static_cast<uint16_t>(request.params[1].get_int())};
+ const bool tried{request.params[2].isTrue()};
UniValue obj(UniValue::VOBJ);
CNetAddr net_addr;
@@ -951,7 +953,13 @@ static RPCHelpMan addpeeraddress()
address.nTime = GetAdjustedTime();
// The source address is set equal to the address. This is equivalent to the peer
// announcing itself.
- if (node.addrman->Add({address}, address)) success = true;
+ if (node.addrman->Add({address}, address)) {
+ success = true;
+ if (tried) {
+ // Attempt to move the address to the tried addresses table.
+ node.addrman->Good(address);
+ }
+ }
}
obj.pushKV("success", success);
diff --git a/src/test/README.md b/src/test/README.md
index 57cda26d7c..d03411c3ed 100644
--- a/src/test/README.md
+++ b/src/test/README.md
@@ -74,3 +74,29 @@ start debugging, just like you would with any other program:
```bash
gdb src/test/test_bitcoin
```
+
+#### Segmentation faults
+
+If you hit a segmentation fault during a test run, you can diagnose where the fault
+is happening by running `gdb ./src/test/test_bitcoin` and then using the `bt` command
+within gdb.
+
+Another tool that can be used to resolve segmentation faults is
+[valgrind](https://valgrind.org/).
+
+If for whatever reason you want to produce a core dump file for this fault, you can do
+that as well. By default, the boost test runner will intercept system errors and not
+produce a core file. To bypass this, add `--catch_system_errors=no` to the
+`test_bitcoin` arguments and ensure that your ulimits are set properly (e.g. `ulimit -c
+unlimited`).
+
+Running the tests and hitting a segmentation fault should now produce a file called `core`
+(on Linux platforms, the file name will likely depend on the contents of
+`/proc/sys/kernel/core_pattern`).
+
+You can then explore the core dump using
+``` bash
+gdb src/test/test_bitcoin core
+
+(gdb) bt # produce a backtrace for where a segfault occurred
+```
diff --git a/src/test/data/README.md b/src/test/data/README.md
index 2463daa42a..a05d9c668b 100644
--- a/src/test/data/README.md
+++ b/src/test/data/README.md
@@ -8,5 +8,5 @@ License
The data files in this directory are distributed under the MIT software
license, see the accompanying file COPYING or
-http://www.opensource.org/licenses/mit-license.php.
+https://www.opensource.org/licenses/mit-license.php.
diff --git a/src/test/fuzz/addrman.cpp b/src/test/fuzz/addrman.cpp
index fdbfb3b93b..95c5a99c1b 100644
--- a/src/test/fuzz/addrman.cpp
+++ b/src/test/fuzz/addrman.cpp
@@ -85,7 +85,7 @@ public:
// 0, 1, 2, 3 corresponding to 0%, 100%, 50%, 33%
const size_t n = m_fuzzed_data_provider.ConsumeIntegralInRange<size_t>(0, 3);
- const size_t num_sources = m_fuzzed_data_provider.ConsumeIntegralInRange<size_t>(10, 50);
+ const size_t num_sources = m_fuzzed_data_provider.ConsumeIntegralInRange<size_t>(1, 50);
CNetAddr prev_source;
// Use insecure_rand inside the loops instead of m_fuzzed_data_provider because when
// the latter is exhausted it just returns 0.
@@ -96,31 +96,12 @@ public:
for (size_t j = 0; j < num_addresses; ++j) {
const auto addr = CAddress{CService{RandAddr(), 8333}, NODE_NETWORK};
const auto time_penalty = insecure_rand.randrange(100000001);
-#if 1
- // 2.83 sec to fill.
- if (n > 0 && mapInfo.size() % n == 0 && mapAddr.find(addr) == mapAddr.end()) {
- // Add to the "tried" table (if the bucket slot is free).
- const CAddrInfo dummy{addr, source};
- const int bucket = dummy.GetTriedBucket(nKey, m_asmap);
- const int bucket_pos = dummy.GetBucketPosition(nKey, false, bucket);
- if (vvTried[bucket][bucket_pos] == -1) {
- int id;
- CAddrInfo* addr_info = Create(addr, source, &id);
- vvTried[bucket][bucket_pos] = id;
- addr_info->fInTried = true;
- ++nTried;
- }
- } else {
- // Add to the "new" table.
- Add_(addr, source, time_penalty);
- }
-#else
- // 261.91 sec to fill.
Add_(addr, source, time_penalty);
+
if (n > 0 && mapInfo.size() % n == 0) {
Good_(addr, false, GetTime());
}
-#endif
+
// Add 10% of the addresses from more than one source.
if (insecure_rand.randrange(10) == 0 && prev_source.IsValid()) {
Add_(addr, prev_source, time_penalty);
diff --git a/src/txdb.cpp b/src/txdb.cpp
index 4b76bee5ab..cfa864668a 100644
--- a/src/txdb.cpp
+++ b/src/txdb.cpp
@@ -5,6 +5,7 @@
#include <txdb.h>
+#include <chain.h>
#include <node/ui_interface.h>
#include <pow.h>
#include <random.h>
@@ -27,6 +28,28 @@ static constexpr uint8_t DB_FLAG{'F'};
static constexpr uint8_t DB_REINDEX_FLAG{'R'};
static constexpr uint8_t DB_LAST_BLOCK{'l'};
+// Keys used in previous version that might still be found in the DB:
+static constexpr uint8_t DB_TXINDEX_BLOCK{'T'};
+// uint8_t DB_TXINDEX{'t'}
+
+std::optional<bilingual_str> CheckLegacyTxindex(CBlockTreeDB& block_tree_db)
+{
+ CBlockLocator ignored{};
+ if (block_tree_db.Read(DB_TXINDEX_BLOCK, ignored)) {
+ return _("The -txindex upgrade started by a previous version can not be completed. Restart with the previous version or run a full -reindex.");
+ }
+ bool txindex_legacy_flag{false};
+ block_tree_db.ReadFlag("txindex", txindex_legacy_flag);
+ if (txindex_legacy_flag) {
+ // Disable legacy txindex and warn once about occupied disk space
+ if (!block_tree_db.WriteFlag("txindex", false)) {
+ return Untranslated("Failed to write block index db flag 'txindex'='0'");
+ }
+ return _("The block index db contains a legacy 'txindex'. To clear the occupied disk space, run a full -reindex, otherwise ignore this error. This error message will not be displayed again.");
+ }
+ return std::nullopt;
+}
+
namespace {
struct CoinEntry {
diff --git a/src/txdb.h b/src/txdb.h
index 845d80788f..1bdce71126 100644
--- a/src/txdb.h
+++ b/src/txdb.h
@@ -8,17 +8,20 @@
#include <coins.h>
#include <dbwrapper.h>
-#include <chain.h>
-#include <primitives/block.h>
#include <memory>
+#include <optional>
#include <string>
#include <utility>
#include <vector>
+class CBlockFileInfo;
class CBlockIndex;
-class CCoinsViewDBCursor;
class uint256;
+namespace Consensus {
+struct Params;
+};
+struct bilingual_str;
//! -dbcache default (MiB)
static const int64_t nDefaultDbCache = 450;
@@ -86,4 +89,6 @@ public:
bool LoadBlockIndexGuts(const Consensus::Params& consensusParams, std::function<CBlockIndex*(const uint256&)> insertBlockIndex);
};
+std::optional<bilingual_str> CheckLegacyTxindex(CBlockTreeDB& block_tree_db);
+
#endif // BITCOIN_TXDB_H
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index d5a888ac67..3cf62f3c0e 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -924,7 +924,7 @@ void CTxMemPool::PrioritiseTransaction(const uint256& hash, const CAmount& nFeeD
++nTransactionsUpdated;
}
}
- LogPrintf("PrioritiseTransaction: %s feerate += %s\n", hash.ToString(), FormatMoney(nFeeDelta));
+ LogPrintf("PrioritiseTransaction: %s fee += %s\n", hash.ToString(), FormatMoney(nFeeDelta));
}
void CTxMemPool::ApplyDelta(const uint256& hash, CAmount &nFeeDelta) const
@@ -1174,12 +1174,14 @@ uint64_t CTxMemPool::CalculateDescendantMaximum(txiter entry) const {
return maximum;
}
-void CTxMemPool::GetTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants) const {
+void CTxMemPool::GetTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants, size_t* const ancestorsize, CAmount* const ancestorfees) const {
LOCK(cs);
auto it = mapTx.find(txid);
ancestors = descendants = 0;
if (it != mapTx.end()) {
ancestors = it->GetCountWithAncestors();
+ if (ancestorsize) *ancestorsize = it->GetSizeWithAncestors();
+ if (ancestorfees) *ancestorfees = it->GetModFeesWithAncestors();
descendants = CalculateDescendantMaximum(it);
}
}
diff --git a/src/txmempool.h b/src/txmempool.h
index 0a84a6e6b1..d1308aeeed 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -748,8 +748,10 @@ public:
/**
* Calculate the ancestor and descendant count for the given transaction.
* The counts include the transaction itself.
+ * When ancestors is non-zero (ie, the transaction itself is in the mempool),
+ * ancestorsize and ancestorfees will also be set to the appropriate values.
*/
- void GetTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants) const;
+ void GetTransactionAncestry(const uint256& txid, size_t& ancestors, size_t& descendants, size_t* ancestorsize = nullptr, CAmount* ancestorfees = nullptr) const;
/** @returns true if the mempool is fully loaded */
bool IsLoaded() const;
diff --git a/src/validation.cpp b/src/validation.cpp
index 8696f1af85..cc87f98913 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -192,7 +192,7 @@ bool CheckFinalTx(const CBlockIndex* active_chain_tip, const CTransaction &tx, i
// CheckFinalTx() uses active_chain_tip.Height()+1 to evaluate
// nLockTime because when IsFinalTx() is called within
- // CBlock::AcceptBlock(), the height of the block *being*
+ // AcceptBlock(), the height of the block *being*
// evaluated is what is used. Thus if we want to know if a
// transaction can be part of the *next* block, we need to call
// IsFinalTx() with one more than active_chain_tip.Height().
diff --git a/src/validation.h b/src/validation.h
index d4fcac1d48..078b988052 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -11,7 +11,9 @@
#endif
#include <amount.h>
+#include <arith_uint256.h>
#include <attributes.h>
+#include <chain.h>
#include <coins.h>
#include <consensus/validation.h>
#include <crypto/common.h> // for ReadLE64
@@ -21,10 +23,11 @@
#include <policy/packages.h>
#include <protocol.h> // For CMessageHeader::MessageStartChars
#include <script/script_error.h>
+#include <serialize.h>
#include <sync.h>
-#include <txmempool.h> // For CTxMemPool::cs
#include <txdb.h>
-#include <serialize.h>
+#include <txmempool.h> // For CTxMemPool::cs
+#include <uint256.h>
#include <util/check.h>
#include <util/hasher.h>
#include <util/translation.h>
@@ -42,7 +45,6 @@
class CChainState;
class BlockValidationState;
-class CBlockIndex;
class CBlockTreeDB;
class CBlockUndo;
class CChainParams;
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index e922f4ede9..7d194ae262 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -2960,6 +2960,9 @@ static RPCHelpMan listunspent()
{RPCResult::Type::STR, "scriptPubKey", "the script key"},
{RPCResult::Type::STR_AMOUNT, "amount", "the transaction output amount in " + CURRENCY_UNIT},
{RPCResult::Type::NUM, "confirmations", "The number of confirmations"},
+ {RPCResult::Type::NUM, "ancestorcount", /* optional */ true, "The number of in-mempool ancestor transactions, including this one (if transaction is in the mempool)"},
+ {RPCResult::Type::NUM, "ancestorsize", /* optional */ true, "The virtual transaction size of in-mempool ancestors, including this one (if transaction is in the mempool)"},
+ {RPCResult::Type::STR_AMOUNT, "ancestorfees", /* optional */ true, "The total fees of in-mempool ancestors (including this one) with fee deltas used for mining priority in " + CURRENCY_ATOM + " (if transaction is in the mempool)"},
{RPCResult::Type::STR_HEX, "redeemScript", "The redeemScript if scriptPubKey is P2SH"},
{RPCResult::Type::STR, "witnessScript", "witnessScript if the scriptPubKey is P2WSH or P2SH-P2WSH"},
{RPCResult::Type::BOOL, "spendable", "Whether we have the private keys to spend this output"},
@@ -3126,6 +3129,16 @@ static RPCHelpMan listunspent()
entry.pushKV("scriptPubKey", HexStr(scriptPubKey));
entry.pushKV("amount", ValueFromAmount(out.tx->tx->vout[out.i].nValue));
entry.pushKV("confirmations", out.nDepth);
+ if (!out.nDepth) {
+ size_t ancestor_count, descendant_count, ancestor_size;
+ CAmount ancestor_fees;
+ pwallet->chain().getTransactionAncestry(out.tx->GetHash(), ancestor_count, descendant_count, &ancestor_size, &ancestor_fees);
+ if (ancestor_count) {
+ entry.pushKV("ancestorcount", uint64_t(ancestor_count));
+ entry.pushKV("ancestorsize", uint64_t(ancestor_size));
+ entry.pushKV("ancestorfees", uint64_t(ancestor_fees));
+ }
+ }
entry.pushKV("spendable", out.fSpendable);
entry.pushKV("solvable", out.fSolvable);
if (out.fSolvable) {
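The three new listunspent fields are populated only for unconfirmed outputs (out.nDepth == 0) whose transaction is still tracked by the mempool, and ancestorfees is reported in satoshis. A minimal sketch of consuming them over RPC, assuming a test-framework style node handle; the field names come from the hunk above, the helper itself is purely illustrative:

# Sketch only: inspect the ancestor statistics that listunspent now reports
# for unconfirmed outputs (the fields are absent for confirmed outputs).
def report_unconfirmed_ancestry(node):
    for utxo in node.listunspent(minconf=0):
        if utxo["confirmations"] != 0:
            continue  # ancestor* fields are only reported while the tx is unconfirmed
        if "ancestorcount" not in utxo:
            continue  # the tx may have left the mempool between calls
        print(f"{utxo['txid']}:{utxo['vout']}: "
              f"{utxo['ancestorcount']} in-mempool ancestors, "
              f"{utxo['ancestorsize']} vbytes, {utxo['ancestorfees']} sat in ancestor fees")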
diff --git a/src/wallet/transaction.h b/src/wallet/transaction.h
index 094221adf2..0cd91b9ebe 100644
--- a/src/wallet/transaction.h
+++ b/src/wallet/transaction.h
@@ -19,25 +19,6 @@
typedef std::map<std::string, std::string> mapValue_t;
-
-static inline void ReadOrderPos(int64_t& nOrderPos, mapValue_t& mapValue)
-{
- if (!mapValue.count("n"))
- {
- nOrderPos = -1; // TODO: calculate elsewhere
- return;
- }
- nOrderPos = atoi64(mapValue["n"]);
-}
-
-
-static inline void WriteOrderPos(const int64_t& nOrderPos, mapValue_t& mapValue)
-{
- if (nOrderPos == -1)
- return;
- mapValue["n"] = ToString(nOrderPos);
-}
-
/** Legacy class used for deserializing vtxPrev for backwards compatibility.
* vtxPrev was removed in commit 93a18a3650292afbb441a47d1fa1b94aeb0164e3,
* but old wallet.dat files may still contain vtxPrev vectors of CMerkleTxs.
@@ -192,7 +173,9 @@ public:
mapValue_t mapValueCopy = mapValue;
mapValueCopy["fromaccount"] = "";
- WriteOrderPos(nOrderPos, mapValueCopy);
+ if (nOrderPos != -1) {
+ mapValueCopy["n"] = ToString(nOrderPos);
+ }
if (nTimeSmart) {
mapValueCopy["timesmart"] = strprintf("%u", nTimeSmart);
}
@@ -232,8 +215,10 @@ public:
setConfirmed();
}
- ReadOrderPos(nOrderPos, mapValue);
- nTimeSmart = mapValue.count("timesmart") ? (unsigned int)atoi64(mapValue["timesmart"]) : 0;
+ const auto it_op = mapValue.find("n");
+ nOrderPos = (it_op != mapValue.end()) ? atoi64(it_op->second) : -1;
+ const auto it_ts = mapValue.find("timesmart");
+ nTimeSmart = (it_ts != mapValue.end()) ? static_cast<unsigned int>(atoi64(it_ts->second)) : 0;
mapValue.erase("fromaccount");
mapValue.erase("spent");
diff --git a/test/functional/README.md b/test/functional/README.md
index d830ba0334..926810cf03 100644
--- a/test/functional/README.md
+++ b/test/functional/README.md
@@ -188,5 +188,5 @@ perf report -i /path/to/datadir/send-big-msgs.perf.data.xxxx --stdio | c++filt |
#### See also:
- [Installing perf](https://askubuntu.com/q/50145)
-- [Perf examples](http://www.brendangregg.com/perf.html)
+- [Perf examples](https://www.brendangregg.com/perf.html)
- [Hotspot](https://github.com/KDAB/hotspot): a GUI for perf output analysis
diff --git a/test/functional/feature_addrman.py b/test/functional/feature_addrman.py
index ee421c89b5..42afd74ac9 100755
--- a/test/functional/feature_addrman.py
+++ b/test/functional/feature_addrman.py
@@ -14,22 +14,30 @@ from test_framework.test_node import ErrorMatch
from test_framework.util import assert_equal
-def serialize_addrman(*, format=1, lowest_compatible=3):
+def serialize_addrman(
+ *,
+ format=1,
+ lowest_compatible=3,
+ net_magic="regtest",
+ len_new=None,
+ len_tried=None,
+ mock_checksum=None,
+):
new = []
tried = []
INCOMPATIBILITY_BASE = 32
- r = MAGIC_BYTES["regtest"]
+ r = MAGIC_BYTES[net_magic]
r += struct.pack("B", format)
r += struct.pack("B", INCOMPATIBILITY_BASE + lowest_compatible)
r += ser_uint256(1)
- r += struct.pack("i", len(new))
- r += struct.pack("i", len(tried))
+ r += struct.pack("i", len_new or len(new))
+ r += struct.pack("i", len_tried or len(tried))
ADDRMAN_NEW_BUCKET_COUNT = 1 << 10
r += struct.pack("i", ADDRMAN_NEW_BUCKET_COUNT ^ (1 << 30))
for _ in range(ADDRMAN_NEW_BUCKET_COUNT):
r += struct.pack("i", 0)
checksum = hash256(r)
- r += checksum
+ r += mock_checksum or checksum
return r
@@ -70,7 +78,7 @@ class AddrmanTest(BitcoinTestFramework):
match=ErrorMatch.FULL_REGEX,
)
- self.log.info("Check that corrupt addrman cannot be read")
+ self.log.info("Check that corrupt addrman cannot be read (EOF)")
self.stop_node(0)
with open(peers_dat, "wb") as f:
f.write(serialize_addrman()[:-1])
@@ -79,6 +87,38 @@ class AddrmanTest(BitcoinTestFramework):
match=ErrorMatch.FULL_REGEX,
)
+ self.log.info("Check that corrupt addrman cannot be read (magic)")
+ self.stop_node(0)
+ write_addrman(peers_dat, net_magic="signet")
+ self.nodes[0].assert_start_raises_init_error(
+ expected_msg=init_error("Invalid network magic number"),
+ match=ErrorMatch.FULL_REGEX,
+ )
+
+ self.log.info("Check that corrupt addrman cannot be read (checksum)")
+ self.stop_node(0)
+ write_addrman(peers_dat, mock_checksum=b"ab" * 32)
+ self.nodes[0].assert_start_raises_init_error(
+ expected_msg=init_error("Checksum mismatch, data corrupted"),
+ match=ErrorMatch.FULL_REGEX,
+ )
+
+ self.log.info("Check that corrupt addrman cannot be read (len_tried)")
+ self.stop_node(0)
+ write_addrman(peers_dat, len_tried=-1)
+ self.nodes[0].assert_start_raises_init_error(
+ expected_msg=init_error("Corrupt CAddrMan serialization: nTried=-1, should be in \\[0, 16384\\]:.*"),
+ match=ErrorMatch.FULL_REGEX,
+ )
+
+ self.log.info("Check that corrupt addrman cannot be read (len_new)")
+ self.stop_node(0)
+ write_addrman(peers_dat, len_new=-1)
+ self.nodes[0].assert_start_raises_init_error(
+ expected_msg=init_error("Corrupt CAddrMan serialization: nNew=-1, should be in \\[0, 65536\\]:.*"),
+ match=ErrorMatch.FULL_REGEX,
+ )
+
self.log.info("Check that missing addrman is recreated")
self.stop_node(0)
os.remove(peers_dat)
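The new corruption cases call a write_addrman helper that does not appear in this hunk; presumably it serializes an addrman with the requested overrides and rewrites peers.dat. A minimal sketch under that assumption:

# Assumed shape of the write_addrman() helper used above (not shown in this hunk):
# serialize an addrman with the requested corruptions and overwrite peers.dat.
def write_addrman(peers_dat, **kwargs):
    with open(peers_dat, "wb") as f:
        f.write(serialize_addrman(**kwargs))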
diff --git a/test/functional/feature_asmap.py b/test/functional/feature_asmap.py
index 704dd6126b..2dc1e3a7cb 100755
--- a/test/functional/feature_asmap.py
+++ b/test/functional/feature_asmap.py
@@ -14,9 +14,11 @@ Verify node behaviour and debug log when launching bitcoind in these cases:
4. `bitcoind -asmap/-asmap=` with no file specified, using the default asmap
-5. `bitcoind -asmap` with no file specified and a missing default asmap file
+5. `bitcoind -asmap` restart with an addrman containing new and tried entries
-6. `bitcoind -asmap` with an empty (unparsable) default asmap file
+6. `bitcoind -asmap` with no file specified and a missing default asmap file
+
+7. `bitcoind -asmap` with an empty (unparsable) default asmap file
The tests are order-independent.
@@ -37,6 +39,12 @@ def expected_messages(filename):
class AsmapTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
+ self.extra_args = [["-checkaddrman=1"]] # Do addrman checks on all operations.
+
+ def fill_addrman(self, node_id):
+ """Add 2 tried addresses to the addrman, followed by 2 new addresses."""
+ for addr, tried in [[0, True], [1, True], [2, False], [3, False]]:
+ self.nodes[node_id].addpeeraddress(address=f"101.{addr}.0.0", tried=tried, port=8333)
def test_without_asmap_arg(self):
self.log.info('Test bitcoind with no -asmap arg passed')
@@ -72,6 +80,22 @@ class AsmapTest(BitcoinTestFramework):
self.start_node(0, [arg])
os.remove(self.default_asmap)
+ def test_asmap_interaction_with_addrman_containing_entries(self):
+ self.log.info("Test bitcoind -asmap restart with addrman containing new and tried entries")
+ self.stop_node(0)
+ shutil.copyfile(self.asmap_raw, self.default_asmap)
+ self.start_node(0, ["-asmap", "-checkaddrman=1"])
+ self.fill_addrman(node_id=0)
+ self.restart_node(0, ["-asmap", "-checkaddrman=1"])
+ with self.node.assert_debug_log(
+ expected_msgs=[
+ "Addrman checks started: new 2, tried 2, total 4",
+ "Addrman checks completed successfully",
+ ]
+ ):
+ self.node.getnodeaddresses() # getnodeaddresses re-runs the addrman checks
+ os.remove(self.default_asmap)
+
def test_default_asmap_with_missing_file(self):
self.log.info('Test bitcoind -asmap with missing default map file')
self.stop_node(0)
@@ -97,6 +121,7 @@ class AsmapTest(BitcoinTestFramework):
self.test_asmap_with_absolute_path()
self.test_asmap_with_relative_path()
self.test_default_asmap()
+ self.test_asmap_interaction_with_addrman_containing_entries()
self.test_default_asmap_with_missing_file()
self.test_empty_asmap()
diff --git a/test/functional/feature_bip68_sequence.py b/test/functional/feature_bip68_sequence.py
index 85522164d2..09cda8444a 100755
--- a/test/functional/feature_bip68_sequence.py
+++ b/test/functional/feature_bip68_sequence.py
@@ -41,10 +41,7 @@ class BIP68Test(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 2
self.extra_args = [
- [
- "-acceptnonstdtxn=1",
- "-peertimeout=9999", # bump because mocktime might cause a disconnect otherwise
- ],
+ ["-acceptnonstdtxn=1"],
["-acceptnonstdtxn=0"],
]
diff --git a/test/functional/feature_coinstatsindex.py b/test/functional/feature_coinstatsindex.py
index 146e776b07..c592d7bd69 100755
--- a/test/functional/feature_coinstatsindex.py
+++ b/test/functional/feature_coinstatsindex.py
@@ -164,7 +164,7 @@ class CoinStatsIndexTest(BitcoinTestFramework):
# Generate and send another tx with an OP_RETURN output (which is unspendable)
tx2 = CTransaction()
tx2.vin.append(CTxIn(COutPoint(int(tx1_txid, 16), n), b''))
- tx2.vout.append(CTxOut(int(20.99 * COIN), CScript([OP_RETURN] + [OP_FALSE]*30)))
+ tx2.vout.append(CTxOut(int(Decimal('20.99') * COIN), CScript([OP_RETURN] + [OP_FALSE]*30)))
tx2_hex = self.nodes[0].signrawtransactionwithwallet(tx2.serialize().hex())['hex']
self.nodes[0].sendrawtransaction(tx2_hex)
@@ -175,16 +175,16 @@ class CoinStatsIndexTest(BitcoinTestFramework):
for hash_option in index_hash_options:
# Check all amounts were registered correctly
res6 = index_node.gettxoutsetinfo(hash_option, 108)
- assert_equal(res6['total_unspendable_amount'], Decimal('70.98999999'))
+ assert_equal(res6['total_unspendable_amount'], Decimal('70.99000000'))
assert_equal(res6['block_info'], {
- 'unspendable': Decimal('20.98999999'),
+ 'unspendable': Decimal('20.99000000'),
'prevout_spent': 111,
'new_outputs_ex_coinbase': Decimal('89.99993620'),
- 'coinbase': Decimal('50.01006381'),
+ 'coinbase': Decimal('50.01006380'),
'unspendables': {
'genesis_block': 0,
'bip30': 0,
- 'scripts': Decimal('20.98999999'),
+ 'scripts': Decimal('20.99000000'),
'unclaimed_rewards': 0
}
})
@@ -206,7 +206,7 @@ class CoinStatsIndexTest(BitcoinTestFramework):
for hash_option in index_hash_options:
res7 = index_node.gettxoutsetinfo(hash_option, 109)
- assert_equal(res7['total_unspendable_amount'], Decimal('80.98999999'))
+ assert_equal(res7['total_unspendable_amount'], Decimal('80.99000000'))
assert_equal(res7['block_info'], {
'unspendable': 10,
'prevout_spent': 0,
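The one-satoshi shift in the expected totals comes straight from the Decimal change above: 20.99 has no exact binary floating-point representation, so int(20.99 * COIN) truncates to 2098999999, while the Decimal product is exactly 2099000000. A quick check, with COIN = 100000000 as defined in test_framework.messages:

from decimal import Decimal

COIN = 100_000_000  # satoshis per coin, as in test_framework.messages

assert int(20.99 * COIN) == 2_098_999_999             # float literal drops a satoshi
assert int(Decimal("20.99") * COIN) == 2_099_000_000  # exact decimal arithmetic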
diff --git a/test/functional/feature_csv_activation.py b/test/functional/feature_csv_activation.py
index 2bfe22267b..d2b3fe45d1 100755
--- a/test/functional/feature_csv_activation.py
+++ b/test/functional/feature_csv_activation.py
@@ -94,7 +94,6 @@ class BIP68_112_113Test(BitcoinTestFramework):
self.num_nodes = 1
self.setup_clean_chain = True
self.extra_args = [[
- '-peertimeout=999999', # bump because mocktime might cause a disconnect otherwise
'-whitelist=noban@127.0.0.1',
'-par=1', # Use only one script thread to get the exact reject reason for testing
]]
diff --git a/test/functional/feature_maxuploadtarget.py b/test/functional/feature_maxuploadtarget.py
index 12bcc2ffc5..ac4d40638e 100755
--- a/test/functional/feature_maxuploadtarget.py
+++ b/test/functional/feature_maxuploadtarget.py
@@ -38,7 +38,6 @@ class MaxUploadTest(BitcoinTestFramework):
self.extra_args = [[
"-maxuploadtarget=800",
"-acceptnonstdtxn=1",
- "-peertimeout=9999", # bump because mocktime might cause a disconnect otherwise
]]
self.supports_cli = False
diff --git a/test/functional/feature_rbf.py b/test/functional/feature_rbf.py
index cb7556feb4..b941061963 100755
--- a/test/functional/feature_rbf.py
+++ b/test/functional/feature_rbf.py
@@ -46,7 +46,7 @@ class ReplaceByFeeTest(BitcoinTestFramework):
# the pre-mined test framework chain contains coinbase outputs to the
# MiniWallet's default address ADDRESS_BCRT1_P2WSH_OP_TRUE in blocks
# 76-100 (see method BitcoinTestFramework._initialize_chain())
- self.wallet.scan_blocks(start=76, num=2)
+ self.wallet.rescan_utxos()
self.log.info("Running test simple doublespend...")
self.test_simple_doublespend()
diff --git a/test/functional/interface_bitcoin_cli.py b/test/functional/interface_bitcoin_cli.py
index 89503adda3..c28186cde7 100755
--- a/test/functional/interface_bitcoin_cli.py
+++ b/test/functional/interface_bitcoin_cli.py
@@ -57,7 +57,7 @@ def cli_get_info_string_to_dict(cli_get_info_string):
if key == 'Wallet' and value == '""':
# Set default wallet("") to empty string
value = ''
- if key == "Proxy" and value == "N/A":
+ if key == "Proxies" and value == "n/a":
# Set N/A to empty string to represent no proxy
value = ''
cli_get_info[key.strip()] = value.strip()
@@ -127,10 +127,17 @@ class TestBitcoinCli(BitcoinTestFramework):
assert_equal(int(cli_get_info['Time offset (s)']), network_info['timeoffset'])
expected_network_info = f"in {network_info['connections_in']}, out {network_info['connections_out']}, total {network_info['connections']}"
assert_equal(cli_get_info["Network"], expected_network_info)
- assert_equal(cli_get_info['Proxy'], network_info['networks'][0]['proxy'])
+ assert_equal(cli_get_info['Proxies'], network_info['networks'][0]['proxy'])
assert_equal(Decimal(cli_get_info['Difficulty']), blockchain_info['difficulty'])
assert_equal(cli_get_info['Chain'], blockchain_info['chain'])
+ self.log.info("Test -getinfo and bitcoin-cli return all proxies")
+ self.restart_node(0, extra_args=["-proxy=127.0.0.1:9050", "-i2psam=127.0.0.1:7656"])
+ network_info = self.nodes[0].getnetworkinfo()
+ cli_get_info_string = self.nodes[0].cli('-getinfo').send_cli()
+ cli_get_info = cli_get_info_string_to_dict(cli_get_info_string)
+ assert_equal(cli_get_info["Proxies"], "127.0.0.1:9050 (ipv4, ipv6, onion), 127.0.0.1:7656 (i2p)")
+
if self.is_wallet_compiled():
self.log.info("Test -getinfo and bitcoin-cli getwalletinfo return expected wallet info")
assert_equal(Decimal(cli_get_info['Balance']), BALANCE)
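The Proxies value asserted above groups the networks that share a proxy into one "host:port (net, net, ...)" entry. A rough sketch of how such a summary could be derived from getnetworkinfo()['networks']; this only illustrates the expected format, it is not the actual bitcoin-cli implementation:

# Sketch: build a Proxies-style summary from getnetworkinfo()['networks'],
# grouping networks that are configured with the same proxy.
def proxies_summary(networks):
    groups = {}  # proxy -> [network names], insertion order preserved
    for net in networks:
        if net["proxy"]:
            groups.setdefault(net["proxy"], []).append(net["name"])
    return ", ".join(f"{proxy} ({', '.join(names)})" for proxy, names in groups.items())

With -proxy=127.0.0.1:9050 and -i2psam=127.0.0.1:7656 this yields the string checked in the hunk above.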
diff --git a/test/functional/mempool_packages.py b/test/functional/mempool_packages.py
index b9344ad6da..c042961937 100755
--- a/test/functional/mempool_packages.py
+++ b/test/functional/mempool_packages.py
@@ -51,12 +51,17 @@ class MempoolPackagesTest(BitcoinTestFramework):
txid = utxo[0]['txid']
vout = utxo[0]['vout']
value = utxo[0]['amount']
+ assert 'ancestorcount' not in utxo[0]
+ assert 'ancestorsize' not in utxo[0]
+ assert 'ancestorfees' not in utxo[0]
fee = Decimal("0.0001")
# MAX_ANCESTORS transactions off a confirmed tx should be fine
chain = []
witness_chain = []
- for _ in range(MAX_ANCESTORS):
+ ancestor_vsize = 0
+ ancestor_fees = Decimal(0)
+ for i in range(MAX_ANCESTORS):
(txid, sent_value) = chain_transaction(self.nodes[0], [txid], [0], value, fee, 1)
value = sent_value
chain.append(txid)
@@ -65,6 +70,15 @@ class MempoolPackagesTest(BitcoinTestFramework):
witnesstx = self.nodes[0].decoderawtransaction(fulltx, True)
witness_chain.append(witnesstx['hash'])
+ # Check that listunspent ancestor{count, size, fees} yield the correct results
+ wallet_unspent = self.nodes[0].listunspent(minconf=0)
+ this_unspent = next(utxo_info for utxo_info in wallet_unspent if utxo_info['txid'] == txid)
+ assert_equal(this_unspent['ancestorcount'], i + 1)
+ ancestor_vsize += self.nodes[0].getrawtransaction(txid=txid, verbose=True)['vsize']
+ assert_equal(this_unspent['ancestorsize'], ancestor_vsize)
+ ancestor_fees -= self.nodes[0].gettransaction(txid=txid)['fee']
+ assert_equal(this_unspent['ancestorfees'], ancestor_fees * COIN)
+
# Wait until mempool transactions have passed initial broadcast (sent inv and received getdata)
# Otherwise, getrawmempool may be inconsistent with getmempoolentry if unbroadcast changes in between
peer_inv_store.wait_for_broadcast(witness_chain)
@@ -77,9 +91,9 @@ class MempoolPackagesTest(BitcoinTestFramework):
descendant_fees = 0
descendant_vsize = 0
- ancestor_vsize = sum([mempool[tx]['vsize'] for tx in mempool])
+ assert_equal(ancestor_vsize, sum([mempool[tx]['vsize'] for tx in mempool]))
ancestor_count = MAX_ANCESTORS
- ancestor_fees = sum([mempool[tx]['fee'] for tx in mempool])
+ assert_equal(ancestor_fees, sum([mempool[tx]['fee'] for tx in mempool]))
descendants = []
ancestors = list(chain)
diff --git a/test/functional/mempool_reorg.py b/test/functional/mempool_reorg.py
index 0ee6af62f6..260b41ef12 100755
--- a/test/functional/mempool_reorg.py
+++ b/test/functional/mempool_reorg.py
@@ -31,7 +31,7 @@ class MempoolCoinbaseTest(BitcoinTestFramework):
self.log.info("Add 4 coinbase utxos to the miniwallet")
# Block 76 contains the first spendable coinbase txs.
first_block = 76
- wallet.scan_blocks(start=first_block, num=4)
+ wallet.rescan_utxos()
# Three scenarios for re-orging coinbase spends in the memory pool:
# 1. Direct coinbase spend : spend_1
diff --git a/test/functional/mempool_spend_coinbase.py b/test/functional/mempool_spend_coinbase.py
index e97595ed86..4e1dd80ba7 100755
--- a/test/functional/mempool_spend_coinbase.py
+++ b/test/functional/mempool_spend_coinbase.py
@@ -28,14 +28,14 @@ class MempoolSpendCoinbaseTest(BitcoinTestFramework):
chain_height = 198
self.nodes[0].invalidateblock(self.nodes[0].getblockhash(chain_height + 1))
assert_equal(chain_height, self.nodes[0].getblockcount())
+ wallet.rescan_utxos()
# Coinbase at height chain_height-100+1 ok in mempool, should
# get mined. Coinbase at height chain_height-100+2 is
# too immature to spend.
- wallet.scan_blocks(start=chain_height - 100 + 1, num=1)
- utxo_mature = wallet.get_utxo()
- wallet.scan_blocks(start=chain_height - 100 + 2, num=1)
- utxo_immature = wallet.get_utxo()
+ coinbase_txid = lambda h: self.nodes[0].getblock(self.nodes[0].getblockhash(h))['tx'][0]
+ utxo_mature = wallet.get_utxo(txid=coinbase_txid(chain_height - 100 + 1))
+ utxo_immature = wallet.get_utxo(txid=coinbase_txid(chain_height - 100 + 2))
spend_mature_id = wallet.send_self_transfer(from_node=self.nodes[0], utxo_to_spend=utxo_mature)["txid"]
diff --git a/test/functional/p2p_blocksonly.py b/test/functional/p2p_blocksonly.py
index 480bff361d..94ae758d46 100755
--- a/test/functional/p2p_blocksonly.py
+++ b/test/functional/p2p_blocksonly.py
@@ -6,8 +6,7 @@
import time
-from test_framework.blocktools import COINBASE_MATURITY
-from test_framework.messages import msg_tx
+from test_framework.messages import msg_tx, msg_inv, CInv, MSG_WTX
from test_framework.p2p import P2PInterface, P2PTxInvStore
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
@@ -16,15 +15,13 @@ from test_framework.wallet import MiniWallet
class P2PBlocksOnly(BitcoinTestFramework):
def set_test_params(self):
- self.setup_clean_chain = True
self.num_nodes = 1
self.extra_args = [["-blocksonly"]]
def run_test(self):
self.miniwallet = MiniWallet(self.nodes[0])
# Add enough mature utxos to the wallet, so that all txs spend confirmed coins
- self.generate(self.miniwallet, 2)
- self.generate(self.nodes[0], COINBASE_MATURITY)
+ self.miniwallet.rescan_utxos()
self.blocksonly_mode_tests()
self.blocks_relay_conn_tests()
@@ -36,12 +33,19 @@ class P2PBlocksOnly(BitcoinTestFramework):
self.nodes[0].add_p2p_connection(P2PInterface())
tx, txid, wtxid, tx_hex = self.check_p2p_tx_violation()
+ self.log.info('Check that tx invs also violate the protocol')
+ self.nodes[0].add_p2p_connection(P2PInterface())
+ with self.nodes[0].assert_debug_log(['transaction (0000000000000000000000000000000000000000000000000000000000001234) inv sent in violation of protocol, disconnecting peer']):
+ self.nodes[0].p2ps[0].send_message(msg_inv([CInv(t=MSG_WTX, h=0x1234)]))
+ self.nodes[0].p2ps[0].wait_for_disconnect()
+ del self.nodes[0].p2ps[0]
+
self.log.info('Check that txs from rpc are not rejected and relayed to other peers')
tx_relay_peer = self.nodes[0].add_p2p_connection(P2PInterface())
assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], True)
assert_equal(self.nodes[0].testmempoolaccept([tx_hex])[0]['allowed'], True)
- with self.nodes[0].assert_debug_log(['received getdata for: wtx {} peer=1'.format(wtxid)]):
+ with self.nodes[0].assert_debug_log(['received getdata for: wtx {} peer'.format(wtxid)]):
self.nodes[0].sendrawtransaction(tx_hex)
tx_relay_peer.wait_for_tx(txid)
assert_equal(self.nodes[0].getmempoolinfo()['size'], 1)
@@ -83,7 +87,7 @@ class P2PBlocksOnly(BitcoinTestFramework):
# Ensure we disconnect if a block-relay-only connection sends us a transaction
self.nodes[0].add_outbound_p2p_connection(P2PInterface(), p2p_idx=0, connection_type="block-relay-only")
assert_equal(self.nodes[0].getpeerinfo()[0]['relaytxes'], False)
- _, txid, _, tx_hex = self.check_p2p_tx_violation(index=2)
+ _, txid, _, tx_hex = self.check_p2p_tx_violation()
self.log.info("Check that txs from RPC are not sent to blockrelay connection")
conn = self.nodes[0].add_outbound_p2p_connection(P2PTxInvStore(), p2p_idx=1, connection_type="block-relay-only")
@@ -96,11 +100,9 @@ class P2PBlocksOnly(BitcoinTestFramework):
conn.sync_send_with_ping()
assert(int(txid, 16) not in conn.get_invs())
- def check_p2p_tx_violation(self, index=1):
+ def check_p2p_tx_violation(self):
self.log.info('Check that txs from P2P are rejected and result in disconnect')
- input_txid = self.nodes[0].getblock(self.nodes[0].getblockhash(index), 2)['tx'][0]['txid']
- utxo_to_spend = self.miniwallet.get_utxo(txid=input_txid)
- spendtx = self.miniwallet.create_self_transfer(from_node=self.nodes[0], utxo_to_spend=utxo_to_spend)
+ spendtx = self.miniwallet.create_self_transfer(from_node=self.nodes[0])
with self.nodes[0].assert_debug_log(['transaction sent in violation of protocol peer=0']):
self.nodes[0].p2ps[0].send_message(msg_tx(spendtx['tx']))
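The hash in the expected debug-log message is simply the inv hash 0x1234 rendered as a 256-bit (64 hex character) value, hence the long run of leading zeros:

# The inv sent above carries h=0x1234; the node logs it as a 64-character hex string.
assert f"{0x1234:064x}" == "0" * 60 + "1234"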
diff --git a/test/functional/rpc_blockchain.py b/test/functional/rpc_blockchain.py
index e13de4395b..0600d8b9c5 100755
--- a/test/functional/rpc_blockchain.py
+++ b/test/functional/rpc_blockchain.py
@@ -406,7 +406,7 @@ class BlockchainTest(BitcoinTestFramework):
node = self.nodes[0]
miniwallet = MiniWallet(node)
- miniwallet.scan_blocks(num=5)
+ miniwallet.rescan_utxos()
fee_per_byte = Decimal('0.00000010')
fee_per_kb = 1000 * fee_per_byte
diff --git a/test/functional/rpc_net.py b/test/functional/rpc_net.py
index aa53e354a3..3fcca97cb7 100755
--- a/test/functional/rpc_net.py
+++ b/test/functional/rpc_net.py
@@ -239,7 +239,16 @@ class NetTest(BitcoinTestFramework):
assert_raises_rpc_error(-8, "Network not recognized: Foo", self.nodes[0].getnodeaddresses, 1, "Foo")
def test_addpeeraddress(self):
+ """RPC addpeeraddress sets the source address equal to the destination address.
+ If an address with the same /16 as an existing new entry is passed, it will be
+ placed in the same new bucket and have a 1/64 chance of the bucket positions
+ colliding (depending on the value of nKey in the addrman), in which case the
+ new address won't be added. The probability of collision can be reduced to
+ 1/2^16 = 1/65536 by using an address from a different /16. We avoid this here
+ by first testing adding a tried table entry before testing adding a new table one.
+ """
self.log.info("Test addpeeraddress")
+ self.restart_node(1, ["-checkaddrman=1"])
node = self.nodes[1]
self.log.debug("Test that addpeerinfo is a hidden RPC")
@@ -251,17 +260,25 @@ class NetTest(BitcoinTestFramework):
assert_equal(node.addpeeraddress(address="", port=8333), {"success": False})
assert_equal(node.getnodeaddresses(count=0), [])
- self.log.debug("Test that adding a valid address succeeds")
- assert_equal(node.addpeeraddress(address="1.2.3.4", port=8333), {"success": True})
- addrs = node.getnodeaddresses(count=0)
- assert_equal(len(addrs), 1)
- assert_equal(addrs[0]["address"], "1.2.3.4")
- assert_equal(addrs[0]["port"], 8333)
-
- self.log.debug("Test that adding the same address again when already present fails")
- assert_equal(node.addpeeraddress(address="1.2.3.4", port=8333), {"success": False})
+ self.log.debug("Test that adding a valid address to the tried table succeeds")
+ assert_equal(node.addpeeraddress(address="1.2.3.4", tried=True, port=8333), {"success": True})
+ with node.assert_debug_log(expected_msgs=["Addrman checks started: new 0, tried 1, total 1"]):
+ addrs = node.getnodeaddresses(count=0) # getnodeaddresses re-runs the addrman checks
+ assert_equal(len(addrs), 1)
+ assert_equal(addrs[0]["address"], "1.2.3.4")
+ assert_equal(addrs[0]["port"], 8333)
+
+ self.log.debug("Test that adding an already-present tried address to the new and tried tables fails")
+ for value in [True, False]:
+ assert_equal(node.addpeeraddress(address="1.2.3.4", tried=value, port=8333), {"success": False})
assert_equal(len(node.getnodeaddresses(count=0)), 1)
+ self.log.debug("Test that adding a second address, this time to the new table, succeeds")
+ assert_equal(node.addpeeraddress(address="2.0.0.0", port=8333), {"success": True})
+ with node.assert_debug_log(expected_msgs=["Addrman checks started: new 1, tried 1, total 2"]):
+ addrs = node.getnodeaddresses(count=0) # getnodeaddresses re-runs the addrman checks
+ assert_equal(len(addrs), 2)
+
if __name__ == '__main__':
NetTest().main()
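The odds quoted in the addpeeraddress docstring follow from the addrman layout, assuming the usual constants of 1024 new buckets (1 << 10, as serialized in feature_addrman.py above) with 64 slots each: a second new-table entry from the same /16 is forced into the same bucket, so only the slot position can collide (1/64), while an entry from a different /16 can land in any of the 1024 * 64 = 65536 slots. A small arithmetic check under that assumption:

# Assumed addrman constants: 1024 new buckets, 64 slots per bucket.
NEW_BUCKET_COUNT = 1 << 10
BUCKET_SIZE = 64

assert 1 / (NEW_BUCKET_COUNT * BUCKET_SIZE) == 1 / 65536   # different /16
assert 2 ** 16 == NEW_BUCKET_COUNT * BUCKET_SIZE           # matches the 1/2^16 in the docstring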
diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py
index b7d5bd8fab..ec563cc290 100755
--- a/test/functional/test_framework/p2p.py
+++ b/test/functional/test_framework/p2p.py
@@ -577,6 +577,8 @@ class NetworkThread(threading.Thread):
NetworkThread.listeners = {}
NetworkThread.protos = {}
+ if sys.platform == 'win32':
+ asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy())
NetworkThread.network_event_loop = asyncio.new_event_loop()
def run(self):
diff --git a/test/functional/test_framework/util.py b/test/functional/test_framework/util.py
index ec27fd7f85..d66499dbcb 100644
--- a/test/functional/test_framework/util.py
+++ b/test/functional/test_framework/util.py
@@ -364,6 +364,11 @@ def write_config(config_path, *, n, chain, extra_config="", disable_autoconnect=
f.write("dnsseed=0\n")
f.write("fixedseeds=0\n")
f.write("listenonion=0\n")
+ # Increase peertimeout to avoid disconnects while using mocktime.
+ # peertimeout is measured in wall clock time, so setting it to the
+ # duration of the longest test is sufficient. It can be overridden in
+ # tests.
+ f.write("peertimeout=999999\n")
f.write("printtoconsole=0\n")
f.write("upnp=0\n")
f.write("natpmp=0\n")
diff --git a/test/functional/test_framework/wallet.py b/test/functional/test_framework/wallet.py
index ba5b95f930..2d7f061912 100644
--- a/test/functional/test_framework/wallet.py
+++ b/test/functional/test_framework/wallet.py
@@ -79,12 +79,13 @@ class MiniWallet:
self._address = ADDRESS_BCRT1_P2WSH_OP_TRUE
self._scriptPubKey = bytes.fromhex(self._test_node.validateaddress(self._address)['scriptPubKey'])
- def scan_blocks(self, *, start=1, num):
- """Scan the blocks for self._address outputs and add them to self._utxos"""
- for i in range(start, start + num):
- block = self._test_node.getblock(blockhash=self._test_node.getblockhash(i), verbosity=2)
- for tx in block['tx']:
- self.scan_tx(tx)
+ def rescan_utxos(self):
+ """Drop all utxos and rescan the utxo set"""
+ self._utxos = []
+ res = self._test_node.scantxoutset(action="start", scanobjects=[f'raw({self._scriptPubKey.hex()})'])
+ assert_equal(True, res['success'])
+ for utxo in res['unspents']:
+ self._utxos.append({'txid': utxo['txid'], 'vout': utxo['vout'], 'value': utxo['amount']})
def scan_tx(self, tx):
"""Scan the tx for self._scriptPubKey outputs and add them to self._utxos"""