-rw-r--r--   .cirrus.yml | 2
-rw-r--r--   .gitignore | 2
-rw-r--r--   COPYING | 4
-rw-r--r--   Makefile.am | 26
-rw-r--r--   REVIEWERS | 5
-rw-r--r--   build_msvc/.gitignore | 2
-rw-r--r--   build_msvc/bench_bitcoin/bench_bitcoin.vcxproj.in | 2
-rw-r--r--   build_msvc/bitcoin-qt/bitcoin-qt.vcxproj | 2
-rw-r--r--   build_msvc/bitcoin.sln | 2
-rw-r--r--   build_msvc/bitcoind/bitcoind.vcxproj | 2
-rw-r--r--   build_msvc/libbitcoin_node/libbitcoin_node.vcxproj.in (renamed from build_msvc/libbitcoin_server/libbitcoin_server.vcxproj.in) | 0
-rwxr-xr-x   build_msvc/msvc-autogen.py | 2
-rw-r--r--   build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj | 2
-rw-r--r--   build_msvc/test_bitcoin/test_bitcoin.vcxproj | 2
-rwxr-xr-x   ci/test/00_setup_env_mac.sh | 2
-rw-r--r--   configure.ac | 9
-rw-r--r--   contrib/debian/copyright | 2
-rwxr-xr-x   contrib/devtools/security-check.py | 46
-rw-r--r--   contrib/guix/manifest.scm | 22
-rw-r--r--   contrib/macdeploy/background.svg | 34
-rw-r--r--   contrib/macdeploy/background.tiff | bin 0 -> 18464 bytes
-rwxr-xr-x   contrib/macdeploy/macdeployqtplus | 2
-rw-r--r--   depends/README.md | 2
-rw-r--r--   depends/packages/libxcb.mk | 2
-rw-r--r--   depends/packages/qt.mk | 1
-rw-r--r--   doc/build-openbsd.md | 6
-rw-r--r--   doc/build-osx.md | 5
-rw-r--r--   doc/dependencies.md | 2
-rw-r--r--   doc/release-notes.md | 2
-rw-r--r--   src/Makefile.am | 19
-rw-r--r--   src/Makefile.bench.include | 2
-rw-r--r--   src/Makefile.qt.include | 4
-rw-r--r--   src/Makefile.qttest.include | 2
-rw-r--r--   src/Makefile.test.include | 4
-rw-r--r--   src/Makefile.test_fuzz.include | 2
-rw-r--r--   src/Makefile.test_util.include | 2
-rw-r--r--   src/addrman.cpp | 37
-rw-r--r--   src/addrman.h | 34
-rw-r--r--   src/addrman_impl.h | 6
-rw-r--r--   src/bitcoin-tx.cpp | 6
-rw-r--r--   src/blockfilter.cpp | 31
-rw-r--r--   src/core_write.cpp | 23
-rw-r--r--   src/index/coinstatsindex.cpp | 2
-rw-r--r--   src/init.cpp | 64
-rw-r--r--   src/net.cpp | 38
-rw-r--r--   src/net.h | 13
-rw-r--r--   src/net_processing.cpp | 84
-rw-r--r--   src/node/blockstorage.cpp | 370
-rw-r--r--   src/node/blockstorage.h | 84
-rw-r--r--   src/node/chainstate.cpp | 2
-rw-r--r--   src/node/chainstate.h | 2
-rw-r--r--   src/node/coinstats.cpp | 11
-rw-r--r--   src/node/coinstats.h | 10
-rw-r--r--   src/qt/android/src/org/bitcoincore/qt/BitcoinQtActivity.java | 6
-rw-r--r--   src/qt/forms/receivecoinsdialog.ui | 2
-rw-r--r--   src/qt/receivecoinsdialog.cpp | 2
-rw-r--r--   src/rpc/blockchain.cpp | 66
-rw-r--r--   src/rpc/blockchain.h | 1
-rw-r--r--   src/rpc/rawtransaction.cpp | 9
-rw-r--r--   src/rpc/server_util.cpp | 13
-rw-r--r--   src/rpc/server_util.h | 7
-rw-r--r--   src/test/addrman_tests.cpp | 476
-rw-r--r--   src/test/fuzz/addition_overflow.cpp | 1
-rw-r--r--   src/test/fuzz/crypto_chacha20_poly1305_aead.cpp | 1
-rw-r--r--   src/test/fuzz/golomb_rice.cpp | 14
-rw-r--r--   src/test/fuzz/integer.cpp | 1
-rw-r--r--   src/test/fuzz/pow.cpp | 1
-rw-r--r--   src/test/fuzz/util.cpp | 17
-rw-r--r--   src/test/fuzz/util.h | 13
-rw-r--r--   src/test/util/net.h | 18
-rw-r--r--   src/test/util/setup_common.cpp | 32
-rw-r--r--   src/test/util_tests.cpp | 33
-rw-r--r--   src/txmempool.cpp | 15
-rw-r--r--   src/txmempool.h | 10
-rw-r--r--   src/util/golombrice.h | 31
-rw-r--r--   src/util/overflow.h | 31
-rw-r--r--   src/util/sock.cpp | 27
-rw-r--r--   src/util/sock.h | 9
-rw-r--r--   src/validation.cpp | 374
-rw-r--r--   src/validation.h | 82
-rw-r--r--   src/wallet/dump.cpp | 5
-rwxr-xr-x   test/functional/feature_fee_estimation.py | 200
-rwxr-xr-x   test/functional/feature_init.py | 74
-rwxr-xr-x   test/functional/feature_pruning.py | 11
-rwxr-xr-x   test/functional/feature_startupnotify.py | 41
-rwxr-xr-x   test/functional/mining_basic.py | 12
-rwxr-xr-x   test/functional/test_framework/test_node.py | 45
-rwxr-xr-x   test/functional/test_runner.py | 64
-rwxr-xr-x   test/functional/tool_wallet.py | 6
-rw-r--r--   test/sanitizer_suppressions/lsan | 3
-rw-r--r--   test/sanitizer_suppressions/ubsan | 55
-rw-r--r--   test/util/data/bitcoin-util-test.json | 23
-rw-r--r--   test/util/data/txcreatesignsegwit1.hex | 1
93 files changed, 1533 insertions, 1343 deletions
diff --git a/.cirrus.yml b/.cirrus.yml
index 9ae936766a..240e2cf705 100644
--- a/.cirrus.yml
+++ b/.cirrus.yml
@@ -205,7 +205,7 @@ task:
FILE_ENV: "./ci/test/00_setup_env_native_qt5.sh"
task:
- name: '[TSan, depends, no gui] [jammy]'
+ name: '[TSan, depends, gui] [jammy]'
<< : *GLOBAL_TASK_TEMPLATE
container:
image: ubuntu:jammy
diff --git a/.gitignore b/.gitignore
index 76fc450404..f84a53178e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -96,7 +96,6 @@ Makefile
!depends/Makefile
src/qt/bitcoin-qt
Bitcoin-Qt.app
-background.tiff*
# Qt Creator
Makefile.am.user
@@ -149,6 +148,5 @@ db4/
osx_volname
dist/
-*.background.tiff
/guix-build-*
diff --git a/COPYING b/COPYING
index c34f575b8c..b157c5fe49 100644
--- a/COPYING
+++ b/COPYING
@@ -1,7 +1,7 @@
The MIT License (MIT)
-Copyright (c) 2009-2021 The Bitcoin Core developers
-Copyright (c) 2009-2021 Bitcoin Developers
+Copyright (c) 2009-2022 The Bitcoin Core developers
+Copyright (c) 2009-2022 Bitcoin Developers
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/Makefile.am b/Makefile.am
index 9f50e51ce0..1412624d54 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -38,9 +38,7 @@ OSX_APP=Bitcoin-Qt.app
OSX_VOLNAME = $(subst $(space),-,$(PACKAGE_NAME))
OSX_DMG = $(OSX_VOLNAME).dmg
OSX_TEMP_ISO = $(OSX_DMG:.dmg=).temp.iso
-OSX_BACKGROUND_SVG=background.svg
-OSX_BACKGROUND_IMAGE=background.tiff
-OSX_BACKGROUND_IMAGE_DPIS=36 72
+OSX_BACKGROUND_IMAGE=$(top_srcdir)/contrib/macdeploy/background.tiff
OSX_DEPLOY_SCRIPT=$(top_srcdir)/contrib/macdeploy/macdeployqtplus
OSX_INSTALLER_ICONS=$(top_srcdir)/src/qt/res/icons/bitcoin.icns
OSX_PLIST=$(top_builddir)/share/qt/Info.plist #not installed
@@ -66,7 +64,6 @@ WINDOWS_PACKAGING = $(top_srcdir)/share/pixmaps/bitcoin.ico \
$(top_srcdir)/doc/README_windows.txt
OSX_PACKAGING = $(OSX_DEPLOY_SCRIPT) $(OSX_INSTALLER_ICONS) \
- $(top_srcdir)/contrib/macdeploy/$(OSX_BACKGROUND_SVG) \
$(top_srcdir)/contrib/macdeploy/detached-sig-apply.sh \
$(top_srcdir)/contrib/macdeploy/detached-sig-create.sh
@@ -127,20 +124,13 @@ osx_volname:
echo $(OSX_VOLNAME) >$@
if BUILD_DARWIN
-$(OSX_DMG): $(OSX_APP_BUILT) $(OSX_PACKAGING) $(OSX_BACKGROUND_IMAGE)
+$(OSX_DMG): $(OSX_APP_BUILT) $(OSX_PACKAGING)
$(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) $(OSX_VOLNAME) -translations-dir=$(QT_TRANSLATION_DIR) -dmg
-$(OSX_BACKGROUND_IMAGE).png: contrib/macdeploy/$(OSX_BACKGROUND_SVG)
- sed 's/PACKAGE_NAME/$(PACKAGE_NAME)/' < "$<" | $(RSVG_CONVERT) -f png -d 36 -p 36 -o $@
-$(OSX_BACKGROUND_IMAGE)@2x.png: contrib/macdeploy/$(OSX_BACKGROUND_SVG)
- sed 's/PACKAGE_NAME/$(PACKAGE_NAME)/' < "$<" | $(RSVG_CONVERT) -f png -d 72 -p 72 -o $@
-$(OSX_BACKGROUND_IMAGE): $(OSX_BACKGROUND_IMAGE).png $(OSX_BACKGROUND_IMAGE)@2x.png
- tiffutil -cathidpicheck $^ -out $@
-
deploydir: $(OSX_DMG)
else !BUILD_DARWIN
APP_DIST_DIR=$(top_builddir)/dist
-APP_DIST_EXTRAS=$(APP_DIST_DIR)/.background/$(OSX_BACKGROUND_IMAGE) $(APP_DIST_DIR)/.DS_Store $(APP_DIST_DIR)/Applications
+APP_DIST_EXTRAS=$(APP_DIST_DIR)/.background/background.tiff $(APP_DIST_DIR)/.DS_Store $(APP_DIST_DIR)/Applications
$(APP_DIST_DIR)/Applications:
@rm -f $@
@@ -154,12 +144,9 @@ $(OSX_TEMP_ISO): $(APP_DIST_EXTRAS)
$(OSX_DMG): $(OSX_TEMP_ISO)
$(DMG) dmg "$<" "$@"
-dpi%.$(OSX_BACKGROUND_IMAGE): contrib/macdeploy/$(OSX_BACKGROUND_SVG)
- sed 's/PACKAGE_NAME/$(PACKAGE_NAME)/' < "$<" | $(RSVG_CONVERT) -f png -d $* -p $* | $(IMAGEMAGICK_CONVERT) - $@
-OSX_BACKGROUND_IMAGE_DPIFILES := $(foreach dpi,$(OSX_BACKGROUND_IMAGE_DPIS),dpi$(dpi).$(OSX_BACKGROUND_IMAGE))
-$(APP_DIST_DIR)/.background/$(OSX_BACKGROUND_IMAGE): $(OSX_BACKGROUND_IMAGE_DPIFILES)
+$(APP_DIST_DIR)/.background/background.tiff:
$(MKDIR_P) $(@D)
- $(TIFFCP) -c none $(OSX_BACKGROUND_IMAGE_DPIFILES) $@
+ cp $(OSX_BACKGROUND_IMAGE) $@
$(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Bitcoin-Qt: $(OSX_APP_BUILT) $(OSX_PACKAGING)
INSTALLNAMETOOL=$(INSTALLNAMETOOL) OTOOL=$(OTOOL) STRIP=$(STRIP) $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) $(OSX_VOLNAME) -translations-dir=$(QT_TRANSLATION_DIR)
@@ -336,6 +323,7 @@ EXTRA_DIST += \
test/util/data/txcreatescript4.json \
test/util/data/txcreatescript5.hex \
test/util/data/txcreatescript6.hex \
+ test/util/data/txcreatesignsegwit1.hex \
test/util/data/txcreatesignv1.hex \
test/util/data/txcreatesignv1.json \
test/util/data/txcreatesignv2.hex \
@@ -363,7 +351,7 @@ clean-docs:
clean-local: clean-docs
rm -rf coverage_percent.txt test_bitcoin.coverage/ total.coverage/ fuzz.coverage/ test/tmp/ cache/ $(OSX_APP)
rm -rf test/functional/__pycache__ test/functional/test_framework/__pycache__ test/cache share/rpcauth/__pycache__
- rm -rf osx_volname dist/ dpi36.background.tiff dpi72.background.tiff
+ rm -rf osx_volname dist/
test-security-check:
if TARGET_DARWIN
diff --git a/REVIEWERS b/REVIEWERS
index 0caacec440..e5dd3c9b91 100644
--- a/REVIEWERS
+++ b/REVIEWERS
@@ -134,3 +134,8 @@
/src/script/interpreter.* @sipa
/src/validation.* @sipa
/src/consensus/ @sipa
+
+# Tracing
+/doc/tracing.md @jb55 @0xB10C
+/src/util/trace.h @jb55 @0xB10C
+/contrib/tracing/ @jb55 @0xB10C
diff --git a/build_msvc/.gitignore b/build_msvc/.gitignore
index ae8120fdf3..f70eea5b04 100644
--- a/build_msvc/.gitignore
+++ b/build_msvc/.gitignore
@@ -13,7 +13,7 @@ packages/*
libbitcoin_cli/libbitcoin_cli.vcxproj
libbitcoin_common/libbitcoin_common.vcxproj
libbitcoin_crypto/libbitcoin_crypto.vcxproj
-libbitcoin_server/libbitcoin_server.vcxproj
+libbitcoin_node/libbitcoin_node.vcxproj
libbitcoin_util/libbitcoin_util.vcxproj
libbitcoin_wallet_tool/libbitcoin_wallet_tool.vcxproj
libbitcoin_wallet/libbitcoin_wallet.vcxproj
diff --git a/build_msvc/bench_bitcoin/bench_bitcoin.vcxproj.in b/build_msvc/bench_bitcoin/bench_bitcoin.vcxproj.in
index 128c1bd8e7..fc9d7cbed6 100644
--- a/build_msvc/bench_bitcoin/bench_bitcoin.vcxproj.in
+++ b/build_msvc/bench_bitcoin/bench_bitcoin.vcxproj.in
@@ -21,7 +21,7 @@
<ProjectReference Include="..\libbitcoin_crypto\libbitcoin_crypto.vcxproj">
<Project>{6190199c-6cf4-4dad-bfbd-93fa72a760c1}</Project>
</ProjectReference>
- <ProjectReference Include="..\libbitcoin_server\libbitcoin_server.vcxproj">
+ <ProjectReference Include="..\libbitcoin_node\libbitcoin_node.vcxproj">
<Project>{460fee33-1fe1-483f-b3bf-931ff8e969a5}</Project>
</ProjectReference>
<ProjectReference Include="..\libbitcoin_util\libbitcoin_util.vcxproj">
diff --git a/build_msvc/bitcoin-qt/bitcoin-qt.vcxproj b/build_msvc/bitcoin-qt/bitcoin-qt.vcxproj
index 2800a42767..0d6358e0d0 100644
--- a/build_msvc/bitcoin-qt/bitcoin-qt.vcxproj
+++ b/build_msvc/bitcoin-qt/bitcoin-qt.vcxproj
@@ -28,7 +28,7 @@
<ProjectReference Include="..\libbitcoin_qt\libbitcoin_qt.vcxproj">
<Project>{2b4abff8-d1fd-4845-88c9-1f3c0a6512bf}</Project>
</ProjectReference>
- <ProjectReference Include="..\libbitcoin_server\libbitcoin_server.vcxproj">
+ <ProjectReference Include="..\libbitcoin_node\libbitcoin_node.vcxproj">
<Project>{460fee33-1fe1-483f-b3bf-931ff8e969a5}</Project>
</ProjectReference>
<ProjectReference Include="..\libbitcoin_util\libbitcoin_util.vcxproj">
diff --git a/build_msvc/bitcoin.sln b/build_msvc/bitcoin.sln
index b2ab64a34b..2a1ccf58fe 100644
--- a/build_msvc/bitcoin.sln
+++ b/build_msvc/bitcoin.sln
@@ -12,7 +12,7 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbitcoin_common", "libbit
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbitcoin_crypto", "libbitcoin_crypto\libbitcoin_crypto.vcxproj", "{6190199C-6CF4-4DAD-BFBD-93FA72A760C1}"
EndProject
-Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbitcoin_server", "libbitcoin_server\libbitcoin_server.vcxproj", "{460FEE33-1FE1-483F-B3BF-931FF8E969A5}"
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libbitcoin_node", "libbitcoin_node\libbitcoin_node.vcxproj", "{460FEE33-1FE1-483F-B3BF-931FF8E969A5}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "libunivalue", "libunivalue\libunivalue.vcxproj", "{5724BA7D-A09A-4BA8-800B-C4C1561B3D69}"
EndProject
diff --git a/build_msvc/bitcoind/bitcoind.vcxproj b/build_msvc/bitcoind/bitcoind.vcxproj
index d56c359fe0..b1204d0d5d 100644
--- a/build_msvc/bitcoind/bitcoind.vcxproj
+++ b/build_msvc/bitcoind/bitcoind.vcxproj
@@ -24,7 +24,7 @@
<ProjectReference Include="..\libbitcoin_crypto\libbitcoin_crypto.vcxproj">
<Project>{6190199c-6cf4-4dad-bfbd-93fa72a760c1}</Project>
</ProjectReference>
- <ProjectReference Include="..\libbitcoin_server\libbitcoin_server.vcxproj">
+ <ProjectReference Include="..\libbitcoin_node\libbitcoin_node.vcxproj">
<Project>{460fee33-1fe1-483f-b3bf-931ff8e969a5}</Project>
</ProjectReference>
<ProjectReference Include="..\libbitcoin_util\libbitcoin_util.vcxproj">
diff --git a/build_msvc/libbitcoin_server/libbitcoin_server.vcxproj.in b/build_msvc/libbitcoin_node/libbitcoin_node.vcxproj.in
index 58e90dbaeb..58e90dbaeb 100644
--- a/build_msvc/libbitcoin_server/libbitcoin_server.vcxproj.in
+++ b/build_msvc/libbitcoin_node/libbitcoin_node.vcxproj.in
diff --git a/build_msvc/msvc-autogen.py b/build_msvc/msvc-autogen.py
index ed4fa7ec9a..2a70cd9332 100755
--- a/build_msvc/msvc-autogen.py
+++ b/build_msvc/msvc-autogen.py
@@ -15,7 +15,7 @@ libs = [
'libbitcoin_cli',
'libbitcoin_common',
'libbitcoin_crypto',
- 'libbitcoin_server',
+ 'libbitcoin_node',
'libbitcoin_util',
'libbitcoin_wallet_tool',
'libbitcoin_wallet',
diff --git a/build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj b/build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj
index f9948b6f13..d3be693e99 100644
--- a/build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj
+++ b/build_msvc/test_bitcoin-qt/test_bitcoin-qt.vcxproj
@@ -40,7 +40,7 @@
<ProjectReference Include="..\libbitcoin_qt\libbitcoin_qt.vcxproj">
<Project>{2b4abff8-d1fd-4845-88c9-1f3c0a6512bf}</Project>
</ProjectReference>
- <ProjectReference Include="..\libbitcoin_server\libbitcoin_server.vcxproj">
+ <ProjectReference Include="..\libbitcoin_node\libbitcoin_node.vcxproj">
<Project>{460fee33-1fe1-483f-b3bf-931ff8e969a5}</Project>
</ProjectReference>
<ProjectReference Include="..\libbitcoin_util\libbitcoin_util.vcxproj">
diff --git a/build_msvc/test_bitcoin/test_bitcoin.vcxproj b/build_msvc/test_bitcoin/test_bitcoin.vcxproj
index 2fc0078b8d..4182448ec3 100644
--- a/build_msvc/test_bitcoin/test_bitcoin.vcxproj
+++ b/build_msvc/test_bitcoin/test_bitcoin.vcxproj
@@ -34,7 +34,7 @@
<ProjectReference Include="..\libbitcoin_crypto\libbitcoin_crypto.vcxproj">
<Project>{6190199c-6cf4-4dad-bfbd-93fa72a760c1}</Project>
</ProjectReference>
- <ProjectReference Include="..\libbitcoin_server\libbitcoin_server.vcxproj">
+ <ProjectReference Include="..\libbitcoin_node\libbitcoin_node.vcxproj">
<Project>{460fee33-1fe1-483f-b3bf-931ff8e969a5}</Project>
</ProjectReference>
<ProjectReference Include="..\libbitcoin_util\libbitcoin_util.vcxproj">
diff --git a/ci/test/00_setup_env_mac.sh b/ci/test/00_setup_env_mac.sh
index 26d1a44187..d70b993b99 100755
--- a/ci/test/00_setup_env_mac.sh
+++ b/ci/test/00_setup_env_mac.sh
@@ -9,7 +9,7 @@ export LC_ALL=C.UTF-8
export CONTAINER_NAME=ci_macos_cross
export DOCKER_NAME_TAG=ubuntu:20.04 # Check that Focal can cross-compile to macos
export HOST=x86_64-apple-darwin
-export PACKAGES="cmake imagemagick librsvg2-bin libz-dev libtiff-tools libtinfo5 python3-setuptools xorriso"
+export PACKAGES="cmake libz-dev libtinfo5 python3-setuptools xorriso"
export XCODE_VERSION=12.1
export XCODE_BUILD_ID=12A7403
export RUN_UNIT_TESTS=false
diff --git a/configure.ac b/configure.ac
index d21b3eabeb..acddd06569 100644
--- a/configure.ac
+++ b/configure.ac
@@ -4,7 +4,7 @@ define(_CLIENT_VERSION_MINOR, 99)
define(_CLIENT_VERSION_BUILD, 0)
define(_CLIENT_VERSION_RC, 0)
define(_CLIENT_VERSION_IS_RELEASE, false)
-define(_COPYRIGHT_YEAR, 2021)
+define(_COPYRIGHT_YEAR, 2022)
define(_COPYRIGHT_HOLDERS,[The %s developers])
define(_COPYRIGHT_HOLDERS_SUBSTITUTION,[[Bitcoin Core]])
AC_INIT([Bitcoin Core],m4_join([.], _CLIENT_VERSION_MAJOR, _CLIENT_VERSION_MINOR, _CLIENT_VERSION_BUILD)m4_if(_CLIENT_VERSION_RC, [0], [], [rc]_CLIENT_VERSION_RC),[https://github.com/bitcoin/bitcoin/issues],[bitcoin],[https://bitcoincore.org/])
@@ -684,7 +684,6 @@ case $host in
TARGET_OS=darwin
if test $cross_compiling != "yes"; then
BUILD_OS=darwin
- AC_PATH_PROGS([RSVG_CONVERT], [rsvg-convert rsvg], [rsvg-convert])
AC_CHECK_PROG([BREW], [brew], [brew])
if test "$BREW" = "brew"; then
dnl These Homebrew packages may be keg-only, meaning that they won't be found
@@ -746,9 +745,6 @@ case $host in
AC_PATH_TOOL([OTOOL], [otool], [otool])
AC_PATH_PROGS([XORRISOFS], [xorrisofs], [xorrisofs])
AC_PATH_PROGS([DMG], [dmg], [dmg])
- AC_PATH_PROGS([RSVG_CONVERT], [rsvg-convert rsvg], [rsvg-convert])
- AC_PATH_PROGS([IMAGEMAGICK_CONVERT], [convert], [convert])
- AC_PATH_PROGS([TIFFCP], [tiffcp], [tiffcp])
dnl libtool will try to strip the static lib, which is a problem for
dnl cross-builds because strip attempts to call a hard-coded ld,
@@ -1865,6 +1861,7 @@ AC_CONFIG_LINKS([contrib/devtools/symbol-check.py:contrib/devtools/symbol-check.
AC_CONFIG_LINKS([contrib/devtools/test-security-check.py:contrib/devtools/test-security-check.py])
AC_CONFIG_LINKS([contrib/devtools/test-symbol-check.py:contrib/devtools/test-symbol-check.py])
AC_CONFIG_LINKS([contrib/filter-lcov.py:contrib/filter-lcov.py])
+AC_CONFIG_LINKS([contrib/macdeploy/background.tiff:contrib/macdeploy/background.tiff])
AC_CONFIG_LINKS([test/functional/test_runner.py:test/functional/test_runner.py])
AC_CONFIG_LINKS([test/fuzz/test_runner.py:test/fuzz/test_runner.py])
AC_CONFIG_LINKS([test/util/test_runner.py:test/util/test_runner.py])
@@ -1939,7 +1936,7 @@ echo " gprof enabled = $enable_gprof"
echo " werror = $enable_werror"
echo " LTO = $enable_lto"
echo
-echo " target os = $TARGET_OS"
+echo " target os = $host_os"
echo " build os = $build_os"
echo
echo " CC = $CC"
diff --git a/contrib/debian/copyright b/contrib/debian/copyright
index 6d23f600c3..95a281ce05 100644
--- a/contrib/debian/copyright
+++ b/contrib/debian/copyright
@@ -5,7 +5,7 @@ Upstream-Contact: Satoshi Nakamoto <satoshin@gmx.com>
Source: https://github.com/bitcoin/bitcoin
Files: *
-Copyright: 2009-2021, Bitcoin Core Developers
+Copyright: 2009-2022, Bitcoin Core Developers
License: Expat
Comment: The Bitcoin Core Developers encompasses the current developers listed on bitcoin.org,
as well as the numerous contributors to the project.
diff --git a/contrib/devtools/security-check.py b/contrib/devtools/security-check.py
index 655f2c89c0..137fe377da 100755
--- a/contrib/devtools/security-check.py
+++ b/contrib/devtools/security-check.py
@@ -12,6 +12,10 @@ from typing import List
import lief #type:ignore
+# temporary constant, to be replaced with lief.ELF.ARCH.RISCV
+# https://github.com/lief-project/LIEF/pull/562
+LIEF_ELF_ARCH_RISCV = lief.ELF.ARCH(243)
+
def check_ELF_RELRO(binary) -> bool:
'''
Check for read-only relocations.
@@ -178,24 +182,24 @@ def check_control_flow(binary) -> bool:
return True
return False
-
-CHECKS = {
-lief.EXE_FORMATS.ELF: [
+BASE_ELF = [
('PIE', check_PIE),
('NX', check_NX),
('RELRO', check_ELF_RELRO),
('Canary', check_ELF_Canary),
('separate_code', check_ELF_separate_code),
-],
-lief.EXE_FORMATS.PE: [
+]
+
+BASE_PE = [
('PIE', check_PIE),
('DYNAMIC_BASE', check_PE_DYNAMIC_BASE),
('HIGH_ENTROPY_VA', check_PE_HIGH_ENTROPY_VA),
('NX', check_NX),
('RELOC_SECTION', check_PE_RELOC_SECTION),
('CONTROL_FLOW', check_PE_control_flow),
-],
-lief.EXE_FORMATS.MACHO: [
+]
+
+BASE_MACHO = [
('PIE', check_PIE),
('NOUNDEFS', check_MACHO_NOUNDEFS),
('NX', check_NX),
@@ -203,6 +207,21 @@ lief.EXE_FORMATS.MACHO: [
('Canary', check_MACHO_Canary),
('CONTROL_FLOW', check_control_flow),
]
+
+CHECKS = {
+ lief.EXE_FORMATS.ELF: {
+ lief.ARCHITECTURES.X86: BASE_ELF,
+ lief.ARCHITECTURES.ARM: BASE_ELF,
+ lief.ARCHITECTURES.ARM64: BASE_ELF,
+ lief.ARCHITECTURES.PPC: BASE_ELF,
+ LIEF_ELF_ARCH_RISCV: BASE_ELF,
+ },
+ lief.EXE_FORMATS.PE: {
+ lief.ARCHITECTURES.X86: BASE_PE,
+ },
+ lief.EXE_FORMATS.MACHO: {
+ lief.ARCHITECTURES.X86: BASE_MACHO,
+ }
}
if __name__ == '__main__':
@@ -211,13 +230,24 @@ if __name__ == '__main__':
try:
binary = lief.parse(filename)
etype = binary.format
+ arch = binary.abstract.header.architecture
+ binary.concrete
+
if etype == lief.EXE_FORMATS.UNKNOWN:
print(f'{filename}: unknown executable format')
retval = 1
continue
+ if arch == lief.ARCHITECTURES.NONE:
+ if binary.header.machine_type == LIEF_ELF_ARCH_RISCV:
+ arch = LIEF_ELF_ARCH_RISCV
+ else:
+ print(f'{filename}: unknown architecture')
+ retval = 1
+ continue
+
failed: List[str] = []
- for (name, func) in CHECKS[etype]:
+ for (name, func) in CHECKS[etype][arch]:
if not func(binary):
failed.append(name)
if failed:
diff --git a/contrib/guix/manifest.scm b/contrib/guix/manifest.scm
index 5805006053..6ecb324457 100644
--- a/contrib/guix/manifest.scm
+++ b/contrib/guix/manifest.scm
@@ -17,7 +17,6 @@
(gnu packages gcc)
(gnu packages gnome)
(gnu packages image)
- (gnu packages imagemagick)
(gnu packages installers)
(gnu packages linux)
(gnu packages llvm)
@@ -202,25 +201,6 @@ chain for " target " development."))
(package-with-extra-patches base-nsis
(search-our-patches "nsis-SConstruct-sde-support.patch")))
-(define-public font-tuffy
- (package
- (name "font-tuffy")
- (version "20120614")
- (source
- (origin
- (method url-fetch)
- (uri (string-append "http://tulrich.com/fonts/tuffy-" version ".tar.gz"))
- (file-name (string-append name "-" version ".tar.gz"))
- (sha256
- (base32
- "02vf72bgrp30vrbfhxjw82s115z27dwfgnmmzfb0n9wfhxxfpyf6"))))
- (build-system font-build-system)
- (home-page "http://tulrich.com/fonts/")
- (synopsis "The Tuffy Truetype Font Family")
- (description
- "Thatcher Ulrich's first outline font design. He started with the goal of producing a neutral, readable sans-serif text font. There are lots of \"expressive\" fonts out there, but he wanted to start with something very plain and clean, something he might want to actually use. ")
- (license license:public-domain)))
-
(define-public lief
(package
(name "python-lief")
@@ -653,5 +633,5 @@ inspecting signatures in Mach-O binaries.")
(else
(make-bitcoin-cross-toolchain target)))))
((string-contains target "darwin")
- (list clang-toolchain-10 binutils imagemagick libtiff librsvg font-tuffy cmake xorriso python-signapple))
+ (list clang-toolchain-10 binutils cmake xorriso python-signapple))
(else '())))))
diff --git a/contrib/macdeploy/background.svg b/contrib/macdeploy/background.svg
deleted file mode 100644
index 9c330af451..0000000000
--- a/contrib/macdeploy/background.svg
+++ /dev/null
@@ -1,34 +0,0 @@
-<?xml version="1.0" standalone="no"?>
-<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 20010904//EN"
- "http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd">
-<svg version="1.0" xmlns="http://www.w3.org/2000/svg" width="1000pt" height="640pt" viewBox="0 0 1000 640" preserveAspectRatio="xMidYMid meet">
- <!-- kate: space-indent off;
- Copyright (c) 2015 The Bitcoin Core developers
- Distributed under the MIT software license, see the accompanying
- file COPYING or http://www.opensource.org/licenses/mit-license.php.
- -->
- <style type="text/css"><![CDATA[
- text {
- font-family: "Tuffy";
- font-size: 86px;
- fill: gray;
- text-anchor: middle;
- }
- ]]></style>
- <defs>
- <linearGradient id="gradient" x1="0%" y1="0%" x2="100%" y2="100%">
- <stop offset="0%" style="stop-color:rgb(239,239,239);stop-opacity:1" />
- <stop offset="33%" style="stop-color:rgb(239,239,239);stop-opacity:1" />
- <stop offset="80%" style="stop-color:rgb(205,205,205);stop-opacity:1" />
- <stop offset="100%" style="stop-color:rgb(204,204,204);stop-opacity:1" />
- </linearGradient>
- </defs>
- <rect width="1000" height="640" style="fill:url(#gradient);stroke-width:0" />
- <g transform="translate(500,0) scale(0.9, 1)">
- <text x="0" y="114">PACKAGE_NAME</text>
- </g>
- <g transform="translate(0.000000,640.000000) scale(0.100000,-0.100000)"
- fill="#000000" stroke="none">
- <path d="M4995 3705 c-24 -23 -25 -29 -25 -165 l0 -140 -306 0 -306 0 -29 -29 c-29 -29 -29 -31 -29 -141 0 -110 0 -112 29 -141 l29 -29 306 0 306 0 0 -140 c0 -136 1 -142 25 -165 16 -17 35 -25 57 -25 29 0 72 32 306 226 180 149 274 233 278 250 13 53 -2 70 -278 299 -235 194 -277 225 -306 225 -22 0 -41 -8 -57 -25z" fixlter="url(#glow)"/>
- </g>
-</svg>
diff --git a/contrib/macdeploy/background.tiff b/contrib/macdeploy/background.tiff
new file mode 100644
index 0000000000..1fb088c837
--- /dev/null
+++ b/contrib/macdeploy/background.tiff
Binary files differ
diff --git a/contrib/macdeploy/macdeployqtplus b/contrib/macdeploy/macdeployqtplus
index 055a932eee..3b76108034 100755
--- a/contrib/macdeploy/macdeployqtplus
+++ b/contrib/macdeploy/macdeployqtplus
@@ -575,7 +575,7 @@ if config.dmg is not None:
os.mkdir(os.path.dirname(bg_path))
if verbose:
print('background.tiff', "->", bg_path)
- shutil.copy2('background.tiff', bg_path)
+ shutil.copy2('contrib/macdeploy/background.tiff', bg_path)
os.symlink("/Applications", os.path.join(disk_root, "Applications"))
diff --git a/depends/README.md b/depends/README.md
index b851ade9c7..9f0b60adf8 100644
--- a/depends/README.md
+++ b/depends/README.md
@@ -46,7 +46,7 @@ The paths are automatically configured and no other options are needed unless ta
#### For macOS cross compilation
- sudo apt-get install curl librsvg2-bin libtiff-tools bsdmainutils cmake imagemagick libz-dev python3-setuptools libtinfo5 xorriso
+ sudo apt-get install curl bsdmainutils cmake libz-dev python3-setuptools libtinfo5 xorriso
Note: You must obtain the macOS SDK before proceeding with a cross-compile.
Under the depends directory, create a subdirectory named `SDKs`.
diff --git a/depends/packages/libxcb.mk b/depends/packages/libxcb.mk
index 99a7ee2fbd..fa30e80f5c 100644
--- a/depends/packages/libxcb.mk
+++ b/depends/packages/libxcb.mk
@@ -8,7 +8,7 @@ $(package)_dependencies=xcb_proto libXau
define $(package)_set_vars
$(package)_config_opts=--disable-static --disable-devel-docs --without-doxygen --without-launchd
$(package)_config_opts += --disable-dependency-tracking --enable-option-checking
-# Disable uneeded extensions.
+# Disable unneeded extensions.
# More info is available from: https://doc.qt.io/qt-5.15/linux-requirements.html
$(package)_config_opts += --disable-composite --disable-damage --disable-dpms
$(package)_config_opts += --disable-dri2 --disable-dri3 --disable-glx
diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk
index 7d79129d96..6b71a9abda 100644
--- a/depends/packages/qt.mk
+++ b/depends/packages/qt.mk
@@ -173,7 +173,6 @@ $(package)_config_opts_android += -android-sdk $(ANDROID_SDK)
$(package)_config_opts_android += -android-ndk $(ANDROID_NDK)
$(package)_config_opts_android += -android-ndk-platform android-$(ANDROID_API_LEVEL)
$(package)_config_opts_android += -egl
-$(package)_config_opts_android += -qpa xcb
$(package)_config_opts_android += -no-dbus
$(package)_config_opts_android += -opengl es2
$(package)_config_opts_android += -qt-freetype
diff --git a/doc/build-openbsd.md b/doc/build-openbsd.md
index 6e54f67edc..275b7ce124 100644
--- a/doc/build-openbsd.md
+++ b/doc/build-openbsd.md
@@ -25,8 +25,8 @@ See [dependencies.md](dependencies.md) for a complete overview.
**Important**: From OpenBSD 6.2 onwards a C++11-supporting clang compiler is
part of the base image, and while building it is necessary to make sure that
this compiler is used and not ancient g++ 4.2.1. This is done by appending
-`CC=cc CC_FOR_BUILD=cc CXX=c++` to configuration commands. Mixing different
-compilers within the same executable will result in errors.
+`CC=cc CXX=c++` to configuration commands. Mixing different compilers within
+the same executable will result in errors.
### Building BerkeleyDB
@@ -84,7 +84,7 @@ To configure with wallet:
To configure without wallet:
```bash
-./configure --disable-wallet --with-gui=no --disable-external-signer CC=cc CC_FOR_BUILD=cc CXX=c++ MAKE=gmake
+./configure --disable-wallet --with-gui=no --disable-external-signer CC=cc CXX=c++ MAKE=gmake
```
To configure with GUI:
diff --git a/doc/build-osx.md b/doc/build-osx.md
index 467feff410..16dc224aed 100644
--- a/doc/build-osx.md
+++ b/doc/build-osx.md
@@ -35,7 +35,6 @@ The following dependencies are **optional** packages required for deploying:
Library | Purpose | Description
----------------------------------------------------|------------------|----------------------
-[librsvg](https://formulae.brew.sh/formula/librsvg) | Deploy Dependency| Library to render SVG files
[ds_store](https://pypi.org/project/ds-store/) | Deploy Dependency| Examine and modify .DS_Store files
[mac_alias](https://pypi.org/project/mac-alias/) | Deploy Dependency| Generate/Read binary alias and bookmark records
@@ -219,10 +218,6 @@ This command depends on a couple of python packages, so it is required that you
Ensuring that `python` is installed, you can install the deploy dependencies by running the following commands in your terminal:
``` bash
-brew install librsvg
-```
-
-``` bash
pip3 install ds_store mac_alias
```
diff --git a/doc/dependencies.md b/doc/dependencies.md
index 24422f1d7b..f02d433dc5 100644
--- a/doc/dependencies.md
+++ b/doc/dependencies.md
@@ -16,7 +16,6 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct
| libevent | [2.1.12-stable](https://github.com/libevent/libevent/releases) | [2.0.21](https://github.com/bitcoin/bitcoin/pull/18676) | No | | |
| libnatpmp | git commit [4536032...](https://github.com/miniupnp/libnatpmp/tree/4536032ae32268a45c073a4d5e91bbab4534773a) | | No | | |
| libpng | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) |
-| librsvg | | | | | |
| MiniUPnPc | [2.2.2](https://miniupnp.tuxfamily.org/files) | | No | | |
| PCRE | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk) |
| Python (tests) | | [3.6](https://www.python.org/downloads) | | | |
@@ -46,5 +45,4 @@ Some dependencies are not needed in all configurations. The following are some f
* ZeroMQ is needed only with the `--with-zmq` option.
#### Other
-* librsvg is only needed if you need to run `make deploy` on (cross-compilation to) macOS.
* Not-Qt-bundled zlib is required to build the [DMG tool](../contrib/macdeploy/README.md#deterministic-macos-dmg-notes) from the libdmg-hfsplus project.
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 230a56d2cd..17b0ef545d 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -97,7 +97,7 @@ Updated RPCs
`gettransaction verbose=true` and REST endpoints `/rest/tx`, `/rest/getutxos`,
`/rest/block` no longer return the `addresses` and `reqSigs` fields, which
were previously deprecated in 22.0. (#22650)
-- The `getblock` RPC command now supports verbose level 3 containing transaction inputs
+- The `getblock` RPC command now supports verbosity level 3 containing transaction inputs'
`prevout` information. The existing `/rest/block/` REST endpoint is modified to contain
this information too. Every `vin` field will contain an additional `prevout` subfield
describing the spent output. `prevout` contains the following keys:
diff --git a/src/Makefile.am b/src/Makefile.am
index 4199f7e89d..b946a325c9 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -17,7 +17,7 @@ EXTRA_LIBRARIES =
BITCOIN_INCLUDES=-I$(builddir) -I$(srcdir)/$(MINISKETCH_INCLUDE_DIR_INT) -I$(srcdir)/secp256k1/include -I$(srcdir)/$(UNIVALUE_INCLUDE_DIR_INT) $(BDB_CPPFLAGS) $(BOOST_CPPFLAGS) $(LEVELDB_CPPFLAGS)
-LIBBITCOIN_SERVER=libbitcoin_server.a
+LIBBITCOIN_NODE=libbitcoin_node.a
LIBBITCOIN_COMMON=libbitcoin_common.a
LIBBITCOIN_CONSENSUS=libbitcoin_consensus.a
LIBBITCOIN_CLI=libbitcoin_cli.a
@@ -61,7 +61,7 @@ EXTRA_LIBRARIES += \
$(LIBBITCOIN_UTIL) \
$(LIBBITCOIN_COMMON) \
$(LIBBITCOIN_CONSENSUS) \
- $(LIBBITCOIN_SERVER) \
+ $(LIBBITCOIN_NODE) \
$(LIBBITCOIN_CLI) \
$(LIBBITCOIN_IPC) \
$(LIBBITCOIN_WALLET) \
@@ -248,6 +248,7 @@ BITCOIN_CORE_H = \
util/macros.h \
util/message.h \
util/moneystr.h \
+ util/overflow.h \
util/overloaded.h \
util/rbf.h \
util/readwritefile.h \
@@ -316,9 +317,9 @@ ipc/capnp/libbitcoin_ipc_a-ipc.$(OBJEXT): $(libbitcoin_ipc_mpgen_input:=.h)
# Contains code accessing mempool and chain state that is meant to be separated
# from wallet and gui code (see node/README.md). Shared code should go in
# libbitcoin_common or libbitcoin_util libraries, instead.
-libbitcoin_server_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(MINIUPNPC_CPPFLAGS) $(NATPMP_CPPFLAGS) $(EVENT_CFLAGS) $(EVENT_PTHREADS_CFLAGS)
-libbitcoin_server_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
-libbitcoin_server_a_SOURCES = \
+libbitcoin_node_a_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(MINIUPNPC_CPPFLAGS) $(NATPMP_CPPFLAGS) $(EVENT_CFLAGS) $(EVENT_PTHREADS_CFLAGS)
+libbitcoin_node_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
+libbitcoin_node_a_SOURCES = \
addrdb.cpp \
addrman.cpp \
banman.cpp \
@@ -381,10 +382,10 @@ libbitcoin_server_a_SOURCES = \
$(BITCOIN_CORE_H)
if ENABLE_WALLET
-libbitcoin_server_a_SOURCES += wallet/init.cpp
+libbitcoin_node_a_SOURCES += wallet/init.cpp
endif
if !ENABLE_WALLET
-libbitcoin_server_a_SOURCES += dummywallet.cpp
+libbitcoin_node_a_SOURCES += dummywallet.cpp
endif
if ENABLE_ZMQ
@@ -669,13 +670,13 @@ bitcoind_SOURCES = $(bitcoin_daemon_sources) init/bitcoind.cpp
bitcoind_CPPFLAGS = $(bitcoin_bin_cppflags)
bitcoind_CXXFLAGS = $(bitcoin_bin_cxxflags)
bitcoind_LDFLAGS = $(bitcoin_bin_ldflags)
-bitcoind_LDADD = $(LIBBITCOIN_SERVER) $(bitcoin_bin_ldadd)
+bitcoind_LDADD = $(LIBBITCOIN_NODE) $(bitcoin_bin_ldadd)
bitcoin_node_SOURCES = $(bitcoin_daemon_sources) init/bitcoin-node.cpp
bitcoin_node_CPPFLAGS = $(bitcoin_bin_cppflags)
bitcoin_node_CXXFLAGS = $(bitcoin_bin_cxxflags)
bitcoin_node_LDFLAGS = $(bitcoin_bin_ldflags)
-bitcoin_node_LDADD = $(LIBBITCOIN_SERVER) $(bitcoin_bin_ldadd) $(LIBBITCOIN_IPC) $(LIBMULTIPROCESS_LIBS)
+bitcoin_node_LDADD = $(LIBBITCOIN_NODE) $(bitcoin_bin_ldadd) $(LIBBITCOIN_IPC) $(LIBMULTIPROCESS_LIBS)
# bitcoin-cli binary #
bitcoin_cli_SOURCES = bitcoin-cli.cpp
diff --git a/src/Makefile.bench.include b/src/Makefile.bench.include
index 2a8e4a0aac..2feb31a9e9 100644
--- a/src/Makefile.bench.include
+++ b/src/Makefile.bench.include
@@ -51,7 +51,7 @@ nodist_bench_bench_bitcoin_SOURCES = $(GENERATED_BENCH_FILES)
bench_bench_bitcoin_CPPFLAGS = $(AM_CPPFLAGS) $(BITCOIN_INCLUDES) $(EVENT_CFLAGS) $(EVENT_PTHREADS_CFLAGS) -I$(builddir)/bench/
bench_bench_bitcoin_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
bench_bench_bitcoin_LDADD = \
- $(LIBBITCOIN_SERVER) \
+ $(LIBBITCOIN_NODE) \
$(LIBBITCOIN_WALLET) \
$(LIBBITCOIN_COMMON) \
$(LIBBITCOIN_UTIL) \
diff --git a/src/Makefile.qt.include b/src/Makefile.qt.include
index 6c0e6f7c6f..f4d4641feb 100644
--- a/src/Makefile.qt.include
+++ b/src/Makefile.qt.include
@@ -323,7 +323,7 @@ bitcoin_qt_sources = qt/main.cpp
if TARGET_WINDOWS
bitcoin_qt_sources += $(BITCOIN_QT_RC)
endif
-bitcoin_qt_ldadd = qt/libbitcoinqt.a $(LIBBITCOIN_SERVER)
+bitcoin_qt_ldadd = qt/libbitcoinqt.a $(LIBBITCOIN_NODE)
if ENABLE_WALLET
bitcoin_qt_ldadd += $(LIBBITCOIN_UTIL) $(LIBBITCOIN_WALLET)
endif
@@ -357,7 +357,7 @@ SECONDARY: $(QT_QM)
$(srcdir)/qt/bitcoinstrings.cpp: FORCE
@test -n $(XGETTEXT) || echo "xgettext is required for updating translations"
- $(AM_V_GEN) cd $(srcdir); XGETTEXT=$(XGETTEXT) COPYRIGHT_HOLDERS="$(COPYRIGHT_HOLDERS)" $(PYTHON) ../share/qt/extract_strings_qt.py $(libbitcoin_server_a_SOURCES) $(libbitcoin_wallet_a_SOURCES) $(libbitcoin_common_a_SOURCES) $(libbitcoin_zmq_a_SOURCES) $(libbitcoin_consensus_a_SOURCES) $(libbitcoin_util_a_SOURCES)
+ $(AM_V_GEN) cd $(srcdir); XGETTEXT=$(XGETTEXT) COPYRIGHT_HOLDERS="$(COPYRIGHT_HOLDERS)" $(PYTHON) ../share/qt/extract_strings_qt.py $(libbitcoin_node_a_SOURCES) $(libbitcoin_wallet_a_SOURCES) $(libbitcoin_common_a_SOURCES) $(libbitcoin_zmq_a_SOURCES) $(libbitcoin_consensus_a_SOURCES) $(libbitcoin_util_a_SOURCES)
translate: $(srcdir)/qt/bitcoinstrings.cpp $(QT_FORMS_UI) $(QT_FORMS_UI) $(BITCOIN_QT_BASE_CPP) qt/bitcoin.cpp $(BITCOIN_QT_WINDOWS_CPP) $(BITCOIN_QT_WALLET_CPP) $(BITCOIN_QT_H) $(BITCOIN_MM)
@test -n $(LUPDATE) || echo "lupdate is required for updating translations"
diff --git a/src/Makefile.qttest.include b/src/Makefile.qttest.include
index a0900f2691..b92d2cb6e2 100644
--- a/src/Makefile.qttest.include
+++ b/src/Makefile.qttest.include
@@ -44,7 +44,7 @@ endif # ENABLE_WALLET
nodist_qt_test_test_bitcoin_qt_SOURCES = $(TEST_QT_MOC_CPP)
-qt_test_test_bitcoin_qt_LDADD = $(LIBBITCOINQT) $(LIBBITCOIN_SERVER) $(LIBTEST_UTIL)
+qt_test_test_bitcoin_qt_LDADD = $(LIBBITCOINQT) $(LIBBITCOIN_NODE) $(LIBTEST_UTIL)
if ENABLE_WALLET
qt_test_test_bitcoin_qt_LDADD += $(LIBBITCOIN_UTIL) $(LIBBITCOIN_WALLET)
endif
diff --git a/src/Makefile.test.include b/src/Makefile.test.include
index 9fe2a3cf8a..801745d0c6 100644
--- a/src/Makefile.test.include
+++ b/src/Makefile.test.include
@@ -38,7 +38,7 @@ BITCOIN_TEST_SUITE = \
FUZZ_SUITE_LD_COMMON = \
$(LIBTEST_UTIL) \
$(LIBTEST_FUZZ) \
- $(LIBBITCOIN_SERVER) \
+ $(LIBBITCOIN_NODE) \
$(LIBBITCOIN_WALLET) \
$(LIBBITCOIN_COMMON) \
$(LIBBITCOIN_UTIL) \
@@ -197,7 +197,7 @@ if ENABLE_WALLET
test_test_bitcoin_LDADD += $(LIBBITCOIN_WALLET)
endif
-test_test_bitcoin_LDADD += $(LIBBITCOIN_SERVER) $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) \
+test_test_bitcoin_LDADD += $(LIBBITCOIN_NODE) $(LIBBITCOIN_CLI) $(LIBBITCOIN_COMMON) $(LIBBITCOIN_UTIL) $(LIBBITCOIN_CONSENSUS) $(LIBBITCOIN_CRYPTO) $(LIBUNIVALUE) \
$(LIBLEVELDB) $(LIBLEVELDB_SSE42) $(LIBMEMENV) $(BOOST_LIBS) $(BOOST_UNIT_TEST_FRAMEWORK_LIB) $(LIBSECP256K1) $(EVENT_LIBS) $(EVENT_PTHREADS_LIBS) $(MINISKETCH_LIBS)
test_test_bitcoin_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
diff --git a/src/Makefile.test_fuzz.include b/src/Makefile.test_fuzz.include
index 2d772f2fca..9574454fd2 100644
--- a/src/Makefile.test_fuzz.include
+++ b/src/Makefile.test_fuzz.include
@@ -19,7 +19,7 @@ libtest_fuzz_a_SOURCES = \
test/fuzz/util.cpp \
$(TEST_FUZZ_H)
-LIBTEST_FUZZ += $(LIBBITCOIN_SERVER)
+LIBTEST_FUZZ += $(LIBBITCOIN_NODE)
LIBTEST_FUZZ += $(LIBBITCOIN_COMMON)
LIBTEST_FUZZ += $(LIBBITCOIN_UTIL)
LIBTEST_FUZZ += $(LIBBITCOIN_CRYPTO_BASE)
diff --git a/src/Makefile.test_util.include b/src/Makefile.test_util.include
index 0a3b99e7d2..92cb8a5ce6 100644
--- a/src/Makefile.test_util.include
+++ b/src/Makefile.test_util.include
@@ -35,7 +35,7 @@ libtest_util_a_SOURCES = \
test/util/wallet.cpp \
$(TEST_UTIL_H)
-LIBTEST_UTIL += $(LIBBITCOIN_SERVER)
+LIBTEST_UTIL += $(LIBBITCOIN_NODE)
LIBTEST_UTIL += $(LIBBITCOIN_COMMON)
LIBTEST_UTIL += $(LIBBITCOIN_UTIL)
LIBTEST_UTIL += $(LIBBITCOIN_CRYPTO_BASE)
diff --git a/src/addrman.cpp b/src/addrman.cpp
index 15c6f2943c..3a845b5b6e 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -930,6 +930,29 @@ std::pair<CAddress, int64_t> AddrManImpl::SelectTriedCollision_()
return {info_old, info_old.nLastTry};
}
+std::optional<AddressPosition> AddrManImpl::FindAddressEntry_(const CAddress& addr)
+{
+ AssertLockHeld(cs);
+
+ AddrInfo* addr_info = Find(addr);
+
+ if (!addr_info) return std::nullopt;
+
+ if(addr_info->fInTried) {
+ int bucket{addr_info->GetTriedBucket(nKey, m_asmap)};
+ return AddressPosition(/*tried=*/true,
+ /*multiplicity=*/1,
+ /*bucket=*/bucket,
+ /*position=*/addr_info->GetBucketPosition(nKey, false, bucket));
+ } else {
+ int bucket{addr_info->GetNewBucket(nKey, m_asmap)};
+ return AddressPosition(/*tried=*/false,
+ /*multiplicity=*/addr_info->nRefCount,
+ /*bucket=*/bucket,
+ /*position=*/addr_info->GetBucketPosition(nKey, true, bucket));
+ }
+}
+
void AddrManImpl::Check() const
{
AssertLockHeld(cs);
@@ -1116,6 +1139,15 @@ void AddrManImpl::SetServices(const CService& addr, ServiceFlags nServices)
Check();
}
+std::optional<AddressPosition> AddrManImpl::FindAddressEntry(const CAddress& addr)
+{
+ LOCK(cs);
+ Check();
+ auto entry = FindAddressEntry_(addr);
+ Check();
+ return entry;
+}
+
const std::vector<bool>& AddrManImpl::GetAsmap() const
{
return m_asmap;
@@ -1201,3 +1233,8 @@ const std::vector<bool>& AddrMan::GetAsmap() const
{
return m_impl->GetAsmap();
}
+
+std::optional<AddressPosition> AddrMan::FindAddressEntry(const CAddress& addr)
+{
+ return m_impl->FindAddressEntry(addr);
+}
diff --git a/src/addrman.h b/src/addrman.h
index 7f0936be8c..0646ef368d 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -22,6 +22,31 @@ class AddrManImpl;
/** Default for -checkaddrman */
static constexpr int32_t DEFAULT_ADDRMAN_CONSISTENCY_CHECKS{0};
+/** Test-only struct, capturing info about an address in AddrMan */
+struct AddressPosition {
+ // Whether the address is in the new or tried table
+ const bool tried;
+
+ // Addresses in the tried table should always have a multiplicity of 1.
+ // Addresses in the new table can have multiplicity between 1 and
+ // ADDRMAN_NEW_BUCKETS_PER_ADDRESS
+ const int multiplicity;
+
+ // If the address is in the new table, the bucket and position are
+ // populated based on the first source who sent the address.
+ // In certain edge cases, this may not be where the address is currently
+ // located.
+ const int bucket;
+ const int position;
+
+ bool operator==(AddressPosition other) {
+ return std::tie(tried, multiplicity, bucket, position) ==
+ std::tie(other.tried, other.multiplicity, other.bucket, other.position);
+ }
+ explicit AddressPosition(bool tried_in, int multiplicity_in, int bucket_in, int position_in)
+ : tried{tried_in}, multiplicity{multiplicity_in}, bucket{bucket_in}, position{position_in} {}
+};
+
/** Stochastic address manager
*
* Design goals:
@@ -142,6 +167,15 @@ public:
void SetServices(const CService& addr, ServiceFlags nServices);
const std::vector<bool>& GetAsmap() const;
+
+ /** Test-only function
+ * Find the address record in AddrMan and return information about its
+ * position.
+ * @param[in] addr The address record to look up.
+ * @return Information about the address record in AddrMan
+ * or nullopt if address is not found.
+ */
+ std::optional<AddressPosition> FindAddressEntry(const CAddress& addr);
};
#endif // BITCOIN_ADDRMAN_H
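A minimal sketch of how a caller (for instance a unit test; src/test/addrman_tests.cpp grows by 476 lines in this diff) might exercise the new test-only accessor. The AddrMan constructor arguments and the LookupNumeric() helper used here are assumptions for illustration, not taken from this diff:

```cpp
// Hypothetical test fragment, not from this diff. Constructor arguments and
// LookupNumeric() usage are assumptions made only for illustration.
#include <addrman.h>
#include <netbase.h>
#include <protocol.h>

#include <cassert>
#include <optional>
#include <vector>

void ExampleFindAddressEntry()
{
    AddrMan addrman{/*asmap=*/std::vector<bool>(), /*deterministic=*/true, /*consistency_check_ratio=*/100};
    const CAddress addr{LookupNumeric("250.1.1.1", 8333), NODE_NONE};
    const CNetAddr source{LookupNumeric("250.1.2.1", 8333)};
    addrman.Add({addr}, source);

    const std::optional<AddressPosition> pos{addrman.FindAddressEntry(addr)};
    assert(pos.has_value());
    assert(!pos->tried);            // a freshly added address lives in the "new" table
    assert(pos->multiplicity == 1); // only one source has announced it so far
}
```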
diff --git a/src/addrman_impl.h b/src/addrman_impl.h
index bd7caf473b..5e76f72342 100644
--- a/src/addrman_impl.h
+++ b/src/addrman_impl.h
@@ -137,9 +137,11 @@ public:
void SetServices(const CService& addr, ServiceFlags nServices)
EXCLUSIVE_LOCKS_REQUIRED(!cs);
+ std::optional<AddressPosition> FindAddressEntry(const CAddress& addr)
+ EXCLUSIVE_LOCKS_REQUIRED(!cs);
+
const std::vector<bool>& GetAsmap() const;
- friend class AddrManTest;
friend class AddrManDeterministic;
private:
@@ -266,6 +268,8 @@ private:
std::pair<CAddress, int64_t> SelectTriedCollision_() EXCLUSIVE_LOCKS_REQUIRED(cs);
+ std::optional<AddressPosition> FindAddressEntry_(const CAddress& addr) EXCLUSIVE_LOCKS_REQUIRED(cs);
+
//! Consistency check, taking into account m_consistency_check_ratio.
//! Will std::abort if an inconsistency is detected.
void Check() const EXCLUSIVE_LOCKS_REQUIRED(cs);
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index 8602e362c8..edec883264 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -630,7 +630,7 @@ static void MutateTxSign(CMutableTransaction& tx, const std::string& flagStr)
}
Coin newcoin;
newcoin.out.scriptPubKey = scriptPubKey;
- newcoin.out.nValue = 0;
+ newcoin.out.nValue = MAX_MONEY;
if (prevOut.exists("amount")) {
newcoin.out.nValue = AmountFromValue(prevOut["amount"]);
}
@@ -669,6 +669,10 @@ static void MutateTxSign(CMutableTransaction& tx, const std::string& flagStr)
if (!fHashSingle || (i < mergedTx.vout.size()))
ProduceSignature(keystore, MutableTransactionSignatureCreator(&mergedTx, i, amount, nHashType), prevPubKey, sigdata);
+ if (amount == MAX_MONEY && !sigdata.scriptWitness.IsNull()) {
+ throw std::runtime_error(strprintf("Missing amount for CTxOut with scriptPubKey=%s", HexStr(prevPubKey)));
+ }
+
UpdateInput(txin, sigdata);
}
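The new default acts as a sentinel: when no "amount" is supplied for the previous output but signing still produces a witness, the result would be an invalid signature (segwit signature hashes commit to the amount being spent), so the tool now fails instead. A standalone sketch of that pattern, with illustrative names rather than the in-tree code path:

```cpp
// Standalone sketch of the sentinel logic, not the in-tree MutateTxSign().
#include <cstdint>
#include <optional>
#include <stdexcept>

static constexpr int64_t MAX_MONEY{21000000LL * 100000000LL}; // 21 million BTC in satoshis

int64_t AmountForSigning(std::optional<int64_t> provided_amount, bool produced_witness_sig)
{
    const int64_t amount{provided_amount.value_or(MAX_MONEY)}; // sentinel when "amount" is absent
    if (amount == MAX_MONEY && produced_witness_sig) {
        // A witness signature commits to the amount, so signing against the
        // sentinel would silently yield an invalid signature.
        throw std::runtime_error("Missing amount for signed segwit input");
    }
    return amount;
}

int main()
{
    AmountForSigning(/*provided_amount=*/50000, /*produced_witness_sig=*/true); // ok: amount given
    try {
        AmountForSigning(/*provided_amount=*/std::nullopt, /*produced_witness_sig=*/true); // rejected
    } catch (const std::runtime_error&) {}
}
```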
diff --git a/src/blockfilter.cpp b/src/blockfilter.cpp
index 68cc6462d0..566254d839 100644
--- a/src/blockfilter.cpp
+++ b/src/blockfilter.cpp
@@ -24,37 +24,6 @@ static const std::map<BlockFilterType, std::string> g_filter_types = {
{BlockFilterType::BASIC, "basic"},
};
-// Map a value x that is uniformly distributed in the range [0, 2^64) to a
-// value uniformly distributed in [0, n) by returning the upper 64 bits of
-// x * n.
-//
-// See: https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
-static uint64_t MapIntoRange(uint64_t x, uint64_t n)
-{
-#ifdef __SIZEOF_INT128__
- return (static_cast<unsigned __int128>(x) * static_cast<unsigned __int128>(n)) >> 64;
-#else
- // To perform the calculation on 64-bit numbers without losing the
- // result to overflow, split the numbers into the most significant and
- // least significant 32 bits and perform multiplication piece-wise.
- //
- // See: https://stackoverflow.com/a/26855440
- uint64_t x_hi = x >> 32;
- uint64_t x_lo = x & 0xFFFFFFFF;
- uint64_t n_hi = n >> 32;
- uint64_t n_lo = n & 0xFFFFFFFF;
-
- uint64_t ac = x_hi * n_hi;
- uint64_t ad = x_hi * n_lo;
- uint64_t bc = x_lo * n_hi;
- uint64_t bd = x_lo * n_lo;
-
- uint64_t mid34 = (bd >> 32) + (bc & 0xFFFFFFFF) + (ad & 0xFFFFFFFF);
- uint64_t upper64 = ac + (bc >> 32) + (ad >> 32) + (mid34 >> 32);
- return upper64;
-#endif
-}
-
uint64_t GCSFilter::HashToRange(const Element& element) const
{
uint64_t hash = CSipHasher(m_params.m_siphash_k0, m_params.m_siphash_k1)
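The removed MapIntoRange() is the Lemire fast-range reduction its comment describes; judging by the diffstat (src/util/golombrice.h changes by 31 lines), it appears to have moved rather than disappeared. For reference, a self-contained sketch of the 128-bit form, assuming the GCC/Clang unsigned __int128 extension:

```cpp
// Self-contained sketch of the fast range reduction removed above.
#include <cassert>
#include <cstdint>

static uint64_t MapIntoRange(uint64_t x, uint64_t n)
{
    // The upper 64 bits of the 128-bit product x * n are uniformly distributed
    // in [0, n) when x is uniformly distributed in [0, 2^64).
    return (static_cast<unsigned __int128>(x) * static_cast<unsigned __int128>(n)) >> 64;
}

int main()
{
    assert(MapIntoRange(0, 1000) == 0);                   // bottom of the input range maps to 0
    assert(MapIntoRange(~uint64_t{0}, 1000) == 999);      // top of the input range maps to n - 1
    assert(MapIntoRange(uint64_t{1} << 63, 1000) == 500); // midpoint maps to n / 2
}
```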
diff --git a/src/core_write.cpp b/src/core_write.cpp
index 358f736a13..067f1e4f4e 100644
--- a/src/core_write.cpp
+++ b/src/core_write.cpp
@@ -208,22 +208,17 @@ void TxToUniv(const CTransaction& tx, const uint256& hashBlock, UniValue& entry,
const CTxOut& prev_txout = prev_coin.out;
amt_total_in += prev_txout.nValue;
- switch (verbosity) {
- case TxVerbosity::SHOW_TXID:
- case TxVerbosity::SHOW_DETAILS:
- break;
- case TxVerbosity::SHOW_DETAILS_AND_PREVOUT:
- UniValue o_script_pub_key(UniValue::VOBJ);
- ScriptPubKeyToUniv(prev_txout.scriptPubKey, o_script_pub_key, /* includeHex */ true);
+ if (verbosity == TxVerbosity::SHOW_DETAILS_AND_PREVOUT) {
+ UniValue o_script_pub_key(UniValue::VOBJ);
+ ScriptPubKeyToUniv(prev_txout.scriptPubKey, o_script_pub_key, /*include_hex=*/ true);
- UniValue p(UniValue::VOBJ);
- p.pushKV("generated", bool(prev_coin.fCoinBase));
- p.pushKV("height", uint64_t(prev_coin.nHeight));
- p.pushKV("value", ValueFromAmount(prev_txout.nValue));
- p.pushKV("scriptPubKey", o_script_pub_key);
- in.pushKV("prevout", p);
- break;
+ UniValue p(UniValue::VOBJ);
+ p.pushKV("generated", bool(prev_coin.fCoinBase));
+ p.pushKV("height", uint64_t(prev_coin.nHeight));
+ p.pushKV("value", ValueFromAmount(prev_txout.nValue));
+ p.pushKV("scriptPubKey", o_script_pub_key);
+ in.pushKV("prevout", p);
}
}
in.pushKV("sequence", (int64_t)txin.nSequence);
diff --git a/src/index/coinstatsindex.cpp b/src/index/coinstatsindex.cpp
index 9ab9209ca4..e4ee0e674b 100644
--- a/src/index/coinstatsindex.cpp
+++ b/src/index/coinstatsindex.cpp
@@ -321,7 +321,7 @@ bool CoinStatsIndex::LookUpStats(const CBlockIndex* block_index, CCoinsStats& co
coins_stats.hashSerialized = entry.muhash;
coins_stats.nTransactionOutputs = entry.transaction_output_count;
coins_stats.nBogoSize = entry.bogo_size;
- coins_stats.nTotalAmount = entry.total_amount;
+ coins_stats.total_amount = entry.total_amount;
coins_stats.total_subsidy = entry.total_subsidy;
coins_stats.total_unspendable_amount = entry.total_unspendable_amount;
coins_stats.total_prevout_spent_amount = entry.total_prevout_spent_amount;
diff --git a/src/init.cpp b/src/init.cpp
index b11eb762f2..ab5a53d6e9 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -1404,31 +1404,31 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
uiInterface.InitMessage(_("Loading block index…").translated);
const int64_t load_block_index_start_time = GetTimeMillis();
- std::optional<ChainstateLoadingError> rv;
+ std::optional<ChainstateLoadingError> maybe_load_error;
try {
- rv = LoadChainstate(fReset,
- chainman,
- Assert(node.mempool.get()),
- fPruneMode,
- chainparams.GetConsensus(),
- fReindexChainState,
- cache_sizes.block_tree_db,
- cache_sizes.coins_db,
- cache_sizes.coins,
- false,
- false,
- ShutdownRequested,
- []() {
- uiInterface.ThreadSafeMessageBox(
- _("Error reading from database, shutting down."),
- "", CClientUIInterface::MSG_ERROR);
- });
+ maybe_load_error = LoadChainstate(fReset,
+ chainman,
+ Assert(node.mempool.get()),
+ fPruneMode,
+ chainparams.GetConsensus(),
+ fReindexChainState,
+ cache_sizes.block_tree_db,
+ cache_sizes.coins_db,
+ cache_sizes.coins,
+ /*block_tree_db_in_memory=*/false,
+ /*coins_db_in_memory=*/false,
+ /*shutdown_requested=*/ShutdownRequested,
+ /*coins_error_cb=*/[]() {
+ uiInterface.ThreadSafeMessageBox(
+ _("Error reading from database, shutting down."),
+ "", CClientUIInterface::MSG_ERROR);
+ });
} catch (const std::exception& e) {
LogPrintf("%s\n", e.what());
- rv = ChainstateLoadingError::ERROR_GENERIC_BLOCKDB_OPEN_FAILED;
+ maybe_load_error = ChainstateLoadingError::ERROR_GENERIC_BLOCKDB_OPEN_FAILED;
}
- if (rv.has_value()) {
- switch (rv.value()) {
+ if (maybe_load_error.has_value()) {
+ switch (maybe_load_error.value()) {
case ChainstateLoadingError::ERROR_LOADING_BLOCK_DB:
strLoadError = _("Error loading block database");
break;
@@ -1462,7 +1462,7 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
break;
}
} else {
- std::optional<ChainstateLoadVerifyError> rv2;
+ std::optional<ChainstateLoadVerifyError> maybe_verify_error;
try {
uiInterface.InitMessage(_("Verifying blocks…").translated);
auto check_blocks = args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS);
@@ -1470,19 +1470,19 @@ bool AppInitMain(NodeContext& node, interfaces::BlockAndHeaderTipInfo* tip_info)
LogPrintf("Prune: pruned datadir may not have more than %d blocks; only checking available blocks\n",
MIN_BLOCKS_TO_KEEP);
}
- rv2 = VerifyLoadedChainstate(chainman,
- fReset,
- fReindexChainState,
- chainparams.GetConsensus(),
- check_blocks,
- args.GetIntArg("-checklevel", DEFAULT_CHECKLEVEL),
- static_cast<int64_t(*)()>(GetTime));
+ maybe_verify_error = VerifyLoadedChainstate(chainman,
+ fReset,
+ fReindexChainState,
+ chainparams.GetConsensus(),
+ check_blocks,
+ args.GetIntArg("-checklevel", DEFAULT_CHECKLEVEL),
+ /*get_unix_time_seconds=*/static_cast<int64_t(*)()>(GetTime));
} catch (const std::exception& e) {
LogPrintf("%s\n", e.what());
- rv2 = ChainstateLoadVerifyError::ERROR_GENERIC_FAILURE;
+ maybe_verify_error = ChainstateLoadVerifyError::ERROR_GENERIC_FAILURE;
}
- if (rv2.has_value()) {
- switch (rv2.value()) {
+ if (maybe_verify_error.has_value()) {
+ switch (maybe_verify_error.value()) {
case ChainstateLoadVerifyError::ERROR_BLOCK_FROM_FUTURE:
strLoadError = _("The block database contains a block which appears to be from the future. "
"This may be due to your computer's date and time being set incorrectly. "
diff --git a/src/net.cpp b/src/net.cpp
index 019e77fd7a..89a4aee5d9 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -1099,10 +1099,10 @@ bool CConnman::AttemptToEvictConnection()
void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
struct sockaddr_storage sockaddr;
socklen_t len = sizeof(sockaddr);
- SOCKET hSocket = accept(hListenSocket.socket, (struct sockaddr*)&sockaddr, &len);
+ auto sock = hListenSocket.sock->Accept((struct sockaddr*)&sockaddr, &len);
CAddress addr;
- if (hSocket == INVALID_SOCKET) {
+ if (!sock) {
const int nErr = WSAGetLastError();
if (nErr != WSAEWOULDBLOCK) {
LogPrintf("socket error accept failed: %s\n", NetworkErrorString(nErr));
@@ -1116,15 +1116,15 @@ void CConnman::AcceptConnection(const ListenSocket& hListenSocket) {
addr = CAddress{MaybeFlipIPv6toCJDNS(addr), NODE_NONE};
}
- const CAddress addr_bind{MaybeFlipIPv6toCJDNS(GetBindAddress(hSocket)), NODE_NONE};
+ const CAddress addr_bind{MaybeFlipIPv6toCJDNS(GetBindAddress(sock->Get())), NODE_NONE};
NetPermissionFlags permissionFlags = NetPermissionFlags::None;
hListenSocket.AddSocketPermissionFlags(permissionFlags);
- CreateNodeFromAcceptedSocket(hSocket, permissionFlags, addr_bind, addr);
+ CreateNodeFromAcceptedSocket(std::move(sock), permissionFlags, addr_bind, addr);
}
-void CConnman::CreateNodeFromAcceptedSocket(SOCKET hSocket,
+void CConnman::CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
NetPermissionFlags permissionFlags,
const CAddress& addr_bind,
const CAddress& addr)
@@ -1150,27 +1150,24 @@ void CConnman::CreateNodeFromAcceptedSocket(SOCKET hSocket,
if (!fNetworkActive) {
LogPrint(BCLog::NET, "connection from %s dropped: not accepting new connections\n", addr.ToString());
- CloseSocket(hSocket);
return;
}
- if (!IsSelectableSocket(hSocket))
+ if (!IsSelectableSocket(sock->Get()))
{
LogPrintf("connection from %s dropped: non-selectable socket\n", addr.ToString());
- CloseSocket(hSocket);
return;
}
// According to the internet TCP_NODELAY is not carried into accepted sockets
// on all platforms. Set it again here just to be sure.
- SetSocketNoDelay(hSocket);
+ SetSocketNoDelay(sock->Get());
// Don't accept connections from banned peers.
bool banned = m_banman && m_banman->IsBanned(addr);
if (!NetPermissions::HasFlag(permissionFlags, NetPermissionFlags::NoBan) && banned)
{
LogPrint(BCLog::NET, "connection from %s dropped (banned)\n", addr.ToString());
- CloseSocket(hSocket);
return;
}
@@ -1179,7 +1176,6 @@ void CConnman::CreateNodeFromAcceptedSocket(SOCKET hSocket,
if (!NetPermissions::HasFlag(permissionFlags, NetPermissionFlags::NoBan) && nInbound + 1 >= nMaxInbound && discouraged)
{
LogPrint(BCLog::NET, "connection from %s dropped (discouraged)\n", addr.ToString());
- CloseSocket(hSocket);
return;
}
@@ -1188,7 +1184,6 @@ void CConnman::CreateNodeFromAcceptedSocket(SOCKET hSocket,
if (!AttemptToEvictConnection()) {
// No connection to evict, disconnect the new connection
LogPrint(BCLog::NET, "failed to find an eviction candidate - connection dropped (full)\n");
- CloseSocket(hSocket);
return;
}
}
@@ -1202,7 +1197,7 @@ void CConnman::CreateNodeFromAcceptedSocket(SOCKET hSocket,
}
const bool inbound_onion = std::find(m_onion_binds.begin(), m_onion_binds.end(), addr_bind) != m_onion_binds.end();
- CNode* pnode = new CNode(id, nodeServices, hSocket, addr, CalculateKeyedNetGroup(addr), nonce, addr_bind, "", ConnectionType::INBOUND, inbound_onion);
+ CNode* pnode = new CNode(id, nodeServices, sock->Release(), addr, CalculateKeyedNetGroup(addr), nonce, addr_bind, "", ConnectionType::INBOUND, inbound_onion);
pnode->AddRef();
pnode->m_permissionFlags = permissionFlags;
pnode->m_prefer_evict = discouraged;
@@ -1364,7 +1359,7 @@ bool CConnman::GenerateSelectSet(const std::vector<CNode*>& nodes,
std::set<SOCKET>& error_set)
{
for (const ListenSocket& hListenSocket : vhListenSocket) {
- recv_set.insert(hListenSocket.socket);
+ recv_set.insert(hListenSocket.sock->Get());
}
for (CNode* pnode : nodes) {
@@ -1646,7 +1641,7 @@ void CConnman::SocketHandlerListening(const std::set<SOCKET>& recv_set)
if (interruptNet) {
return;
}
- if (recv_set.count(listen_socket.socket) > 0) {
+ if (recv_set.count(listen_socket.sock->Get()) > 0) {
AcceptConnection(listen_socket);
}
}
@@ -2335,7 +2330,7 @@ void CConnman::ThreadI2PAcceptIncoming()
continue;
}
- CreateNodeFromAcceptedSocket(conn.sock->Release(), NetPermissionFlags::None,
+ CreateNodeFromAcceptedSocket(std::move(conn.sock), NetPermissionFlags::None,
CAddress{conn.me, NODE_NONE}, CAddress{conn.peer, NODE_NONE});
}
}
@@ -2397,7 +2392,7 @@ bool CConnman::BindListenPort(const CService& addrBind, bilingual_str& strError,
return false;
}
- vhListenSocket.push_back(ListenSocket(sock->Release(), permissions));
+ vhListenSocket.emplace_back(std::move(sock), permissions);
return true;
}
@@ -2706,15 +2701,6 @@ void CConnman::StopNodes()
DeleteNode(pnode);
}
- // Close listening sockets.
- for (ListenSocket& hListenSocket : vhListenSocket) {
- if (hListenSocket.socket != INVALID_SOCKET) {
- if (!CloseSocket(hListenSocket.socket)) {
- LogPrintf("CloseSocket(hListenSocket) failed with error %s\n", NetworkErrorString(WSAGetLastError()));
- }
- }
- }
-
for (CNode* pnode : m_nodes_disconnected) {
DeleteNode(pnode);
}
diff --git a/src/net.h b/src/net.h
index 977e6502ce..80fc93a5d0 100644
--- a/src/net.h
+++ b/src/net.h
@@ -25,6 +25,7 @@
#include <threadinterrupt.h>
#include <uint256.h>
#include <util/check.h>
+#include <util/sock.h>
#include <atomic>
#include <condition_variable>
@@ -947,9 +948,13 @@ public:
private:
struct ListenSocket {
public:
- SOCKET socket;
+ std::shared_ptr<Sock> sock;
inline void AddSocketPermissionFlags(NetPermissionFlags& flags) const { NetPermissions::AddFlag(flags, m_permissions); }
- ListenSocket(SOCKET socket_, NetPermissionFlags permissions_) : socket(socket_), m_permissions(permissions_) {}
+ ListenSocket(std::shared_ptr<Sock> sock_, NetPermissionFlags permissions_)
+ : sock{sock_}, m_permissions{permissions_}
+ {
+ }
+
private:
NetPermissionFlags m_permissions;
};
@@ -969,12 +974,12 @@ private:
/**
* Create a `CNode` object from a socket that has just been accepted and add the node to
* the `m_nodes` member.
- * @param[in] hSocket Connected socket to communicate with the peer.
+ * @param[in] sock Connected socket to communicate with the peer.
* @param[in] permissionFlags The peer's permissions.
* @param[in] addr_bind The address and port at our side of the connection.
* @param[in] addr The address and port at the peer's side of the connection.
*/
- void CreateNodeFromAcceptedSocket(SOCKET hSocket,
+ void CreateNodeFromAcceptedSocket(std::unique_ptr<Sock>&& sock,
NetPermissionFlags permissionFlags,
const CAddress& addr_bind,
const CAddress& addr);
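
The net changes above replace raw SOCKET handles with managed Sock objects, so accepted and listening sockets are closed by their owners' destructors rather than by explicit CloseSocket() calls (note the deleted cleanup loop in CConnman::StopNodes). Below is a minimal, self-contained sketch of that ownership pattern; it is an illustrative stand-in, not the real util/sock.h Sock class, and the names are hypothetical.

```cpp
#include <memory>
#include <utility>

// Hypothetical stand-in for a platform socket handle and its close() call.
using RawSocket = int;
constexpr RawSocket INVALID_RAW_SOCKET{-1};
void close_raw_socket(RawSocket) { /* platform close() / closesocket() */ }

// Simplified RAII wrapper: owns the handle and closes it on destruction,
// mirroring how std::unique_ptr<Sock> / std::shared_ptr<Sock> are used in the diff.
class ManagedSocket
{
public:
    explicit ManagedSocket(RawSocket s) : m_socket{s} {}
    ManagedSocket(const ManagedSocket&) = delete;
    ManagedSocket& operator=(const ManagedSocket&) = delete;
    ManagedSocket(ManagedSocket&& other) noexcept : m_socket{other.Release()} {}
    ~ManagedSocket() { if (m_socket != INVALID_RAW_SOCKET) close_raw_socket(m_socket); }

    RawSocket Get() const { return m_socket; }  // borrow the handle, keep ownership
    RawSocket Release() { return std::exchange(m_socket, INVALID_RAW_SOCKET); } // hand off ownership

private:
    RawSocket m_socket;
};

struct ListenSocketSketch {
    std::shared_ptr<ManagedSocket> sock; // shared: the select/poll loop may also hold a reference
};

int main()
{
    ListenSocketSketch listener{std::make_shared<ManagedSocket>(RawSocket{42})};
    // The handle is closed automatically when the last shared_ptr goes away,
    // which is why the explicit CloseSocket() loop in StopNodes() could be removed.
    return listener.sock->Get() == 42 ? 0 : 1;
}
```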
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 51aa498fb0..43843624a1 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -57,14 +57,14 @@ static constexpr auto HEADERS_DOWNLOAD_TIMEOUT_PER_HEADER = 1ms;
* behind headers chain.
*/
static constexpr int32_t MAX_OUTBOUND_PEERS_TO_PROTECT_FROM_DISCONNECT = 4;
-/** Timeout for (unprotected) outbound peers to sync to our chainwork, in seconds */
-static constexpr int64_t CHAIN_SYNC_TIMEOUT = 20 * 60; // 20 minutes
+/** Timeout for (unprotected) outbound peers to sync to our chainwork */
+static constexpr auto CHAIN_SYNC_TIMEOUT{20min};
/** How frequently to check for stale tips */
static constexpr auto STALE_CHECK_INTERVAL{10min};
/** How frequently to check for extra outbound peers and disconnect */
static constexpr auto EXTRA_PEER_CHECK_INTERVAL{45s};
/** Minimum time an outbound-peer-eviction candidate must be connected for, in order to evict */
-static constexpr std::chrono::seconds MINIMUM_CONNECT_TIME{30};
+static constexpr auto MINIMUM_CONNECT_TIME{30s};
/** SHA256("main address relay")[0:8] */
static constexpr uint64_t RANDOMIZER_ID_ADDRESS_RELAY = 0x3cac0035b5866b90ULL;
/// Age after which a stale block will no longer be served if requested as
@@ -74,7 +74,7 @@ static constexpr int STALE_RELAY_AGE_LIMIT = 30 * 24 * 60 * 60;
/// limiting block relay. Set to one week, denominated in seconds.
static constexpr int HISTORICAL_BLOCK_AGE = 7 * 24 * 60 * 60;
/** Time between pings automatically sent out for latency probing and keepalive */
-static constexpr std::chrono::minutes PING_INTERVAL{2};
+static constexpr auto PING_INTERVAL{2min};
/** The maximum number of entries in a locator */
static const unsigned int MAX_LOCATOR_SZ = 101;
/** The maximum number of entries in an 'inv' protocol message */
@@ -88,19 +88,19 @@ static constexpr int32_t MAX_PEER_TX_REQUEST_IN_FLIGHT = 100;
* the actual transaction (from any peer) in response to requests for them. */
static constexpr int32_t MAX_PEER_TX_ANNOUNCEMENTS = 5000;
/** How long to delay requesting transactions via txids, if we have wtxid-relaying peers */
-static constexpr auto TXID_RELAY_DELAY = std::chrono::seconds{2};
+static constexpr auto TXID_RELAY_DELAY{2s};
/** How long to delay requesting transactions from non-preferred peers */
-static constexpr auto NONPREF_PEER_TX_DELAY = std::chrono::seconds{2};
+static constexpr auto NONPREF_PEER_TX_DELAY{2s};
/** How long to delay requesting transactions from overloaded peers (see MAX_PEER_TX_REQUEST_IN_FLIGHT). */
-static constexpr auto OVERLOADED_PEER_TX_DELAY = std::chrono::seconds{2};
-/** How long to wait (in microseconds) before downloading a transaction from an additional peer */
-static constexpr std::chrono::microseconds GETDATA_TX_INTERVAL{std::chrono::seconds{60}};
+static constexpr auto OVERLOADED_PEER_TX_DELAY{2s};
+/** How long to wait before downloading a transaction from an additional peer */
+static constexpr auto GETDATA_TX_INTERVAL{60s};
/** Limit to avoid sending big packets. Not used in processing incoming GETDATA for compatibility */
static const unsigned int MAX_GETDATA_SZ = 1000;
/** Number of blocks that can be requested at any given time from a single peer. */
static const int MAX_BLOCKS_IN_TRANSIT_PER_PEER = 16;
/** Time during which a peer must stall block download progress before being disconnected. */
-static constexpr auto BLOCK_STALLING_TIMEOUT = 2s;
+static constexpr auto BLOCK_STALLING_TIMEOUT{2s};
/** Number of headers sent in one getheaders result. We rely on the assumption that if a peer sends
* less than this number, we reached its tip. Changing this value is a protocol upgrade. */
static const unsigned int MAX_HEADERS_RESULTS = 2000;
@@ -125,16 +125,16 @@ static const int MAX_UNCONNECTING_HEADERS = 10;
/** Minimum blocks required to signal NODE_NETWORK_LIMITED */
static const unsigned int NODE_NETWORK_LIMITED_MIN_BLOCKS = 288;
/** Average delay between local address broadcasts */
-static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL = 24h;
+static constexpr auto AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL{24h};
/** Average delay between peer address broadcasts */
-static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL = 30s;
+static constexpr auto AVG_ADDRESS_BROADCAST_INTERVAL{30s};
/** Average delay between trickled inventory transmissions for inbound peers.
* Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */
-static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL = 5s;
+static constexpr auto INBOUND_INVENTORY_BROADCAST_INTERVAL{5s};
/** Average delay between trickled inventory transmissions for outbound peers.
* Use a smaller delay as there is less privacy concern for them.
* Blocks and peers with NetPermissionFlags::NoBan permission bypass this. */
-static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL = 2s;
+static constexpr auto OUTBOUND_INVENTORY_BROADCAST_INTERVAL{2s};
/** Maximum rate of inventory items to send per second.
* Limits the impact of low-fee transaction floods. */
static constexpr unsigned int INVENTORY_BROADCAST_PER_SECOND = 7;
@@ -148,9 +148,9 @@ static constexpr unsigned int INVENTORY_MAX_RECENT_RELAY = 3500;
* peers, and random variations in the broadcast mechanism. */
static_assert(INVENTORY_MAX_RECENT_RELAY >= INVENTORY_BROADCAST_PER_SECOND * UNCONDITIONAL_RELAY_DELAY / std::chrono::seconds{1}, "INVENTORY_RELAY_MAX too low");
/** Average delay between feefilter broadcasts in seconds. */
-static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL = 10min;
+static constexpr auto AVG_FEEFILTER_BROADCAST_INTERVAL{10min};
/** Maximum feefilter broadcast delay after significant change. */
-static constexpr auto MAX_FEEFILTER_CHANGE_DELAY = 5min;
+static constexpr auto MAX_FEEFILTER_CHANGE_DELAY{5min};
/** Maximum number of compact filters that may be requested with one getcfilters. See BIP 157. */
static constexpr uint32_t MAX_GETCFILTERS_SIZE = 1000;
/** Maximum number of cf hashes that may be requested with one getcfheaders. See BIP 157. */
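
The constant definitions above migrate from integer second counts to std::chrono types with literal suffixes and brace initialization; constexpr auto CHAIN_SYNC_TIMEOUT{20min} deduces std::chrono::minutes, and mixed-unit arithmetic and comparisons are converted through the common duration type rather than by hand-written "* 60" factors. A small sketch of the idiom, independent of the Bitcoin Core code:

```cpp
#include <cassert>
#include <chrono>

using namespace std::chrono_literals;

// Brace initialization with a duration literal deduces the duration type:
static constexpr auto CHAIN_SYNC_TIMEOUT{20min};   // std::chrono::minutes
static constexpr auto HEADERS_RESPONSE_TIME{2min}; // std::chrono::minutes
static constexpr auto MINIMUM_CONNECT_TIME{30s};   // std::chrono::seconds

int main()
{
    // Mixed-unit arithmetic converts to the finer unit automatically.
    std::chrono::seconds deadline{0s};
    const auto now = 100s;
    deadline = now + CHAIN_SYNC_TIMEOUT;                   // 100s + 20min -> 1300s
    assert(deadline == 1300s);
    assert(MINIMUM_CONNECT_TIME < HEADERS_RESPONSE_TIME);  // 30s < 2min
    return 0;
}
```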
@@ -329,7 +329,7 @@ private:
EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** Consider evicting an outbound peer based on the amount of time they've been behind our tip */
- void ConsiderEviction(CNode& pto, int64_t time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ void ConsiderEviction(CNode& pto, std::chrono::seconds time_in_seconds) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
/** If we have extra outbound peers, try to disconnect the one with the oldest block announcement */
void EvictExtraOutboundPeers(std::chrono::seconds now) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
@@ -727,7 +727,7 @@ struct CNodeState {
* - its chain tip has at least as much work as ours
*
* CHAIN_SYNC_TIMEOUT: if a peer's best known block has less work than our tip,
- * set a timeout CHAIN_SYNC_TIMEOUT seconds in the future:
+ * set a timeout CHAIN_SYNC_TIMEOUT in the future:
* - If at timeout their best known block now has more work than our tip
* when the timeout was set, then either reset the timeout or clear it
* (after comparing against our current tip's work)
@@ -742,7 +742,7 @@ struct CNodeState {
*/
struct ChainSyncTimeoutState {
//! A timeout used for checking whether our peer has sufficiently synced
- int64_t m_timeout{0};
+ std::chrono::seconds m_timeout{0s};
//! A header with the work we require on our peer's chain
const CBlockIndex* m_work_header{nullptr};
//! After timeout is reached, set to true after sending getheaders
@@ -949,10 +949,10 @@ bool PeerManagerImpl::TipMayBeStale()
{
AssertLockHeld(cs_main);
const Consensus::Params& consensusParams = m_chainparams.GetConsensus();
- if (count_seconds(m_last_tip_update) == 0) {
+ if (m_last_tip_update.load() == 0s) {
m_last_tip_update = GetTime<std::chrono::seconds>();
}
- return count_seconds(m_last_tip_update) < GetTime() - consensusParams.nPowTargetSpacing * 3 && mapBlocksInFlight.empty();
+ return m_last_tip_update.load() < GetTime<std::chrono::seconds>() - std::chrono::seconds{consensusParams.nPowTargetSpacing * 3} && mapBlocksInFlight.empty();
}
bool PeerManagerImpl::CanDirectFetch()
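
TipMayBeStale() now compares the atomic m_last_tip_update directly as a std::chrono::seconds value instead of unpacking it with count_seconds(). A reduced, self-contained sketch of that pattern (std::atomic accepts a duration because it is trivially copyable; the names below are illustrative):

```cpp
#include <atomic>
#include <chrono>
#include <iostream>

using namespace std::chrono_literals;

// Sketch: a stale-tip check against an atomically updated timestamp.
std::atomic<std::chrono::seconds> g_last_tip_update{0s};

bool TipMayBeStaleSketch(std::chrono::seconds now, std::chrono::seconds spacing)
{
    if (g_last_tip_update.load() == 0s) g_last_tip_update = now; // first call: initialize
    // Stale if the last update is more than three target spacings in the past.
    return g_last_tip_update.load() < now - 3 * spacing;
}

int main()
{
    g_last_tip_update = 1000s;
    std::cout << TipMayBeStaleSketch(1000s + 45min, 10min) << '\n'; // 1 (stale)
    std::cout << TipMayBeStaleSketch(1000s + 20min, 10min) << '\n'; // 0 (fresh)
}
```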
@@ -1137,7 +1137,7 @@ void PeerManagerImpl::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid,
// - TXID_RELAY_DELAY for txid announcements while wtxid peers are available
// - OVERLOADED_PEER_TX_DELAY for announcements from peers which have at least
// MAX_PEER_TX_REQUEST_IN_FLIGHT requests in flight (and don't have NetPermissionFlags::Relay).
- auto delay = std::chrono::microseconds{0};
+ auto delay{0us};
const bool preferred = state->fPreferredDownload;
if (!preferred) delay += NONPREF_PEER_TX_DELAY;
if (!gtxid.IsWtxid() && m_wtxid_relay_peers > 0) delay += TXID_RELAY_DELAY;
@@ -1191,7 +1191,7 @@ void PeerManagerImpl::ReattemptInitialBroadcast(CScheduler& scheduler)
// Schedule next run for 10-15 minutes in the future.
// We add randomness on every cycle to avoid the possibility of P2P fingerprinting.
- const std::chrono::milliseconds delta = std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5});
+ const std::chrono::milliseconds delta = 10min + GetRandMillis(5min);
scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
}
@@ -1296,7 +1296,7 @@ bool PeerManagerImpl::GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats) c
// since pingtime does not update until the ping is complete, which might take a while.
// So, if a ping is taking an unusually long time in flight,
// the caller can immediately detect that this is happening.
- std::chrono::microseconds ping_wait{0};
+ auto ping_wait{0us};
if ((0 != peer->m_ping_nonce_sent) && (0 != peer->m_ping_start.load().count())) {
ping_wait = GetTime<std::chrono::microseconds>() - peer->m_ping_start.load();
}
@@ -1496,7 +1496,7 @@ void PeerManagerImpl::StartScheduledTasks(CScheduler& scheduler)
scheduler.scheduleEvery([this] { this->CheckForStaleTipAndEvictPeers(); }, std::chrono::seconds{EXTRA_PEER_CHECK_INTERVAL});
// schedule next run for 10-15 minutes in the future
- const std::chrono::milliseconds delta = std::chrono::minutes{10} + GetRandMillis(std::chrono::minutes{5});
+ const std::chrono::milliseconds delta = 10min + GetRandMillis(5min);
scheduler.scheduleFromNow([&] { ReattemptInitialBroadcast(scheduler); }, delta);
}
@@ -1746,8 +1746,8 @@ void PeerManagerImpl::RelayAddress(NodeId originator,
// Relay to a limited number of other nodes
// Use deterministic randomness to send to the same nodes for 24 hours
// at a time so the m_addr_knowns of the chosen nodes prevent repeats
- uint64_t hashAddr = addr.GetHash();
- const CSipHasher hasher = m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr << 32).Write((GetTime() + hashAddr) / (24 * 60 * 60));
+ const uint64_t hashAddr{addr.GetHash()};
+ const CSipHasher hasher{m_connman.GetDeterministicRandomizer(RANDOMIZER_ID_ADDRESS_RELAY).Write(hashAddr).Write((GetTime() + hashAddr) / (24 * 60 * 60))};
FastRandomContext insecure_rand;
// Relay reachable addresses to 2 peers. Unreachable addresses are relayed randomly to 1 or 2 peers.
@@ -1963,10 +1963,9 @@ void PeerManagerImpl::ProcessGetData(CNode& pfrom, Peer& peer, const std::atomic
std::vector<CInv> vNotFound;
const CNetMsgMaker msgMaker(pfrom.GetCommonVersion());
- const std::chrono::seconds now = GetTime<std::chrono::seconds>();
+ const auto now{GetTime<std::chrono::seconds>()};
// Get last mempool request time
- const std::chrono::seconds mempool_req = pfrom.m_tx_relay != nullptr ? pfrom.m_tx_relay->m_last_mempool_req.load()
- : std::chrono::seconds::min();
+ const auto mempool_req = pfrom.m_tx_relay != nullptr ? pfrom.m_tx_relay->m_last_mempool_req.load() : std::chrono::seconds::min();
// Process as many TX items from the front of the getdata queue as
// possible, since they're common and it's efficient to batch process
@@ -2890,7 +2889,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
int64_t nSince = nNow - 10 * 60;
// Update/increment addr rate limiting bucket.
- const auto current_time = GetTime<std::chrono::microseconds>();
+ const auto current_time{GetTime<std::chrono::microseconds>()};
if (peer->m_addr_token_bucket < MAX_ADDR_PROCESSING_TOKEN_BUCKET) {
// Don't increment bucket if it's already full
const auto time_diff = std::max(current_time - peer->m_addr_token_timestamp, 0us);
@@ -2976,7 +2975,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
LOCK(cs_main);
- const auto current_time = GetTime<std::chrono::microseconds>();
+ const auto current_time{GetTime<std::chrono::microseconds>()};
uint256* best_block{nullptr};
for (CInv& inv : vInv) {
@@ -3354,7 +3353,7 @@ void PeerManagerImpl::ProcessMessage(CNode& pfrom, const std::string& msg_type,
}
}
if (!fRejectedParents) {
- const auto current_time = GetTime<std::chrono::microseconds>();
+ const auto current_time{GetTime<std::chrono::microseconds>()};
for (const uint256& parent_txid : unique_parents) {
// Here, we only have the txid (and not wtxid) of the
@@ -4180,7 +4179,7 @@ bool PeerManagerImpl::ProcessMessages(CNode* pfrom, std::atomic<bool>& interrupt
return fMoreWork;
}
-void PeerManagerImpl::ConsiderEviction(CNode& pto, int64_t time_in_seconds)
+void PeerManagerImpl::ConsiderEviction(CNode& pto, std::chrono::seconds time_in_seconds)
{
AssertLockHeld(cs_main);
@@ -4195,12 +4194,12 @@ void PeerManagerImpl::ConsiderEviction(CNode& pto, int64_t time_in_seconds)
// unless it's invalid, in which case we should find that out and
// disconnect from them elsewhere).
if (state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= m_chainman.ActiveChain().Tip()->nChainWork) {
- if (state.m_chain_sync.m_timeout != 0) {
- state.m_chain_sync.m_timeout = 0;
+ if (state.m_chain_sync.m_timeout != 0s) {
+ state.m_chain_sync.m_timeout = 0s;
state.m_chain_sync.m_work_header = nullptr;
state.m_chain_sync.m_sent_getheaders = false;
}
- } else if (state.m_chain_sync.m_timeout == 0 || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
+ } else if (state.m_chain_sync.m_timeout == 0s || (state.m_chain_sync.m_work_header != nullptr && state.pindexBestKnownBlock != nullptr && state.pindexBestKnownBlock->nChainWork >= state.m_chain_sync.m_work_header->nChainWork)) {
// Our best block known by this peer is behind our tip, and we're either noticing
// that for the first time, OR this peer was able to catch up to some earlier point
// where we checked against our tip.
@@ -4208,7 +4207,7 @@ void PeerManagerImpl::ConsiderEviction(CNode& pto, int64_t time_in_seconds)
state.m_chain_sync.m_timeout = time_in_seconds + CHAIN_SYNC_TIMEOUT;
state.m_chain_sync.m_work_header = m_chainman.ActiveChain().Tip();
state.m_chain_sync.m_sent_getheaders = false;
- } else if (state.m_chain_sync.m_timeout > 0 && time_in_seconds > state.m_chain_sync.m_timeout) {
+ } else if (state.m_chain_sync.m_timeout > 0s && time_in_seconds > state.m_chain_sync.m_timeout) {
// No evidence yet that our peer has synced to a chain with work equal to that
// of our tip, when we first detected it was behind. Send a single getheaders
// message to give the peer a chance to update us.
@@ -4221,7 +4220,7 @@ void PeerManagerImpl::ConsiderEviction(CNode& pto, int64_t time_in_seconds)
LogPrint(BCLog::NET, "sending getheaders to outbound peer=%d to verify chain work (current best known block:%s, benchmark blockhash: %s)\n", pto.GetId(), state.pindexBestKnownBlock != nullptr ? state.pindexBestKnownBlock->GetBlockHash().ToString() : "<none>", state.m_chain_sync.m_work_header->GetBlockHash().ToString());
m_connman.PushMessage(&pto, msgMaker.Make(NetMsgType::GETHEADERS, m_chainman.ActiveChain().GetLocator(state.m_chain_sync.m_work_header->pprev), uint256()));
state.m_chain_sync.m_sent_getheaders = true;
- constexpr int64_t HEADERS_RESPONSE_TIME = 120; // 2 minutes
+ constexpr auto HEADERS_RESPONSE_TIME{2min};
// Bump the timeout to allow a response, which could clear the timeout
// (if the response shows the peer has synced), reset the timeout (if
// the peer syncs to the required work but not to our tip), or result
@@ -4348,7 +4347,8 @@ void PeerManagerImpl::CheckForStaleTipAndEvictPeers()
// Check whether our tip is stale, and if so, allow using an extra
// outbound peer
if (!fImporting && !fReindex && m_connman.GetNetworkActive() && m_connman.GetUseAddrmanOutgoing() && TipMayBeStale()) {
- LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n", count_seconds(now) - count_seconds(m_last_tip_update));
+ LogPrintf("Potential stale tip detected, will try using extra outbound peer (last tip update: %d seconds ago)\n",
+ count_seconds(now - m_last_tip_update.load()));
m_connman.SetTryNewOutboundPeer(true);
} else if (m_connman.GetTryNewOutboundPeer()) {
m_connman.SetTryNewOutboundPeer(false);
@@ -4570,7 +4570,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// If we get here, the outgoing message serialization version is set and can't change.
const CNetMsgMaker msgMaker(pto->GetCommonVersion());
- const auto current_time = GetTime<std::chrono::microseconds>();
+ const auto current_time{GetTime<std::chrono::microseconds>()};
if (pto->IsAddrFetchConn() && current_time - pto->m_connected > 10 * AVG_ADDRESS_BROADCAST_INTERVAL) {
LogPrint(BCLog::NET, "addrfetch connection timeout; disconnecting peer=%d\n", pto->GetId());
@@ -4969,7 +4969,7 @@ bool PeerManagerImpl::SendMessages(CNode* pto)
// Check that outbound peers have reasonable chains
// GetTime() is used by this anti-DoS logic so we can test this using mocktime
- ConsiderEviction(*pto, GetTime());
+ ConsiderEviction(*pto, GetTime<std::chrono::seconds>());
//
// Message: getdata (blocks)
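
ConsiderEviction() now keeps its per-peer chain-sync deadline as std::chrono::seconds. The state machine it implements, clear the deadline when the peer has caught up, arm it the first time the peer is noticed to be behind, and act once the armed deadline has passed, can be summarized with this simplified sketch; the names are illustrative, not the real CNodeState fields, and the real code also sends a getheaders and bumps the deadline before disconnecting.

```cpp
#include <chrono>
#include <iostream>

using namespace std::chrono_literals;

static constexpr auto CHAIN_SYNC_TIMEOUT{20min};

struct ChainSyncSketch {
    std::chrono::seconds timeout{0s}; // 0s means "not armed"
};

// Returns true when the peer should be considered for eviction.
bool ConsiderEvictionSketch(ChainSyncSketch& s, bool peer_caught_up, std::chrono::seconds now)
{
    if (peer_caught_up) {
        s.timeout = 0s;                       // clear: peer has enough work
    } else if (s.timeout == 0s) {
        s.timeout = now + CHAIN_SYNC_TIMEOUT; // arm: first time we notice it is behind
    } else if (now > s.timeout) {
        return true;                          // armed deadline expired
    }
    return false;
}

int main()
{
    ChainSyncSketch s;
    std::cout << ConsiderEvictionSketch(s, false, 0s) << '\n';    // 0: just armed
    std::cout << ConsiderEvictionSketch(s, false, 10min) << '\n'; // 0: still within timeout
    std::cout << ConsiderEvictionSketch(s, false, 21min) << '\n'; // 1: deadline passed
}
```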
diff --git a/src/node/blockstorage.cpp b/src/node/blockstorage.cpp
index 53bc2b5069..e9f78c2870 100644
--- a/src/node/blockstorage.cpp
+++ b/src/node/blockstorage.cpp
@@ -12,6 +12,7 @@
#include <fs.h>
#include <hash.h>
#include <pow.h>
+#include <reverse_iterator.h>
#include <shutdown.h>
#include <signet.h>
#include <streams.h>
@@ -47,6 +48,375 @@ static FILE* OpenUndoFile(const FlatFilePos& pos, bool fReadOnly = false);
static FlatFileSeq BlockFileSeq();
static FlatFileSeq UndoFileSeq();
+CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
+{
+ AssertLockHeld(cs_main);
+ BlockMap::const_iterator it = m_block_index.find(hash);
+ return it == m_block_index.end() ? nullptr : it->second;
+}
+
+CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block)
+{
+ AssertLockHeld(cs_main);
+
+ // Check for duplicate
+ uint256 hash = block.GetHash();
+ BlockMap::iterator it = m_block_index.find(hash);
+ if (it != m_block_index.end()) {
+ return it->second;
+ }
+
+ // Construct new block index object
+ CBlockIndex* pindexNew = new CBlockIndex(block);
+ // We assign the sequence id to blocks only when the full data is available,
+ // to avoid miners withholding blocks but broadcasting headers, to get a
+ // competitive advantage.
+ pindexNew->nSequenceId = 0;
+ BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
+ pindexNew->phashBlock = &((*mi).first);
+ BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
+ if (miPrev != m_block_index.end()) {
+ pindexNew->pprev = (*miPrev).second;
+ pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
+ pindexNew->BuildSkip();
+ }
+ pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
+ pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
+ pindexNew->RaiseValidity(BLOCK_VALID_TREE);
+ if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
+ pindexBestHeader = pindexNew;
+
+ setDirtyBlockIndex.insert(pindexNew);
+
+ return pindexNew;
+}
+
+void BlockManager::PruneOneBlockFile(const int fileNumber)
+{
+ AssertLockHeld(cs_main);
+ LOCK(cs_LastBlockFile);
+
+ for (const auto& entry : m_block_index) {
+ CBlockIndex* pindex = entry.second;
+ if (pindex->nFile == fileNumber) {
+ pindex->nStatus &= ~BLOCK_HAVE_DATA;
+ pindex->nStatus &= ~BLOCK_HAVE_UNDO;
+ pindex->nFile = 0;
+ pindex->nDataPos = 0;
+ pindex->nUndoPos = 0;
+ setDirtyBlockIndex.insert(pindex);
+
+ // Prune from m_blocks_unlinked -- any block we prune would have
+ // to be downloaded again in order to consider its chain, at which
+ // point it would be considered as a candidate for
+ // m_blocks_unlinked or setBlockIndexCandidates.
+ auto range = m_blocks_unlinked.equal_range(pindex->pprev);
+ while (range.first != range.second) {
+ std::multimap<CBlockIndex*, CBlockIndex*>::iterator _it = range.first;
+ range.first++;
+ if (_it->second == pindex) {
+ m_blocks_unlinked.erase(_it);
+ }
+ }
+ }
+ }
+
+ vinfoBlockFile[fileNumber].SetNull();
+ setDirtyFileInfo.insert(fileNumber);
+}
+
+void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height)
+{
+ assert(fPruneMode && nManualPruneHeight > 0);
+
+ LOCK2(cs_main, cs_LastBlockFile);
+ if (chain_tip_height < 0) {
+ return;
+ }
+
+ // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
+ unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP);
+ int count = 0;
+ for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
+ if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
+ continue;
+ }
+ PruneOneBlockFile(fileNumber);
+ setFilesToPrune.insert(fileNumber);
+ count++;
+ }
+ LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
+}
+
+void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd)
+{
+ LOCK2(cs_main, cs_LastBlockFile);
+ if (chain_tip_height < 0 || nPruneTarget == 0) {
+ return;
+ }
+ if ((uint64_t)chain_tip_height <= nPruneAfterHeight) {
+ return;
+ }
+
+ unsigned int nLastBlockWeCanPrune{(unsigned)std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP))};
+ uint64_t nCurrentUsage = CalculateCurrentUsage();
+ // We don't check to prune until after we've allocated new space for files
+ // So we should leave a buffer under our target to account for another allocation
+ // before the next pruning.
+ uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
+ uint64_t nBytesToPrune;
+ int count = 0;
+
+ if (nCurrentUsage + nBuffer >= nPruneTarget) {
+ // On a prune event, the chainstate DB is flushed.
+ // To avoid excessive prune events negating the benefit of high dbcache
+ // values, we should not prune too rapidly.
+ // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
+ if (is_ibd) {
+ // Since this is only relevant during IBD, we use a fixed 10%
+ nBuffer += nPruneTarget / 10;
+ }
+
+ for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
+ nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;
+
+ if (vinfoBlockFile[fileNumber].nSize == 0) {
+ continue;
+ }
+
+ if (nCurrentUsage + nBuffer < nPruneTarget) { // are we below our target?
+ break;
+ }
+
+ // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
+ if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
+ continue;
+ }
+
+ PruneOneBlockFile(fileNumber);
+ // Queue up the files for removal
+ setFilesToPrune.insert(fileNumber);
+ nCurrentUsage -= nBytesToPrune;
+ count++;
+ }
+ }
+
+ LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
+ nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
+ ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
+ nLastBlockWeCanPrune, count);
+}
+
+CBlockIndex* BlockManager::InsertBlockIndex(const uint256& hash)
+{
+ AssertLockHeld(cs_main);
+
+ if (hash.IsNull()) {
+ return nullptr;
+ }
+
+ // Return existing
+ BlockMap::iterator mi = m_block_index.find(hash);
+ if (mi != m_block_index.end()) {
+ return (*mi).second;
+ }
+
+ // Create new
+ CBlockIndex* pindexNew = new CBlockIndex();
+ mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
+ pindexNew->phashBlock = &((*mi).first);
+
+ return pindexNew;
+}
+
+bool BlockManager::LoadBlockIndex(
+ const Consensus::Params& consensus_params,
+ ChainstateManager& chainman)
+{
+ if (!m_block_tree_db->LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); })) {
+ return false;
+ }
+
+ // Calculate nChainWork
+ std::vector<std::pair<int, CBlockIndex*>> vSortedByHeight;
+ vSortedByHeight.reserve(m_block_index.size());
+ for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index) {
+ CBlockIndex* pindex = item.second;
+ vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
+ }
+ sort(vSortedByHeight.begin(), vSortedByHeight.end());
+
+ // Find start of assumed-valid region.
+ int first_assumed_valid_height = std::numeric_limits<int>::max();
+
+ for (const auto& [height, block] : vSortedByHeight) {
+ if (block->IsAssumedValid()) {
+ auto chainstates = chainman.GetAll();
+
+ // If we encounter an assumed-valid block index entry, ensure that we have
+ // one chainstate that tolerates assumed-valid entries and another that does
+ // not (i.e. the background validation chainstate), since assumed-valid
+ // entries should always be pending validation by a fully-validated chainstate.
+ auto any_chain = [&](auto fnc) { return std::any_of(chainstates.cbegin(), chainstates.cend(), fnc); };
+ assert(any_chain([](auto chainstate) { return chainstate->reliesOnAssumedValid(); }));
+ assert(any_chain([](auto chainstate) { return !chainstate->reliesOnAssumedValid(); }));
+
+ first_assumed_valid_height = height;
+ break;
+ }
+ }
+
+ for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight) {
+ if (ShutdownRequested()) return false;
+ CBlockIndex* pindex = item.second;
+ pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
+ pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
+
+ // We can link the chain of blocks for which we've received transactions at some point, or
+ // blocks that are assumed-valid on the basis of snapshot load (see
+ // PopulateAndValidateSnapshot()).
+ // Pruned nodes may have deleted the block.
+ if (pindex->nTx > 0) {
+ if (pindex->pprev) {
+ if (pindex->pprev->nChainTx > 0) {
+ pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
+ } else {
+ pindex->nChainTx = 0;
+ m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
+ }
+ } else {
+ pindex->nChainTx = pindex->nTx;
+ }
+ }
+ if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
+ pindex->nStatus |= BLOCK_FAILED_CHILD;
+ setDirtyBlockIndex.insert(pindex);
+ }
+ if (pindex->IsAssumedValid() ||
+ (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) &&
+ (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {
+
+ // Fill each chainstate's block candidate set. Only add assumed-valid
+ // blocks to the tip candidate set if the chainstate is allowed to rely on
+ // assumed-valid blocks.
+ //
+ // If all setBlockIndexCandidates contained the assumed-valid blocks, the
+ // background chainstate's ActivateBestChain() call would add assumed-valid
+ // blocks to the chain (based on how FindMostWorkChain() works). Obviously
+ // we don't want this since the purpose of the background validation chain
+ // is to validate assumed-valid blocks.
+ //
+ // Note: This is considering all blocks whose height is greater or equal to
+ // the first assumed-valid block to be assumed-valid blocks, and excluding
+ // them from the background chainstate's setBlockIndexCandidates set. This
+ // does mean that some blocks which are not technically assumed-valid
+ // (later blocks on a fork beginning before the first assumed-valid block)
+ // might not get added to the background chainstate, but this is ok,
+ // because they will still be attached to the active chainstate if they
+ // actually contain more work.
+ //
+ // Instead of this height-based approach, an earlier attempt was made at
+ // detecting "holistically" whether the block index under consideration
+ // relied on an assumed-valid ancestor, but this proved to be too slow to
+ // be practical.
+ for (CChainState* chainstate : chainman.GetAll()) {
+ if (chainstate->reliesOnAssumedValid() ||
+ pindex->nHeight < first_assumed_valid_height) {
+ chainstate->setBlockIndexCandidates.insert(pindex);
+ }
+ }
+ }
+ if (pindex->nStatus & BLOCK_FAILED_MASK && (!chainman.m_best_invalid || pindex->nChainWork > chainman.m_best_invalid->nChainWork)) {
+ chainman.m_best_invalid = pindex;
+ }
+ if (pindex->pprev) {
+ pindex->BuildSkip();
+ }
+ if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
+ pindexBestHeader = pindex;
+ }
+
+ return true;
+}
+
+void BlockManager::Unload()
+{
+ m_blocks_unlinked.clear();
+
+ for (const BlockMap::value_type& entry : m_block_index) {
+ delete entry.second;
+ }
+
+ m_block_index.clear();
+}
+
+bool BlockManager::LoadBlockIndexDB(ChainstateManager& chainman)
+{
+ if (!LoadBlockIndex(::Params().GetConsensus(), chainman)) {
+ return false;
+ }
+
+ // Load block file info
+ m_block_tree_db->ReadLastBlockFile(nLastBlockFile);
+ vinfoBlockFile.resize(nLastBlockFile + 1);
+ LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
+ for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
+ m_block_tree_db->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
+ }
+ LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
+ for (int nFile = nLastBlockFile + 1; true; nFile++) {
+ CBlockFileInfo info;
+ if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
+ vinfoBlockFile.push_back(info);
+ } else {
+ break;
+ }
+ }
+
+ // Check presence of blk files
+ LogPrintf("Checking all blk files are present...\n");
+ std::set<int> setBlkDataFiles;
+ for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index) {
+ CBlockIndex* pindex = item.second;
+ if (pindex->nStatus & BLOCK_HAVE_DATA) {
+ setBlkDataFiles.insert(pindex->nFile);
+ }
+ }
+ for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++) {
+ FlatFilePos pos(*it, 0);
+ if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
+ return false;
+ }
+ }
+
+ // Check whether we have ever pruned block & undo files
+ m_block_tree_db->ReadFlag("prunedblockfiles", fHavePruned);
+ if (fHavePruned) {
+ LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
+ }
+
+ // Check whether we need to continue reindexing
+ bool fReindexing = false;
+ m_block_tree_db->ReadReindexing(fReindexing);
+ if (fReindexing) fReindex = true;
+
+ return true;
+}
+
+CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
+{
+ const MapCheckpoints& checkpoints = data.mapCheckpoints;
+
+ for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints)) {
+ const uint256& hash = i.second;
+ CBlockIndex* pindex = LookupBlockIndex(hash);
+ if (pindex) {
+ return pindex;
+ }
+ }
+ return nullptr;
+}
+
bool IsBlockPruned(const CBlockIndex* pblockindex)
{
return (fHavePruned && !(pblockindex->nStatus & BLOCK_HAVE_DATA) && pblockindex->nTx > 0);
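
BlockManager::GetLastCheckpoint() walks the checkpoint map in reverse via reverse_iterate() from the newly included reverse_iterator.h, so the highest checkpoint whose block index entry exists is found first. A minimal sketch of such a reverse-range adapter; the real helper may differ in detail:

```cpp
#include <iostream>
#include <map>

// Minimal sketch of a reverse-range adapter like the one reverse_iterator.h provides.
template <typename T>
struct ReverseRange {
    T& m_container;
    auto begin() { return m_container.rbegin(); }
    auto end() { return m_container.rend(); }
};

template <typename T>
ReverseRange<T> reverse_iterate_sketch(T& container) { return {container}; }

int main()
{
    // Checkpoints are keyed by height; iterating in reverse checks the most
    // recent checkpoint first.
    const std::map<int, const char*> checkpoints{{11111, "a"}, {33333, "b"}, {74000, "c"}};
    for (const auto& [height, hash] : reverse_iterate_sketch(checkpoints)) {
        std::cout << height << ' ' << hash << '\n'; // prints 74000 c first
        break; // GetLastCheckpoint() returns the first entry it can look up
    }
}
```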
diff --git a/src/node/blockstorage.h b/src/node/blockstorage.h
index 7c7bf68178..a18203f48d 100644
--- a/src/node/blockstorage.h
+++ b/src/node/blockstorage.h
@@ -7,6 +7,7 @@
#include <fs.h>
#include <protocol.h> // For CMessageHeader::MessageStartChars
+#include <txdb.h>
#include <atomic>
#include <cstdint>
@@ -20,7 +21,9 @@ class CBlockIndex;
class CBlockUndo;
class CChain;
class CChainParams;
+class CChainState;
class ChainstateManager;
+struct CCheckpointData;
struct FlatFilePos;
namespace Consensus {
struct Params;
@@ -45,6 +48,87 @@ extern bool fPruneMode;
/** Number of MiB of block files that we're trying to stay below. */
extern uint64_t nPruneTarget;
+typedef std::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap;
+
+struct CBlockIndexWorkComparator {
+ bool operator()(const CBlockIndex* pa, const CBlockIndex* pb) const;
+};
+
+/**
+ * Maintains a tree of blocks (stored in `m_block_index`) which is consulted
+ * to determine where the most-work tip is.
+ *
+ * This data is used mostly in `CChainState` - information about, e.g.,
+ * candidate tips is not maintained here.
+ */
+class BlockManager
+{
+ friend CChainState;
+
+private:
+ /* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */
+ void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height);
+
+ /**
+ * Prune block and undo files (blk???.dat and rev???.dat) so that the disk space used is less than a user-defined target.
+ * The user sets the target (in MB) on the command line or in config file. This will be run on startup and whenever new
+ * space is allocated in a block or undo file, staying below the target. Changing back to unpruned requires a reindex
+ * (which in this case means the blockchain must be re-downloaded.)
+ *
+ * Pruning functions are called from FlushStateToDisk when the global fCheckForPruning flag has been set.
+ * Block and undo files are deleted in lock-step (when blk00003.dat is deleted, so is rev00003.dat.)
+ * Pruning cannot take place until the longest chain is at least a certain length (100000 on mainnet, 1000 on testnet, 1000 on regtest).
+ * Pruning will never delete a block within a defined distance (currently 288) from the active chain's tip.
+ * The block index is updated by unsetting HAVE_DATA and HAVE_UNDO for any blocks that were stored in the deleted files.
+ * A db flag records the fact that at least some block files have been pruned.
+ *
+ * @param[out] setFilesToPrune The set of file indices that can be unlinked will be returned
+ */
+ void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd);
+
+public:
+ BlockMap m_block_index GUARDED_BY(cs_main);
+
+ /**
+ * All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
+ * Pruned nodes may have entries where B is missing data.
+ */
+ std::multimap<CBlockIndex*, CBlockIndex*> m_blocks_unlinked;
+
+ std::unique_ptr<CBlockTreeDB> m_block_tree_db GUARDED_BY(::cs_main);
+
+ bool LoadBlockIndexDB(ChainstateManager& chainman) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+
+ /**
+ * Load the blocktree off disk and into memory. Populate certain metadata
+ * per index entry (nStatus, nChainWork, nTimeMax, etc.) as well as peripheral
+ * collections like setDirtyBlockIndex.
+ */
+ bool LoadBlockIndex(
+ const Consensus::Params& consensus_params,
+ ChainstateManager& chainman) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ /** Clear all data members. */
+ void Unload() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ CBlockIndex* AddToBlockIndex(const CBlockHeader& block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+ /** Create a new block index entry for a given block hash */
+ CBlockIndex* InsertBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ //! Mark one block file as pruned (modify associated database entries)
+ void PruneOneBlockFile(const int fileNumber) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ CBlockIndex* LookupBlockIndex(const uint256& hash) const EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ //! Returns last CBlockIndex* that is a checkpoint
+ CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
+
+ ~BlockManager()
+ {
+ Unload();
+ }
+};
+
//! Check whether the block associated with this index entry is pruned or not.
bool IsBlockPruned(const CBlockIndex* pblockindex);
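
FindFilesToPrune() only starts deleting files once current usage plus a safety buffer reaches the prune target, and during IBD the buffer is padded by 10% of the target so the chainstate is not flushed too often. A small worked sketch of that threshold check; the chunk sizes used here (16 MiB block files, 1 MiB undo files) are the conventional allocation sizes and are an assumption, not taken from this diff:

```cpp
#include <cstdint>
#include <iostream>

int main()
{
    const uint64_t MiB = 1024 * 1024;
    const uint64_t prune_target = 5500 * MiB;         // e.g. -prune=5500
    const uint64_t chunk_buffer = 16 * MiB + 1 * MiB; // assumed blk + rev allocation chunks
    const bool is_ibd = true;

    uint64_t buffer = chunk_buffer;
    if (is_ibd) buffer += prune_target / 10;          // +10% during initial block download

    const uint64_t current_usage = 5100 * MiB;
    // Prune only when usage plus the buffer would exceed the target.
    if (current_usage + buffer >= prune_target) {
        std::cout << "prune: need to free roughly "
                  << (current_usage + buffer - prune_target) / MiB << " MiB\n";
    } else {
        std::cout << "no prune needed\n";
    }
}
```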
diff --git a/src/node/chainstate.cpp b/src/node/chainstate.cpp
index abeebad1a6..0274587e17 100644
--- a/src/node/chainstate.cpp
+++ b/src/node/chainstate.cpp
@@ -141,7 +141,7 @@ std::optional<ChainstateLoadVerifyError> VerifyLoadedChainstate(ChainstateManage
for (CChainState* chainstate : chainman.GetAll()) {
if (!is_coinsview_empty(chainstate)) {
const CBlockIndex* tip = chainstate->m_chain.Tip();
- if (tip && tip->nTime > get_unix_time_seconds() + 2 * 60 * 60) {
+ if (tip && tip->nTime > get_unix_time_seconds() + MAX_FUTURE_BLOCK_TIME) {
return ChainstateLoadVerifyError::ERROR_BLOCK_FROM_FUTURE;
}
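
The magic 2 * 60 * 60 in the block-from-the-future check is replaced by the named consensus constant MAX_FUTURE_BLOCK_TIME, the maximum amount a block timestamp may be ahead of our time. A trivial equivalence check, assuming the conventional two-hour definition in chain.h (the diff itself does not show it):

```cpp
#include <cstdint>

// Assumed to match the definition in chain.h.
constexpr int64_t MAX_FUTURE_BLOCK_TIME = 2 * 60 * 60;

static_assert(MAX_FUTURE_BLOCK_TIME == 7200, "two hours, as in the old literal 2 * 60 * 60");

int main() { return 0; }
```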
diff --git a/src/node/chainstate.h b/src/node/chainstate.h
index 11278a0991..4df917a281 100644
--- a/src/node/chainstate.h
+++ b/src/node/chainstate.h
@@ -50,7 +50,7 @@ enum class ChainstateLoadingError {
* this sequence, when we explicitly checked shutdown_requested() at
* arbitrary points, one of those calls returned true". Therefore, a
* return value other than SHUTDOWN_PROBED does not guarantee that
- * shutdown_requested() hasn't been called indirectly.
+ * shutdown hasn't been called indirectly.
* - else
* - Success!
*/
diff --git a/src/node/coinstats.cpp b/src/node/coinstats.cpp
index b538474d8d..68cc65d3ed 100644
--- a/src/node/coinstats.cpp
+++ b/src/node/coinstats.cpp
@@ -11,6 +11,7 @@
#include <index/coinstatsindex.h>
#include <serialize.h>
#include <uint256.h>
+#include <util/overflow.h>
#include <util/system.h>
#include <validation.h>
@@ -82,7 +83,9 @@ static void ApplyStats(CCoinsStats& stats, const uint256& hash, const std::map<u
stats.nTransactions++;
for (auto it = outputs.begin(); it != outputs.end(); ++it) {
stats.nTransactionOutputs++;
- stats.nTotalAmount += it->second.out.nValue;
+ if (stats.total_amount.has_value()) {
+ stats.total_amount = CheckedAdd(*stats.total_amount, it->second.out.nValue);
+ }
stats.nBogoSize += GetBogoSize(it->second.out.scriptPubKey);
}
}
@@ -95,10 +98,8 @@ static bool GetUTXOStats(CCoinsView* view, BlockManager& blockman, CCoinsStats&
assert(pcursor);
if (!pindex) {
- {
- LOCK(cs_main);
- pindex = blockman.LookupBlockIndex(view->GetBestBlock());
- }
+ LOCK(cs_main);
+ pindex = blockman.LookupBlockIndex(view->GetBestBlock());
}
stats.nHeight = Assert(pindex)->nHeight;
stats.hashBlock = pindex->GetBlockHash();
diff --git a/src/node/coinstats.h b/src/node/coinstats.h
index 4a5d17b3fd..3b641200ad 100644
--- a/src/node/coinstats.h
+++ b/src/node/coinstats.h
@@ -24,9 +24,10 @@ enum class CoinStatsHashType {
NONE,
};
-struct CCoinsStats
-{
- CoinStatsHashType m_hash_type;
+struct CCoinsStats {
+ //! Which hash type to use
+ const CoinStatsHashType m_hash_type;
+
int nHeight{0};
uint256 hashBlock{};
uint64_t nTransactions{0};
@@ -34,7 +35,8 @@ struct CCoinsStats
uint64_t nBogoSize{0};
uint256 hashSerialized{};
uint64_t nDiskSize{0};
- CAmount nTotalAmount{0};
+ //! The total amount, or nullopt if an overflow occurred calculating it
+ std::optional<CAmount> total_amount{0};
//! The number of coins contained.
uint64_t coins_count{0};
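
ApplyStats() now routes the running total through CheckedAdd() from the new util/overflow.h, and total_amount becomes nullopt once an overflow is detected instead of silently wrapping. A minimal sketch of how such a checked addition can look; the exact shape of util/overflow.h is an assumption here, and the names carry a Sketch suffix to make that clear:

```cpp
#include <cstdint>
#include <iostream>
#include <limits>
#include <optional>
#include <string>

using CAmountSketch = int64_t;

// Returns the sum, or nullopt if the signed addition would overflow.
std::optional<CAmountSketch> CheckedAddSketch(CAmountSketch a, CAmountSketch b)
{
    if (b > 0 && a > std::numeric_limits<CAmountSketch>::max() - b) return std::nullopt;
    if (b < 0 && a < std::numeric_limits<CAmountSketch>::min() - b) return std::nullopt;
    return a + b;
}

int main()
{
    std::optional<CAmountSketch> total{0};
    for (CAmountSketch out : {50'00000000LL, 25'00000000LL}) {
        if (total.has_value()) total = CheckedAddSketch(*total, out); // mirrors ApplyStats()
    }
    std::cout << (total ? std::to_string(*total) : "overflow") << '\n'; // 7500000000
}
```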
diff --git a/src/qt/android/src/org/bitcoincore/qt/BitcoinQtActivity.java b/src/qt/android/src/org/bitcoincore/qt/BitcoinQtActivity.java
index cf3b4f6668..2cba489242 100644
--- a/src/qt/android/src/org/bitcoincore/qt/BitcoinQtActivity.java
+++ b/src/qt/android/src/org/bitcoincore/qt/BitcoinQtActivity.java
@@ -18,12 +18,6 @@ public class BitcoinQtActivity extends QtActivity
bitcoinDir.mkdir();
}
- try {
- Os.setenv("QT_QPA_PLATFORM", "android", true);
- } catch (ErrnoException e) {
- e.printStackTrace();
- }
-
super.onCreate(savedInstanceState);
}
}
diff --git a/src/qt/forms/receivecoinsdialog.ui b/src/qt/forms/receivecoinsdialog.ui
index 4b2c3ed434..7590dd524d 100644
--- a/src/qt/forms/receivecoinsdialog.ui
+++ b/src/qt/forms/receivecoinsdialog.ui
@@ -302,7 +302,7 @@
</property>
<property name="icon">
<iconset resource="../bitcoin.qrc">
- <normaloff>:/icons/edit</normaloff>:/icons/edit</iconset>
+ <normaloff>:/icons/eye</normaloff>:/icons/eye</iconset>
</property>
<property name="autoDefault">
<bool>false</bool>
diff --git a/src/qt/receivecoinsdialog.cpp b/src/qt/receivecoinsdialog.cpp
index ba386a97ae..3c80c01160 100644
--- a/src/qt/receivecoinsdialog.cpp
+++ b/src/qt/receivecoinsdialog.cpp
@@ -38,7 +38,7 @@ ReceiveCoinsDialog::ReceiveCoinsDialog(const PlatformStyle *_platformStyle, QWid
} else {
ui->clearButton->setIcon(_platformStyle->SingleColorIcon(":/icons/remove"));
ui->receiveButton->setIcon(_platformStyle->SingleColorIcon(":/icons/receiving_addresses"));
- ui->showRequestButton->setIcon(_platformStyle->SingleColorIcon(":/icons/edit"));
+ ui->showRequestButton->setIcon(_platformStyle->SingleColorIcon(":/icons/eye"));
ui->removeRequestButton->setIcon(_platformStyle->SingleColorIcon(":/icons/remove"));
}
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 689879ae97..073c28b5e8 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -1,5 +1,5 @@
// Copyright (c) 2010 Satoshi Nakamoto
-// Copyright (c) 2009-2021 The Bitcoin Core developers
+// Copyright (c) 2009-2022 The Bitcoin Core developers
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
@@ -19,10 +19,10 @@
#include <hash.h>
#include <index/blockfilterindex.h>
#include <index/coinstatsindex.h>
+#include <logging/timer.h>
#include <net.h>
#include <net_processing.h>
#include <node/blockstorage.h>
-#include <logging/timer.h>
#include <node/coinstats.h>
#include <node/context.h>
#include <node/utxo_snapshot.h>
@@ -185,6 +185,7 @@ UniValue blockToJSON(const CBlock& block, const CBlockIndex* tip, const CBlockIn
TxToUniv(*tx, uint256(), objTx, true, RPCSerializationFlags(), txundo, verbosity);
txs.push_back(objTx);
}
+ break;
}
result.pushKV("tx", txs);
@@ -967,7 +968,7 @@ static RPCHelpMan getblock()
"If verbosity is 3, returns an Object with information about block <hash> and information about each transaction, including prevout information for inputs (only for unpruned blocks in the current best chain).\n",
{
{"blockhash", RPCArg::Type::STR_HEX, RPCArg::Optional::NO, "The block hash"},
- {"verbosity|verbose", RPCArg::Type::NUM, RPCArg::Default{1}, "0 for hex-encoded data, 1 for a json object, and 2 for json object with transaction data"},
+ {"verbosity|verbose", RPCArg::Type::NUM, RPCArg::Default{1}, "0 for hex-encoded data, 1 for a JSON object, 2 for JSON object with transaction data, and 3 for JSON object with transaction data including prevout information for inputs"},
},
{
RPCResult{"for verbosity = 0",
@@ -1009,6 +1010,37 @@ static RPCHelpMan getblock()
}},
}},
}},
+ RPCResult{"for verbosity = 3",
+ RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::ELISION, "", "Same output as verbosity = 2"},
+ {RPCResult::Type::ARR, "tx", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::ARR, "vin", "",
+ {
+ {RPCResult::Type::OBJ, "", "",
+ {
+ {RPCResult::Type::ELISION, "", "The same output as verbosity = 2"},
+ {RPCResult::Type::OBJ, "prevout", "(Only if undo information is available)",
+ {
+ {RPCResult::Type::BOOL, "generated", "Coinbase or not"},
+ {RPCResult::Type::NUM, "height", "The height of the prevout"},
+ {RPCResult::Type::NUM, "value", "The value in " + CURRENCY_UNIT},
+ {RPCResult::Type::OBJ, "scriptPubKey", "",
+ {
+ {RPCResult::Type::STR, "asm", "The asm"},
+ {RPCResult::Type::STR, "hex", "The hex"},
+ {RPCResult::Type::STR, "address", /* optional */ true, "The Bitcoin address (only if a well-defined address exists)"},
+ {RPCResult::Type::STR, "type", "The type, eg 'pubkeyhash'"},
+ }},
+ }},
+ }},
+ }},
+ }},
+ }},
+ }},
},
RPCExamples{
HelpExampleCli("getblock", "\"00000000c937983704a73af28acdec37b049d214adbda81d7e2a3dd146f6ed09\"")
@@ -1242,9 +1274,10 @@ static RPCHelpMan gettxoutsetinfo()
ret.pushKV("hash_serialized_2", stats.hashSerialized.GetHex());
}
if (hash_type == CoinStatsHashType::MUHASH) {
- ret.pushKV("muhash", stats.hashSerialized.GetHex());
+ ret.pushKV("muhash", stats.hashSerialized.GetHex());
}
- ret.pushKV("total_amount", ValueFromAmount(stats.nTotalAmount));
+ CHECK_NONFATAL(stats.total_amount.has_value());
+ ret.pushKV("total_amount", ValueFromAmount(stats.total_amount.value()));
if (!stats.index_used) {
ret.pushKV("transactions", static_cast<int64_t>(stats.nTransactions));
ret.pushKV("disk_size", stats.nDiskSize);
@@ -1512,6 +1545,7 @@ RPCHelpMan getblockchaininfo()
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
+ const ArgsManager& args{EnsureAnyArgsman(request.context)};
ChainstateManager& chainman = EnsureAnyChainman(request.context);
LOCK(cs_main);
CChainState& active_chainstate = chainman.ActiveChainstate();
@@ -1542,7 +1576,7 @@ RPCHelpMan getblockchaininfo()
obj.pushKV("pruneheight", block->nHeight);
// if 0, execution bypasses the whole if block.
- bool automatic_pruning = (gArgs.GetIntArg("-prune", 0) != 1);
+ bool automatic_pruning{args.GetIntArg("-prune", 0) != 1};
obj.pushKV("automatic_pruning", automatic_pruning);
if (automatic_pruning) {
obj.pushKV("prune_target_size", nPruneTarget);
@@ -1709,7 +1743,7 @@ static RPCHelpMan getmempoolinfo()
{RPCResult::Type::NUM, "size", "Current tx count"},
{RPCResult::Type::NUM, "bytes", "Sum of all virtual transaction sizes as defined in BIP 141. Differs from actual serialized size because witness data is discounted"},
{RPCResult::Type::NUM, "usage", "Total memory usage for the mempool"},
- {RPCResult::Type::STR_AMOUNT, "total_fee", "Total fees for the mempool in " + CURRENCY_UNIT + ", ignoring modified fees through prioritizetransaction"},
+ {RPCResult::Type::STR_AMOUNT, "total_fee", "Total fees for the mempool in " + CURRENCY_UNIT + ", ignoring modified fees through prioritisetransaction"},
{RPCResult::Type::NUM, "maxmempool", "Maximum memory usage for the mempool"},
{RPCResult::Type::STR_AMOUNT, "mempoolminfee", "Minimum fee rate in " + CURRENCY_UNIT + "/kvB for tx to be accepted. Is the maximum of minrelaytxfee and minimum mempool fee"},
{RPCResult::Type::STR_AMOUNT, "minrelaytxfee", "Current minimum relay fee for transactions"},
@@ -2238,10 +2272,9 @@ static RPCHelpMan savemempool()
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
+ const ArgsManager& args{EnsureAnyArgsman(request.context)};
const CTxMemPool& mempool = EnsureAnyMemPool(request.context);
- const NodeContext& node = EnsureAnyNodeContext(request.context);
-
if (!mempool.IsLoaded()) {
throw JSONRPCError(RPC_MISC_ERROR, "The mempool was not loaded yet");
}
@@ -2251,7 +2284,7 @@ static RPCHelpMan savemempool()
}
UniValue ret(UniValue::VOBJ);
- ret.pushKV("filename", fs::path((node.args->GetDataDirNet() / "mempool.dat")).u8string());
+ ret.pushKV("filename", fs::path((args.GetDataDirNet() / "mempool.dat")).u8string());
return ret;
},
@@ -2576,13 +2609,9 @@ static RPCHelpMan dumptxoutset()
{
return RPCHelpMan{
"dumptxoutset",
- "\nWrite the serialized UTXO set to disk.\n",
+ "Write the serialized UTXO set to disk.",
{
- {"path",
- RPCArg::Type::STR,
- RPCArg::Optional::NO,
- /* default_val */ "",
- "path to the output file. If relative, will be prefixed by datadir."},
+ {"path", RPCArg::Type::STR, RPCArg::Optional::NO, "Path to the output file. If relative, will be prefixed by datadir."},
},
RPCResult{
RPCResult::Type::OBJ, "", "",
@@ -2600,10 +2629,11 @@ static RPCHelpMan dumptxoutset()
},
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
- const fs::path path = fsbridge::AbsPathJoin(gArgs.GetDataDirNet(), fs::u8path(request.params[0].get_str()));
+ const ArgsManager& args{EnsureAnyArgsman(request.context)};
+ const fs::path path = fsbridge::AbsPathJoin(args.GetDataDirNet(), fs::u8path(request.params[0].get_str()));
// Write to a temporary path and then move into `path` on completion
// to avoid confusion due to an interruption.
- const fs::path temppath = fsbridge::AbsPathJoin(gArgs.GetDataDirNet(), fs::u8path(request.params[0].get_str() + ".incomplete"));
+ const fs::path temppath = fsbridge::AbsPathJoin(args.GetDataDirNet(), fs::u8path(request.params[0].get_str() + ".incomplete"));
if (fs::exists(path)) {
throw JSONRPCError(
diff --git a/src/rpc/blockchain.h b/src/rpc/blockchain.h
index e487accb66..2176b5997e 100644
--- a/src/rpc/blockchain.h
+++ b/src/rpc/blockchain.h
@@ -21,7 +21,6 @@ class CBlock;
class CBlockIndex;
class CChainState;
class CTxMemPool;
-class ChainstateManager;
class UniValue;
struct NodeContext;
diff --git a/src/rpc/rawtransaction.cpp b/src/rpc/rawtransaction.cpp
index 3c9e0625e6..f4790b007c 100644
--- a/src/rpc/rawtransaction.cpp
+++ b/src/rpc/rawtransaction.cpp
@@ -1066,8 +1066,9 @@ static RPCHelpMan testmempoolaccept()
static RPCHelpMan decodepsbt()
{
- return RPCHelpMan{"decodepsbt",
- "\nReturn a JSON object representing the serialized, base64-encoded partially signed Bitcoin transaction.\n",
+ return RPCHelpMan{
+ "decodepsbt",
+ "Return a JSON object representing the serialized, base64-encoded partially signed Bitcoin transaction.",
{
{"psbt", RPCArg::Type::STR, RPCArg::Optional::NO, "The PSBT base64 string"},
},
@@ -1176,7 +1177,7 @@ static RPCHelpMan decodepsbt()
{
{RPCResult::Type::STR_HEX, "key", "(key-value pair) An unknown key-value pair"},
}},
- {RPCResult::Type::ARR, "proprietary", "The input proprietary map",
+ {RPCResult::Type::ARR, "proprietary", /*optional=*/true, "The input proprietary map",
{
{RPCResult::Type::OBJ, "", "",
{
@@ -1217,7 +1218,7 @@ static RPCHelpMan decodepsbt()
{
{RPCResult::Type::STR_HEX, "key", "(key-value pair) An unknown key-value pair"},
}},
- {RPCResult::Type::ARR, "proprietary", "The output proprietary map",
+ {RPCResult::Type::ARR, "proprietary", /*optional=*/true, "The output proprietary map",
{
{RPCResult::Type::OBJ, "", "",
{
diff --git a/src/rpc/server_util.cpp b/src/rpc/server_util.cpp
index 3fc35222e1..2978051574 100644
--- a/src/rpc/server_util.cpp
+++ b/src/rpc/server_util.cpp
@@ -37,6 +37,19 @@ CTxMemPool& EnsureAnyMemPool(const std::any& context)
return EnsureMemPool(EnsureAnyNodeContext(context));
}
+ArgsManager& EnsureArgsman(const NodeContext& node)
+{
+ if (!node.args) {
+ throw JSONRPCError(RPC_INTERNAL_ERROR, "Node args not found");
+ }
+ return *node.args;
+}
+
+ArgsManager& EnsureAnyArgsman(const std::any& context)
+{
+ return EnsureArgsman(EnsureAnyNodeContext(context));
+}
+
ChainstateManager& EnsureChainman(const NodeContext& node)
{
if (!node.chainman) {
diff --git a/src/rpc/server_util.h b/src/rpc/server_util.h
index ad3c149c90..3b0df2d651 100644
--- a/src/rpc/server_util.h
+++ b/src/rpc/server_util.h
@@ -7,16 +7,19 @@
#include <any>
+class ArgsManager;
class CBlockPolicyEstimator;
class CConnman;
-class ChainstateManager;
class CTxMemPool;
-struct NodeContext;
+class ChainstateManager;
class PeerManager;
+struct NodeContext;
NodeContext& EnsureAnyNodeContext(const std::any& context);
CTxMemPool& EnsureMemPool(const NodeContext& node);
CTxMemPool& EnsureAnyMemPool(const std::any& context);
+ArgsManager& EnsureArgsman(const NodeContext& node);
+ArgsManager& EnsureAnyArgsman(const std::any& context);
ChainstateManager& EnsureChainman(const NodeContext& node);
ChainstateManager& EnsureAnyChainman(const std::any& context);
CBlockPolicyEstimator& EnsureFeeEstimator(const NodeContext& node);
diff --git a/src/test/addrman_tests.cpp b/src/test/addrman_tests.cpp
index fd365e703f..752bd0af9e 100644
--- a/src/test/addrman_tests.cpp
+++ b/src/test/addrman_tests.cpp
@@ -22,59 +22,6 @@
using namespace std::literals;
-class AddrManTest : public AddrMan
-{
-public:
- explicit AddrManTest(std::vector<bool> asmap = std::vector<bool>())
- : AddrMan(asmap, /*deterministic=*/true, /*consistency_check_ratio=*/100)
- {}
-
- AddrInfo* Find(const CService& addr)
- {
- LOCK(m_impl->cs);
- return m_impl->Find(addr);
- }
-
- AddrInfo* Create(const CAddress& addr, const CNetAddr& addrSource, int* pnId)
- {
- LOCK(m_impl->cs);
- return m_impl->Create(addr, addrSource, pnId);
- }
-
- void Delete(int nId)
- {
- LOCK(m_impl->cs);
- m_impl->Delete(nId);
- }
-
- // Used to test deserialization
- std::pair<int, int> GetBucketAndEntry(const CAddress& addr)
- {
- LOCK(m_impl->cs);
- int nId = m_impl->mapAddr[addr];
- for (int bucket = 0; bucket < ADDRMAN_NEW_BUCKET_COUNT; ++bucket) {
- for (int entry = 0; entry < ADDRMAN_BUCKET_SIZE; ++entry) {
- if (nId == m_impl->vvNew[bucket][entry]) {
- return std::pair<int, int>(bucket, entry);
- }
- }
- }
- return std::pair<int, int>(-1, -1);
- }
-
- // Simulates connection failure so that we can test eviction of offline nodes
- void SimConnFail(const CService& addr)
- {
- int64_t nLastSuccess = 1;
- // Set last good connection in the deep past.
- Good(addr, nLastSuccess);
-
- bool count_failure = false;
- int64_t nLastTry = GetAdjustedTime() - 61;
- Attempt(addr, count_failure, nLastTry);
- }
-};
-
static CNetAddr ResolveIP(const std::string& ip)
{
CNetAddr addr;
@@ -102,12 +49,17 @@ static std::vector<bool> FromBytes(const unsigned char* source, int vector_size)
return result;
}
+/* Utility function to create a deterministic addrman, as used in most tests */
+static std::unique_ptr<AddrMan> TestAddrMan(std::vector<bool> asmap = std::vector<bool>())
+{
+ return std::make_unique<AddrMan>(asmap, /*deterministic=*/true, /*consistency_check_ratio=*/100);
+}
BOOST_FIXTURE_TEST_SUITE(addrman_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(addrman_simple)
{
- auto addrman = std::make_unique<AddrManTest>();
+ auto addrman = TestAddrMan();
CNetAddr source = ResolveIP("252.2.2.2");
@@ -141,7 +93,7 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
BOOST_CHECK(addrman->size() >= 1);
// Test: reset addrman and test AddrMan::Add multiple addresses works as expected
- addrman = std::make_unique<AddrManTest>();
+ addrman = TestAddrMan();
std::vector<CAddress> vAddr;
vAddr.push_back(CAddress(ResolveService("250.1.1.3", 8333), NODE_NONE));
vAddr.push_back(CAddress(ResolveService("250.1.1.4", 8333), NODE_NONE));
@@ -151,58 +103,58 @@ BOOST_AUTO_TEST_CASE(addrman_simple)
BOOST_AUTO_TEST_CASE(addrman_ports)
{
- AddrManTest addrman;
+ auto addrman = TestAddrMan();
CNetAddr source = ResolveIP("252.2.2.2");
- BOOST_CHECK_EQUAL(addrman.size(), 0U);
+ BOOST_CHECK_EQUAL(addrman->size(), 0U);
// Test 7; Addr with same IP but diff port does not replace existing addr.
CService addr1 = ResolveService("250.1.1.1", 8333);
- BOOST_CHECK(addrman.Add({CAddress(addr1, NODE_NONE)}, source));
- BOOST_CHECK_EQUAL(addrman.size(), 1U);
+ BOOST_CHECK(addrman->Add({CAddress(addr1, NODE_NONE)}, source));
+ BOOST_CHECK_EQUAL(addrman->size(), 1U);
CService addr1_port = ResolveService("250.1.1.1", 8334);
- BOOST_CHECK(addrman.Add({CAddress(addr1_port, NODE_NONE)}, source));
- BOOST_CHECK_EQUAL(addrman.size(), 2U);
- auto addr_ret2 = addrman.Select().first;
+ BOOST_CHECK(addrman->Add({CAddress(addr1_port, NODE_NONE)}, source));
+ BOOST_CHECK_EQUAL(addrman->size(), 2U);
+ auto addr_ret2 = addrman->Select().first;
BOOST_CHECK(addr_ret2.ToString() == "250.1.1.1:8333" || addr_ret2.ToString() == "250.1.1.1:8334");
// Test: Add same IP but diff port to tried table; this converts the entry with
// the specified port to tried, but not the other.
- addrman.Good(CAddress(addr1_port, NODE_NONE));
- BOOST_CHECK_EQUAL(addrman.size(), 2U);
+ addrman->Good(CAddress(addr1_port, NODE_NONE));
+ BOOST_CHECK_EQUAL(addrman->size(), 2U);
bool newOnly = true;
- auto addr_ret3 = addrman.Select(newOnly).first;
+ auto addr_ret3 = addrman->Select(newOnly).first;
BOOST_CHECK_EQUAL(addr_ret3.ToString(), "250.1.1.1:8333");
}
BOOST_AUTO_TEST_CASE(addrman_select)
{
- AddrManTest addrman;
+ auto addrman = TestAddrMan();
CNetAddr source = ResolveIP("252.2.2.2");
// Test: Select from new with 1 addr in new.
CService addr1 = ResolveService("250.1.1.1", 8333);
- BOOST_CHECK(addrman.Add({CAddress(addr1, NODE_NONE)}, source));
- BOOST_CHECK_EQUAL(addrman.size(), 1U);
+ BOOST_CHECK(addrman->Add({CAddress(addr1, NODE_NONE)}, source));
+ BOOST_CHECK_EQUAL(addrman->size(), 1U);
bool newOnly = true;
- auto addr_ret1 = addrman.Select(newOnly).first;
+ auto addr_ret1 = addrman->Select(newOnly).first;
BOOST_CHECK_EQUAL(addr_ret1.ToString(), "250.1.1.1:8333");
// Test: move addr to tried, select from new expected nothing returned.
- BOOST_CHECK(addrman.Good(CAddress(addr1, NODE_NONE)));
- BOOST_CHECK_EQUAL(addrman.size(), 1U);
- auto addr_ret2 = addrman.Select(newOnly).first;
+ BOOST_CHECK(addrman->Good(CAddress(addr1, NODE_NONE)));
+ BOOST_CHECK_EQUAL(addrman->size(), 1U);
+ auto addr_ret2 = addrman->Select(newOnly).first;
BOOST_CHECK_EQUAL(addr_ret2.ToString(), "[::]:0");
- auto addr_ret3 = addrman.Select().first;
+ auto addr_ret3 = addrman->Select().first;
BOOST_CHECK_EQUAL(addr_ret3.ToString(), "250.1.1.1:8333");
- BOOST_CHECK_EQUAL(addrman.size(), 1U);
+ BOOST_CHECK_EQUAL(addrman->size(), 1U);
// Add three addresses to new table.
@@ -210,65 +162,97 @@ BOOST_AUTO_TEST_CASE(addrman_select)
CService addr3 = ResolveService("250.3.2.2", 9999);
CService addr4 = ResolveService("250.3.3.3", 9999);
- BOOST_CHECK(addrman.Add({CAddress(addr2, NODE_NONE)}, ResolveService("250.3.1.1", 8333)));
- BOOST_CHECK(addrman.Add({CAddress(addr3, NODE_NONE)}, ResolveService("250.3.1.1", 8333)));
- BOOST_CHECK(addrman.Add({CAddress(addr4, NODE_NONE)}, ResolveService("250.4.1.1", 8333)));
+ BOOST_CHECK(addrman->Add({CAddress(addr2, NODE_NONE)}, ResolveService("250.3.1.1", 8333)));
+ BOOST_CHECK(addrman->Add({CAddress(addr3, NODE_NONE)}, ResolveService("250.3.1.1", 8333)));
+ BOOST_CHECK(addrman->Add({CAddress(addr4, NODE_NONE)}, ResolveService("250.4.1.1", 8333)));
// Add three addresses to tried table.
CService addr5 = ResolveService("250.4.4.4", 8333);
CService addr6 = ResolveService("250.4.5.5", 7777);
CService addr7 = ResolveService("250.4.6.6", 8333);
- BOOST_CHECK(addrman.Add({CAddress(addr5, NODE_NONE)}, ResolveService("250.3.1.1", 8333)));
- BOOST_CHECK(addrman.Good(CAddress(addr5, NODE_NONE)));
- BOOST_CHECK(addrman.Add({CAddress(addr6, NODE_NONE)}, ResolveService("250.3.1.1", 8333)));
- BOOST_CHECK(addrman.Good(CAddress(addr6, NODE_NONE)));
- BOOST_CHECK(addrman.Add({CAddress(addr7, NODE_NONE)}, ResolveService("250.1.1.3", 8333)));
- BOOST_CHECK(addrman.Good(CAddress(addr7, NODE_NONE)));
+ BOOST_CHECK(addrman->Add({CAddress(addr5, NODE_NONE)}, ResolveService("250.3.1.1", 8333)));
+ BOOST_CHECK(addrman->Good(CAddress(addr5, NODE_NONE)));
+ BOOST_CHECK(addrman->Add({CAddress(addr6, NODE_NONE)}, ResolveService("250.3.1.1", 8333)));
+ BOOST_CHECK(addrman->Good(CAddress(addr6, NODE_NONE)));
+ BOOST_CHECK(addrman->Add({CAddress(addr7, NODE_NONE)}, ResolveService("250.1.1.3", 8333)));
+ BOOST_CHECK(addrman->Good(CAddress(addr7, NODE_NONE)));
// Test: 6 addrs + 1 addr from last test = 7.
- BOOST_CHECK_EQUAL(addrman.size(), 7U);
+ BOOST_CHECK_EQUAL(addrman->size(), 7U);
// Test: Select pulls from new and tried regardless of port number.
std::set<uint16_t> ports;
for (int i = 0; i < 20; ++i) {
- ports.insert(addrman.Select().first.GetPort());
+ ports.insert(addrman->Select().first.GetPort());
}
BOOST_CHECK_EQUAL(ports.size(), 3U);
}
BOOST_AUTO_TEST_CASE(addrman_new_collisions)
{
- AddrManTest addrman;
+ auto addrman = TestAddrMan();
CNetAddr source = ResolveIP("252.2.2.2");
uint32_t num_addrs{0};
- BOOST_CHECK_EQUAL(addrman.size(), num_addrs);
+ BOOST_CHECK_EQUAL(addrman->size(), num_addrs);
while (num_addrs < 22) { // Magic number! 250.1.1.1 - 250.1.1.22 do not collide with deterministic key = 1
CService addr = ResolveService("250.1.1." + ToString(++num_addrs));
- BOOST_CHECK(addrman.Add({CAddress(addr, NODE_NONE)}, source));
+ BOOST_CHECK(addrman->Add({CAddress(addr, NODE_NONE)}, source));
// Test: No collision in new table yet.
- BOOST_CHECK_EQUAL(addrman.size(), num_addrs);
+ BOOST_CHECK_EQUAL(addrman->size(), num_addrs);
}
// Test: new table collision!
CService addr1 = ResolveService("250.1.1." + ToString(++num_addrs));
uint32_t collisions{1};
- BOOST_CHECK(addrman.Add({CAddress(addr1, NODE_NONE)}, source));
- BOOST_CHECK_EQUAL(addrman.size(), num_addrs - collisions);
+ BOOST_CHECK(addrman->Add({CAddress(addr1, NODE_NONE)}, source));
+ BOOST_CHECK_EQUAL(addrman->size(), num_addrs - collisions);
CService addr2 = ResolveService("250.1.1." + ToString(++num_addrs));
- BOOST_CHECK(addrman.Add({CAddress(addr2, NODE_NONE)}, source));
- BOOST_CHECK_EQUAL(addrman.size(), num_addrs - collisions);
+ BOOST_CHECK(addrman->Add({CAddress(addr2, NODE_NONE)}, source));
+ BOOST_CHECK_EQUAL(addrman->size(), num_addrs - collisions);
+}
+
+BOOST_AUTO_TEST_CASE(addrman_new_multiplicity)
+{
+ auto addrman = TestAddrMan();
+ CAddress addr{CAddress(ResolveService("253.3.3.3", 8333), NODE_NONE)};
+ int64_t start_time{GetAdjustedTime()};
+ addr.nTime = start_time;
+
+ // test that multiplicity stays at 1 if nTime doesn't increase
+ for (unsigned int i = 1; i < 20; ++i) {
+ std::string addr_ip{ToString(i % 256) + "." + ToString(i >> 8 % 256) + ".1.1"};
+ CNetAddr source{ResolveIP(addr_ip)};
+ addrman->Add({addr}, source);
+ }
+ AddressPosition addr_pos = addrman->FindAddressEntry(addr).value();
+ BOOST_CHECK_EQUAL(addr_pos.multiplicity, 1U);
+ BOOST_CHECK_EQUAL(addrman->size(), 1U);
+
+ // if nTime increases, an addr can occur in up to 8 buckets
+ // The acceptance probability decreases exponentially with existing multiplicity -
+ // choose number of iterations such that it gets to 8 with deterministic addrman.
+ for (unsigned int i = 1; i < 400; ++i) {
+ std::string addr_ip{ToString(i % 256) + "." + ToString(i >> 8 % 256) + ".1.1"};
+ CNetAddr source{ResolveIP(addr_ip)};
+ addr.nTime = start_time + i;
+ addrman->Add({addr}, source);
+ }
+ AddressPosition addr_pos_multi = addrman->FindAddressEntry(addr).value();
+ BOOST_CHECK_EQUAL(addr_pos_multi.multiplicity, 8U);
+ // multiplicity doesn't affect size
+ BOOST_CHECK_EQUAL(addrman->size(), 1U);
}
BOOST_AUTO_TEST_CASE(addrman_tried_collisions)
{
- auto addrman = std::make_unique<AddrMan>(std::vector<bool>(), /*deterministic=*/true, /*consistency_check_ratio=*/100);
+ auto addrman = TestAddrMan();
CNetAddr source = ResolveIP("252.2.2.2");
@@ -296,87 +280,15 @@ BOOST_AUTO_TEST_CASE(addrman_tried_collisions)
BOOST_CHECK(addrman->Good(CAddress(addr2, NODE_NONE)));
}
-BOOST_AUTO_TEST_CASE(addrman_find)
-{
- AddrManTest addrman;
-
- BOOST_CHECK_EQUAL(addrman.size(), 0U);
-
- CAddress addr1 = CAddress(ResolveService("250.1.2.1", 8333), NODE_NONE);
- CAddress addr2 = CAddress(ResolveService("250.1.2.1", 9999), NODE_NONE);
- CAddress addr3 = CAddress(ResolveService("251.255.2.1", 8333), NODE_NONE);
-
- CNetAddr source1 = ResolveIP("250.1.2.1");
- CNetAddr source2 = ResolveIP("250.1.2.2");
-
- BOOST_CHECK(addrman.Add({addr1}, source1));
- BOOST_CHECK(addrman.Add({addr2}, source2));
- BOOST_CHECK(addrman.Add({addr3}, source1));
-
- // Test: ensure Find returns an IP/port matching what we searched on.
- AddrInfo* info1 = addrman.Find(addr1);
- BOOST_REQUIRE(info1);
- BOOST_CHECK_EQUAL(info1->ToString(), "250.1.2.1:8333");
-
- // Test; Find discriminates by port number.
- AddrInfo* info2 = addrman.Find(addr2);
- BOOST_REQUIRE(info2);
- BOOST_CHECK_EQUAL(info2->ToString(), "250.1.2.1:9999");
-
- // Test: Find returns another IP matching what we searched on.
- AddrInfo* info3 = addrman.Find(addr3);
- BOOST_REQUIRE(info3);
- BOOST_CHECK_EQUAL(info3->ToString(), "251.255.2.1:8333");
-}
-
-BOOST_AUTO_TEST_CASE(addrman_create)
-{
- AddrManTest addrman;
-
- BOOST_CHECK_EQUAL(addrman.size(), 0U);
-
- CAddress addr1 = CAddress(ResolveService("250.1.2.1", 8333), NODE_NONE);
- CNetAddr source1 = ResolveIP("250.1.2.1");
-
- int nId;
- AddrInfo* pinfo = addrman.Create(addr1, source1, &nId);
-
- // Test: The result should be the same as the input addr.
- BOOST_CHECK_EQUAL(pinfo->ToString(), "250.1.2.1:8333");
-
- AddrInfo* info2 = addrman.Find(addr1);
- BOOST_CHECK_EQUAL(info2->ToString(), "250.1.2.1:8333");
-}
-
-
-BOOST_AUTO_TEST_CASE(addrman_delete)
-{
- AddrManTest addrman;
-
- BOOST_CHECK_EQUAL(addrman.size(), 0U);
-
- CAddress addr1 = CAddress(ResolveService("250.1.2.1", 8333), NODE_NONE);
- CNetAddr source1 = ResolveIP("250.1.2.1");
-
- int nId;
- addrman.Create(addr1, source1, &nId);
-
- // Test: Delete should actually delete the addr.
- BOOST_CHECK_EQUAL(addrman.size(), 1U);
- addrman.Delete(nId);
- BOOST_CHECK_EQUAL(addrman.size(), 0U);
- AddrInfo* info2 = addrman.Find(addr1);
- BOOST_CHECK(info2 == nullptr);
-}
BOOST_AUTO_TEST_CASE(addrman_getaddr)
{
- AddrManTest addrman;
+ auto addrman = TestAddrMan();
// Test: Sanity check, GetAddr should never return anything if addrman
// is empty.
- BOOST_CHECK_EQUAL(addrman.size(), 0U);
- std::vector<CAddress> vAddr1 = addrman.GetAddr(/*max_addresses=*/0, /*max_pct=*/0, /*network=*/std::nullopt);
+ BOOST_CHECK_EQUAL(addrman->size(), 0U);
+ std::vector<CAddress> vAddr1 = addrman->GetAddr(/*max_addresses=*/0, /*max_pct=*/0, /*network=*/std::nullopt);
BOOST_CHECK_EQUAL(vAddr1.size(), 0U);
CAddress addr1 = CAddress(ResolveService("250.250.2.1", 8333), NODE_NONE);
@@ -393,18 +305,18 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
CNetAddr source2 = ResolveIP("250.2.3.3");
// Test: Ensure GetAddr works with new addresses.
- BOOST_CHECK(addrman.Add({addr1, addr3, addr5}, source1));
- BOOST_CHECK(addrman.Add({addr2, addr4}, source2));
+ BOOST_CHECK(addrman->Add({addr1, addr3, addr5}, source1));
+ BOOST_CHECK(addrman->Add({addr2, addr4}, source2));
- BOOST_CHECK_EQUAL(addrman.GetAddr(/*max_addresses=*/0, /*max_pct=*/0, /*network=*/std::nullopt).size(), 5U);
+ BOOST_CHECK_EQUAL(addrman->GetAddr(/*max_addresses=*/0, /*max_pct=*/0, /*network=*/std::nullopt).size(), 5U);
// Net processing asks for 23% of addresses. 23% of 5 is 1 rounded down.
- BOOST_CHECK_EQUAL(addrman.GetAddr(/*max_addresses=*/2500, /*max_pct=*/23, /*network=*/std::nullopt).size(), 1U);
+ BOOST_CHECK_EQUAL(addrman->GetAddr(/*max_addresses=*/2500, /*max_pct=*/23, /*network=*/std::nullopt).size(), 1U);
// Test: Ensure GetAddr works with new and tried addresses.
- addrman.Good(CAddress(addr1, NODE_NONE));
- addrman.Good(CAddress(addr2, NODE_NONE));
- BOOST_CHECK_EQUAL(addrman.GetAddr(/*max_addresses=*/0, /*max_pct=*/0, /*network=*/std::nullopt).size(), 5U);
- BOOST_CHECK_EQUAL(addrman.GetAddr(/*max_addresses=*/2500, /*max_pct=*/23, /*network=*/std::nullopt).size(), 1U);
+ BOOST_CHECK(addrman->Good(CAddress(addr1, NODE_NONE)));
+ BOOST_CHECK(addrman->Good(CAddress(addr2, NODE_NONE)));
+ BOOST_CHECK_EQUAL(addrman->GetAddr(/*max_addresses=*/0, /*max_pct=*/0, /*network=*/std::nullopt).size(), 5U);
+ BOOST_CHECK_EQUAL(addrman->GetAddr(/*max_addresses=*/2500, /*max_pct=*/23, /*network=*/std::nullopt).size(), 1U);
// Test: Ensure GetAddr still returns 23% when addrman has many addrs.
for (unsigned int i = 1; i < (8 * 256); i++) {
@@ -415,24 +327,22 @@ BOOST_AUTO_TEST_CASE(addrman_getaddr)
// Ensure that for all addrs in addrman, isTerrible == false.
addr.nTime = GetAdjustedTime();
- addrman.Add({addr}, ResolveIP(strAddr));
+ addrman->Add({addr}, ResolveIP(strAddr));
if (i % 8 == 0)
- addrman.Good(addr);
+ addrman->Good(addr);
}
- std::vector<CAddress> vAddr = addrman.GetAddr(/*max_addresses=*/2500, /*max_pct=*/23, /*network=*/std::nullopt);
+ std::vector<CAddress> vAddr = addrman->GetAddr(/*max_addresses=*/2500, /*max_pct=*/23, /*network=*/std::nullopt);
- size_t percent23 = (addrman.size() * 23) / 100;
+ size_t percent23 = (addrman->size() * 23) / 100;
BOOST_CHECK_EQUAL(vAddr.size(), percent23);
BOOST_CHECK_EQUAL(vAddr.size(), 461U);
// (Addrman.size() < number of addresses added) due to address collisions.
- BOOST_CHECK_EQUAL(addrman.size(), 2006U);
+ BOOST_CHECK_EQUAL(addrman->size(), 2006U);
}
BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket_legacy)
{
- AddrManTest addrman;
-
CAddress addr1 = CAddress(ResolveService("250.1.1.1", 8333), NODE_NONE);
CAddress addr2 = CAddress(ResolveService("250.1.1.1", 9999), NODE_NONE);
@@ -486,8 +396,6 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket_legacy)
BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket_legacy)
{
- AddrManTest addrman;
-
CAddress addr1 = CAddress(ResolveService("250.1.2.1", 8333), NODE_NONE);
CAddress addr2 = CAddress(ResolveService("250.1.2.1", 9999), NODE_NONE);
@@ -564,8 +472,6 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket_legacy)
// 101.8.0.0/16 AS8
BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
{
- AddrManTest addrman;
-
CAddress addr1 = CAddress(ResolveService("250.1.1.1", 8333), NODE_NONE);
CAddress addr2 = CAddress(ResolveService("250.1.1.1", 9999), NODE_NONE);
@@ -619,8 +525,6 @@ BOOST_AUTO_TEST_CASE(caddrinfo_get_tried_bucket)
BOOST_AUTO_TEST_CASE(caddrinfo_get_new_bucket)
{
- AddrManTest addrman;
-
CAddress addr1 = CAddress(ResolveService("250.1.2.1", 8333), NODE_NONE);
CAddress addr2 = CAddress(ResolveService("250.1.2.1", 9999), NODE_NONE);
@@ -700,72 +604,69 @@ BOOST_AUTO_TEST_CASE(addrman_serialization)
{
std::vector<bool> asmap1 = FromBytes(asmap_raw, sizeof(asmap_raw) * 8);
- auto addrman_asmap1 = std::make_unique<AddrManTest>(asmap1);
- auto addrman_asmap1_dup = std::make_unique<AddrManTest>(asmap1);
- auto addrman_noasmap = std::make_unique<AddrManTest>();
+ auto addrman_asmap1 = TestAddrMan(asmap1);
+ auto addrman_asmap1_dup = TestAddrMan(asmap1);
+ auto addrman_noasmap = TestAddrMan();
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
CAddress addr = CAddress(ResolveService("250.1.1.1"), NODE_NONE);
CNetAddr default_source;
-
addrman_asmap1->Add({addr}, default_source);
stream << *addrman_asmap1;
// serializing/deserializing addrman with the same asmap
stream >> *addrman_asmap1_dup;
- std::pair<int, int> bucketAndEntry_asmap1 = addrman_asmap1->GetBucketAndEntry(addr);
- std::pair<int, int> bucketAndEntry_asmap1_dup = addrman_asmap1_dup->GetBucketAndEntry(addr);
- BOOST_CHECK(bucketAndEntry_asmap1.second != -1);
- BOOST_CHECK(bucketAndEntry_asmap1_dup.second != -1);
+ AddressPosition addr_pos1 = addrman_asmap1->FindAddressEntry(addr).value();
+ AddressPosition addr_pos2 = addrman_asmap1_dup->FindAddressEntry(addr).value();
+ BOOST_CHECK(addr_pos1.multiplicity != 0);
+ BOOST_CHECK(addr_pos2.multiplicity != 0);
- BOOST_CHECK(bucketAndEntry_asmap1.first == bucketAndEntry_asmap1_dup.first);
- BOOST_CHECK(bucketAndEntry_asmap1.second == bucketAndEntry_asmap1_dup.second);
+ BOOST_CHECK(addr_pos1 == addr_pos2);
// deserializing asmaped peers.dat to non-asmaped addrman
stream << *addrman_asmap1;
stream >> *addrman_noasmap;
- std::pair<int, int> bucketAndEntry_noasmap = addrman_noasmap->GetBucketAndEntry(addr);
- BOOST_CHECK(bucketAndEntry_noasmap.second != -1);
- BOOST_CHECK(bucketAndEntry_asmap1.first != bucketAndEntry_noasmap.first);
- BOOST_CHECK(bucketAndEntry_asmap1.second != bucketAndEntry_noasmap.second);
+ AddressPosition addr_pos3 = addrman_noasmap->FindAddressEntry(addr).value();
+ BOOST_CHECK(addr_pos3.multiplicity != 0);
+ BOOST_CHECK(addr_pos1.bucket != addr_pos3.bucket);
+ BOOST_CHECK(addr_pos1.position != addr_pos3.position);
// deserializing non-asmaped peers.dat to asmaped addrman
- addrman_asmap1 = std::make_unique<AddrManTest>(asmap1);
- addrman_noasmap = std::make_unique<AddrManTest>();
+ addrman_asmap1 = TestAddrMan(asmap1);
+ addrman_noasmap = TestAddrMan();
addrman_noasmap->Add({addr}, default_source);
stream << *addrman_noasmap;
stream >> *addrman_asmap1;
- std::pair<int, int> bucketAndEntry_asmap1_deser = addrman_asmap1->GetBucketAndEntry(addr);
- BOOST_CHECK(bucketAndEntry_asmap1_deser.second != -1);
- BOOST_CHECK(bucketAndEntry_asmap1_deser.first != bucketAndEntry_noasmap.first);
- BOOST_CHECK(bucketAndEntry_asmap1_deser.first == bucketAndEntry_asmap1_dup.first);
- BOOST_CHECK(bucketAndEntry_asmap1_deser.second == bucketAndEntry_asmap1_dup.second);
+
+ AddressPosition addr_pos4 = addrman_asmap1->FindAddressEntry(addr).value();
+ BOOST_CHECK(addr_pos4.multiplicity != 0);
+ BOOST_CHECK(addr_pos4.bucket != addr_pos3.bucket);
+ BOOST_CHECK(addr_pos4 == addr_pos2);
// used to map to different buckets, now maps to the same bucket.
- addrman_asmap1 = std::make_unique<AddrManTest>(asmap1);
- addrman_noasmap = std::make_unique<AddrManTest>();
+ addrman_asmap1 = TestAddrMan(asmap1);
+ addrman_noasmap = TestAddrMan();
CAddress addr1 = CAddress(ResolveService("250.1.1.1"), NODE_NONE);
CAddress addr2 = CAddress(ResolveService("250.2.1.1"), NODE_NONE);
addrman_noasmap->Add({addr, addr2}, default_source);
- std::pair<int, int> bucketAndEntry_noasmap_addr1 = addrman_noasmap->GetBucketAndEntry(addr1);
- std::pair<int, int> bucketAndEntry_noasmap_addr2 = addrman_noasmap->GetBucketAndEntry(addr2);
- BOOST_CHECK(bucketAndEntry_noasmap_addr1.first != bucketAndEntry_noasmap_addr2.first);
- BOOST_CHECK(bucketAndEntry_noasmap_addr1.second != bucketAndEntry_noasmap_addr2.second);
+ AddressPosition addr_pos5 = addrman_noasmap->FindAddressEntry(addr1).value();
+ AddressPosition addr_pos6 = addrman_noasmap->FindAddressEntry(addr2).value();
+ BOOST_CHECK(addr_pos5.bucket != addr_pos6.bucket);
stream << *addrman_noasmap;
stream >> *addrman_asmap1;
- std::pair<int, int> bucketAndEntry_asmap1_deser_addr1 = addrman_asmap1->GetBucketAndEntry(addr1);
- std::pair<int, int> bucketAndEntry_asmap1_deser_addr2 = addrman_asmap1->GetBucketAndEntry(addr2);
- BOOST_CHECK(bucketAndEntry_asmap1_deser_addr1.first == bucketAndEntry_asmap1_deser_addr2.first);
- BOOST_CHECK(bucketAndEntry_asmap1_deser_addr1.second != bucketAndEntry_asmap1_deser_addr2.second);
+ AddressPosition addr_pos7 = addrman_asmap1->FindAddressEntry(addr1).value();
+ AddressPosition addr_pos8 = addrman_asmap1->FindAddressEntry(addr2).value();
+ BOOST_CHECK(addr_pos7.bucket == addr_pos8.bucket);
+ BOOST_CHECK(addr_pos7.position != addr_pos8.position);
}
BOOST_AUTO_TEST_CASE(remove_invalid)
{
// Confirm that invalid addresses are ignored in unserialization.
- auto addrman = std::make_unique<AddrManTest>();
+ auto addrman = TestAddrMan();
CDataStream stream(SER_NETWORK, PROTOCOL_VERSION);
const CAddress new1{ResolveService("5.5.5.5"), NODE_NONE};
@@ -797,29 +698,29 @@ BOOST_AUTO_TEST_CASE(remove_invalid)
BOOST_REQUIRE(pos + sizeof(tried2_raw_replacement) <= stream.size());
memcpy(stream.data() + pos, tried2_raw_replacement, sizeof(tried2_raw_replacement));
- addrman = std::make_unique<AddrManTest>();
+ addrman = TestAddrMan();
stream >> *addrman;
BOOST_CHECK_EQUAL(addrman->size(), 2);
}
BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision)
{
- AddrManTest addrman;
+ auto addrman = TestAddrMan();
- BOOST_CHECK(addrman.size() == 0);
+ BOOST_CHECK(addrman->size() == 0);
// Empty addrman should return blank addrman info.
- BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
// Add twenty two addresses.
CNetAddr source = ResolveIP("252.2.2.2");
for (unsigned int i = 1; i < 23; i++) {
CService addr = ResolveService("250.1.1." + ToString(i));
- BOOST_CHECK(addrman.Add({CAddress(addr, NODE_NONE)}, source));
+ BOOST_CHECK(addrman->Add({CAddress(addr, NODE_NONE)}, source));
// No collisions in tried.
- BOOST_CHECK(addrman.Good(addr));
- BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
+ BOOST_CHECK(addrman->Good(addr));
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
}
// Ensure Good handles duplicates well.
@@ -828,114 +729,125 @@ BOOST_AUTO_TEST_CASE(addrman_selecttriedcollision)
CService addr = ResolveService("250.1.1." + ToString(i));
// Unable to add duplicate address to tried table.
- BOOST_CHECK(!addrman.Good(addr));
+ BOOST_CHECK(!addrman->Good(addr));
// Verify duplicate address not marked as a collision.
- BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
}
}
BOOST_AUTO_TEST_CASE(addrman_noevict)
{
- AddrManTest addrman;
+ auto addrman = TestAddrMan();
// Add 35 addresses.
CNetAddr source = ResolveIP("252.2.2.2");
for (unsigned int i = 1; i < 36; i++) {
CService addr = ResolveService("250.1.1." + ToString(i));
- BOOST_CHECK(addrman.Add({CAddress(addr, NODE_NONE)}, source));
+ BOOST_CHECK(addrman->Add({CAddress(addr, NODE_NONE)}, source));
// No collision yet.
- BOOST_CHECK(addrman.Good(addr));
+ BOOST_CHECK(addrman->Good(addr));
}
// Collision in tried table between 36 and 19.
CService addr36 = ResolveService("250.1.1.36");
- BOOST_CHECK(addrman.Add({CAddress(addr36, NODE_NONE)}, source));
- BOOST_CHECK(!addrman.Good(addr36));
- BOOST_CHECK_EQUAL(addrman.SelectTriedCollision().first.ToString(), "250.1.1.19:0");
+ BOOST_CHECK(addrman->Add({CAddress(addr36, NODE_NONE)}, source));
+ BOOST_CHECK(!addrman->Good(addr36));
+ BOOST_CHECK_EQUAL(addrman->SelectTriedCollision().first.ToString(), "250.1.1.19:0");
// 36 should be discarded and 19 not evicted.
// This means we keep 19 in the tried table and
// 36 stays in the new table.
- addrman.ResolveCollisions();
- BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
+ addrman->ResolveCollisions();
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
// Let's create two collisions.
for (unsigned int i = 37; i < 59; i++) {
CService addr = ResolveService("250.1.1." + ToString(i));
- BOOST_CHECK(addrman.Add({CAddress(addr, NODE_NONE)}, source));
- BOOST_CHECK(addrman.Good(addr));
+ BOOST_CHECK(addrman->Add({CAddress(addr, NODE_NONE)}, source));
+ BOOST_CHECK(addrman->Good(addr));
}
// Cause a collision in the tried table.
CService addr59 = ResolveService("250.1.1.59");
- BOOST_CHECK(addrman.Add({CAddress(addr59, NODE_NONE)}, source));
- BOOST_CHECK(!addrman.Good(addr59));
+ BOOST_CHECK(addrman->Add({CAddress(addr59, NODE_NONE)}, source));
+ BOOST_CHECK(!addrman->Good(addr59));
- BOOST_CHECK_EQUAL(addrman.SelectTriedCollision().first.ToString(), "250.1.1.10:0");
+ BOOST_CHECK_EQUAL(addrman->SelectTriedCollision().first.ToString(), "250.1.1.10:0");
// Cause a second collision in the new table.
- BOOST_CHECK(!addrman.Add({CAddress(addr36, NODE_NONE)}, source));
+ BOOST_CHECK(!addrman->Add({CAddress(addr36, NODE_NONE)}, source));
// 36 still cannot be moved from new to tried due to colliding with 19
- BOOST_CHECK(!addrman.Good(addr36));
- BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() != "[::]:0");
+ BOOST_CHECK(!addrman->Good(addr36));
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() != "[::]:0");
// Resolve all collisions.
- addrman.ResolveCollisions();
- BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
+ addrman->ResolveCollisions();
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
}
BOOST_AUTO_TEST_CASE(addrman_evictionworks)
{
- AddrManTest addrman;
+ auto addrman = TestAddrMan();
- BOOST_CHECK(addrman.size() == 0);
+ BOOST_CHECK(addrman->size() == 0);
// Empty addrman should return blank addrman info.
- BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
// Add 35 addresses
CNetAddr source = ResolveIP("252.2.2.2");
for (unsigned int i = 1; i < 36; i++) {
CService addr = ResolveService("250.1.1." + ToString(i));
- BOOST_CHECK(addrman.Add({CAddress(addr, NODE_NONE)}, source));
+ BOOST_CHECK(addrman->Add({CAddress(addr, NODE_NONE)}, source));
// No collision yet.
- BOOST_CHECK(addrman.Good(addr));
+ BOOST_CHECK(addrman->Good(addr));
}
// Collision between 36 and 19.
CService addr = ResolveService("250.1.1.36");
- BOOST_CHECK(addrman.Add({CAddress(addr, NODE_NONE)}, source));
- BOOST_CHECK(!addrman.Good(addr));
+ BOOST_CHECK(addrman->Add({CAddress(addr, NODE_NONE)}, source));
+ BOOST_CHECK(!addrman->Good(addr));
- auto info = addrman.SelectTriedCollision().first;
+ auto info = addrman->SelectTriedCollision().first;
BOOST_CHECK_EQUAL(info.ToString(), "250.1.1.19:0");
// Ensure test of address fails, so that it is evicted.
- addrman.SimConnFail(info);
+ // Update entry in tried by setting last good connection in the deep past.
+ BOOST_CHECK(!addrman->Good(info, /*nTime=*/1));
+ addrman->Attempt(info, /*fCountFailure=*/false, /*nTime=*/GetAdjustedTime() - 61);
// Should swap 36 for 19.
- addrman.ResolveCollisions();
- BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
+ addrman->ResolveCollisions();
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
+ AddressPosition addr_pos{addrman->FindAddressEntry(CAddress(addr, NODE_NONE)).value()};
+ BOOST_CHECK(addr_pos.tried);
// If 36 was swapped for 19, then adding 36 to tried should fail because we
// are attempting to add a duplicate.
// We check this by verifying Good() returns false and also verifying that
// we have no collisions.
- BOOST_CHECK(!addrman.Good(addr));
- BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
+ BOOST_CHECK(!addrman->Good(addr));
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
// 19 should fail as a collision (not a duplicate) if we now attempt to move
// it to the tried table.
CService addr19 = ResolveService("250.1.1.19");
- BOOST_CHECK(!addrman.Good(addr19));
- BOOST_CHECK_EQUAL(addrman.SelectTriedCollision().first.ToString(), "250.1.1.36:0");
-
- addrman.ResolveCollisions();
- BOOST_CHECK(addrman.SelectTriedCollision().first.ToString() == "[::]:0");
+ BOOST_CHECK(!addrman->Good(addr19));
+ BOOST_CHECK_EQUAL(addrman->SelectTriedCollision().first.ToString(), "250.1.1.36:0");
+
+ // Eviction is also successful if too much time has passed since last try
+ SetMockTime(GetTime() + 4 * 60 * 60);
+ addrman->ResolveCollisions();
+ BOOST_CHECK(addrman->SelectTriedCollision().first.ToString() == "[::]:0");
+ // Now 19 is in tried again, and 36 back to new
+ AddressPosition addr_pos19{addrman->FindAddressEntry(CAddress(addr19, NODE_NONE)).value()};
+ BOOST_CHECK(addr_pos19.tried);
+ AddressPosition addr_pos36{addrman->FindAddressEntry(CAddress(addr, NODE_NONE)).value()};
+ BOOST_CHECK(!addr_pos36.tried);
}
static CDataStream AddrmanToStream(const AddrMan& addrman)
@@ -1044,5 +956,35 @@ BOOST_AUTO_TEST_CASE(load_addrman_corrupted)
BOOST_CHECK_THROW(ReadFromStream(addrman2, ssPeers2), std::ios_base::failure);
}
+BOOST_AUTO_TEST_CASE(addrman_update_address)
+{
+ // Tests updating nTime via Connected() and nServices via SetServices()
+ auto addrman = TestAddrMan();
+ CNetAddr source{ResolveIP("252.2.2.2")};
+ CAddress addr{CAddress(ResolveService("250.1.1.1", 8333), NODE_NONE)};
+
+ int64_t start_time{GetAdjustedTime() - 10000};
+ addr.nTime = start_time;
+ BOOST_CHECK(addrman->Add({addr}, source));
+ BOOST_CHECK_EQUAL(addrman->size(), 1U);
+
+ // Updating an addrman entry with a different port doesn't change it
+ CAddress addr_diff_port{CAddress(ResolveService("250.1.1.1", 8334), NODE_NONE)};
+ addr_diff_port.nTime = start_time;
+ addrman->Connected(addr_diff_port);
+ addrman->SetServices(addr_diff_port, NODE_NETWORK_LIMITED);
+ std::vector<CAddress> vAddr1{addrman->GetAddr(/*max_addresses=*/0, /*max_pct=*/0, /*network=*/std::nullopt)};
+ BOOST_CHECK_EQUAL(vAddr1.size(), 1U);
+ BOOST_CHECK_EQUAL(vAddr1.at(0).nTime, start_time);
+ BOOST_CHECK_EQUAL(vAddr1.at(0).nServices, NODE_NONE);
+
+ // Updating an addrman entry with the correct port is successful
+ addrman->Connected(addr);
+ addrman->SetServices(addr, NODE_NETWORK_LIMITED);
+ std::vector<CAddress> vAddr2 = addrman->GetAddr(/*max_addresses=*/0, /*max_pct=*/0, /*network=*/std::nullopt);
+ BOOST_CHECK_EQUAL(vAddr2.size(), 1U);
+ BOOST_CHECK(vAddr2.at(0).nTime >= start_time + 10000);
+ BOOST_CHECK_EQUAL(vAddr2.at(0).nServices, NODE_NETWORK_LIMITED);
+}
BOOST_AUTO_TEST_SUITE_END()
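The addrman_new_multiplicity test above relies on a property of the new table that is only hinted at in its comments: a re-announcement of a known address is ignored unless it carries a newer nTime, and even then the address gains an additional new-table bucket only with a probability that halves for every bucket it already occupies, capped at 8 buckets. The following is a rough, hand-written approximation of that acceptance rule for illustration only; it is not copied from addrman.cpp, the names are made up, and checks the real code performs (tried-table entries, service-flag updates, etc.) are omitted.

#include <cstdint>
#include <random>

// Illustrative sketch of the new-table acceptance rule exercised by the test.
bool AcceptIntoAnotherNewBucket(uint32_t existing_ntime, uint32_t incoming_ntime,
                                int existing_ref_count, std::mt19937_64& rng)
{
    // A re-announcement with no fresher timestamp carries no new information.
    if (incoming_ntime <= existing_ntime) return false;

    // An address is referenced by at most 8 new-table buckets.
    if (existing_ref_count >= 8) return false;

    // With N existing references it is 2^N times harder to gain another one,
    // which is why the test needs ~400 iterations to reach a multiplicity of 8.
    const uint64_t factor{uint64_t{1} << existing_ref_count};
    return (rng() % factor) == 0;
}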
diff --git a/src/test/fuzz/addition_overflow.cpp b/src/test/fuzz/addition_overflow.cpp
index c6cfbd8d30..cfad41659e 100644
--- a/src/test/fuzz/addition_overflow.cpp
+++ b/src/test/fuzz/addition_overflow.cpp
@@ -5,6 +5,7 @@
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
+#include <util/overflow.h>
#include <cstdint>
#include <string>
diff --git a/src/test/fuzz/crypto_chacha20_poly1305_aead.cpp b/src/test/fuzz/crypto_chacha20_poly1305_aead.cpp
index 5e60b0f25b..596614a71b 100644
--- a/src/test/fuzz/crypto_chacha20_poly1305_aead.cpp
+++ b/src/test/fuzz/crypto_chacha20_poly1305_aead.cpp
@@ -7,6 +7,7 @@
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
+#include <util/overflow.h>
#include <cassert>
#include <cstdint>
diff --git a/src/test/fuzz/golomb_rice.cpp b/src/test/fuzz/golomb_rice.cpp
index d6879067b8..2d0b29953c 100644
--- a/src/test/fuzz/golomb_rice.cpp
+++ b/src/test/fuzz/golomb_rice.cpp
@@ -19,20 +19,6 @@
#include <vector>
namespace {
-uint64_t MapIntoRange(const uint64_t x, const uint64_t n)
-{
- const uint64_t x_hi = x >> 32;
- const uint64_t x_lo = x & 0xFFFFFFFF;
- const uint64_t n_hi = n >> 32;
- const uint64_t n_lo = n & 0xFFFFFFFF;
- const uint64_t ac = x_hi * n_hi;
- const uint64_t ad = x_hi * n_lo;
- const uint64_t bc = x_lo * n_hi;
- const uint64_t bd = x_lo * n_lo;
- const uint64_t mid34 = (bd >> 32) + (bc & 0xFFFFFFFF) + (ad & 0xFFFFFFFF);
- const uint64_t upper64 = ac + (bc >> 32) + (ad >> 32) + (mid34 >> 32);
- return upper64;
-}
uint64_t HashToRange(const std::vector<uint8_t>& element, const uint64_t f)
{
diff --git a/src/test/fuzz/integer.cpp b/src/test/fuzz/integer.cpp
index 33b0d7325b..3087f11771 100644
--- a/src/test/fuzz/integer.cpp
+++ b/src/test/fuzz/integer.cpp
@@ -26,6 +26,7 @@
#include <univalue.h>
#include <util/check.h>
#include <util/moneystr.h>
+#include <util/overflow.h>
#include <util/strencodings.h>
#include <util/string.h>
#include <util/system.h>
diff --git a/src/test/fuzz/pow.cpp b/src/test/fuzz/pow.cpp
index 1123c8c170..0004d82d66 100644
--- a/src/test/fuzz/pow.cpp
+++ b/src/test/fuzz/pow.cpp
@@ -9,6 +9,7 @@
#include <test/fuzz/FuzzedDataProvider.h>
#include <test/fuzz/fuzz.h>
#include <test/fuzz/util.h>
+#include <util/overflow.h>
#include <cstdint>
#include <optional>
diff --git a/src/test/fuzz/util.cpp b/src/test/fuzz/util.cpp
index 843b29b911..f89b597eed 100644
--- a/src/test/fuzz/util.cpp
+++ b/src/test/fuzz/util.cpp
@@ -8,10 +8,13 @@
#include <pubkey.h>
#include <test/fuzz/util.h>
#include <test/util/script.h>
+#include <util/overflow.h>
#include <util/rbf.h>
#include <util/time.h>
#include <version.h>
+#include <memory>
+
FuzzedSock::FuzzedSock(FuzzedDataProvider& fuzzed_data_provider)
: m_fuzzed_data_provider{fuzzed_data_provider}
{
@@ -157,6 +160,20 @@ int FuzzedSock::Connect(const sockaddr*, socklen_t) const
return 0;
}
+std::unique_ptr<Sock> FuzzedSock::Accept(sockaddr* addr, socklen_t* addr_len) const
+{
+ constexpr std::array accept_errnos{
+ ECONNABORTED,
+ EINTR,
+ ENOMEM,
+ };
+ if (m_fuzzed_data_provider.ConsumeBool()) {
+ SetFuzzedErrNo(m_fuzzed_data_provider, accept_errnos);
+ return std::unique_ptr<FuzzedSock>();
+ }
+ return std::make_unique<FuzzedSock>(m_fuzzed_data_provider);
+}
+
int FuzzedSock::GetSockOpt(int level, int opt_name, void* opt_val, socklen_t* opt_len) const
{
constexpr std::array getsockopt_errnos{
diff --git a/src/test/fuzz/util.h b/src/test/fuzz/util.h
index 7937315822..fd7f40c01d 100644
--- a/src/test/fuzz/util.h
+++ b/src/test/fuzz/util.h
@@ -193,17 +193,6 @@ template <typename T>
}
}
-template <class T>
-[[nodiscard]] bool AdditionOverflow(const T i, const T j) noexcept
-{
- static_assert(std::is_integral<T>::value, "Integral required.");
- if (std::numeric_limits<T>::is_signed) {
- return (i > 0 && j > std::numeric_limits<T>::max() - i) ||
- (i < 0 && j < std::numeric_limits<T>::min() - i);
- }
- return std::numeric_limits<T>::max() - i < j;
-}
-
[[nodiscard]] bool ContainsSpentInput(const CTransaction& tx, const CCoinsViewCache& inputs) noexcept;
/**
@@ -412,6 +401,8 @@ public:
int Connect(const sockaddr*, socklen_t) const override;
+ std::unique_ptr<Sock> Accept(sockaddr* addr, socklen_t* addr_len) const override;
+
int GetSockOpt(int level, int opt_name, void* opt_val, socklen_t* opt_len) const override;
bool Wait(std::chrono::milliseconds timeout, Event requested, Event* occurred = nullptr) const override;
diff --git a/src/test/util/net.h b/src/test/util/net.h
index 006e876c1a..20c45058a1 100644
--- a/src/test/util/net.h
+++ b/src/test/util/net.h
@@ -13,6 +13,7 @@
#include <array>
#include <cassert>
#include <cstring>
+#include <memory>
#include <string>
struct ConnmanTestMsg : public CConnman {
@@ -126,6 +127,23 @@ public:
int Connect(const sockaddr*, socklen_t) const override { return 0; }
+ std::unique_ptr<Sock> Accept(sockaddr* addr, socklen_t* addr_len) const override
+ {
+ if (addr != nullptr) {
+ // Pretend all connections come from 5.5.5.5:6789
+ memset(addr, 0x00, *addr_len);
+ const socklen_t write_len = static_cast<socklen_t>(sizeof(sockaddr_in));
+ if (*addr_len >= write_len) {
+ *addr_len = write_len;
+ sockaddr_in* addr_in = reinterpret_cast<sockaddr_in*>(addr);
+ addr_in->sin_family = AF_INET;
+ memset(&addr_in->sin_addr, 0x05, sizeof(addr_in->sin_addr));
+ addr_in->sin_port = htons(6789);
+ }
+ }
+ return std::make_unique<StaticContentsSock>("");
+ };
+
int GetSockOpt(int level, int opt_name, void* opt_val, socklen_t* opt_len) const override
{
std::memset(opt_val, 0x0, *opt_len);
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index 3a37aeb531..c48b2025ad 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -182,28 +182,28 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const
// instead of unit tests, but for now we need these here.
RegisterAllCoreRPCCommands(tableRPC);
- auto rv = LoadChainstate(fReindex.load(),
- *Assert(m_node.chainman.get()),
- Assert(m_node.mempool.get()),
- fPruneMode,
- chainparams.GetConsensus(),
- m_args.GetBoolArg("-reindex-chainstate", false),
- m_cache_sizes.block_tree_db,
- m_cache_sizes.coins_db,
- m_cache_sizes.coins,
- true,
- true);
- assert(!rv.has_value());
-
- auto maybe_verify_failure = VerifyLoadedChainstate(
+ auto maybe_load_error = LoadChainstate(fReindex.load(),
+ *Assert(m_node.chainman.get()),
+ Assert(m_node.mempool.get()),
+ fPruneMode,
+ chainparams.GetConsensus(),
+ m_args.GetBoolArg("-reindex-chainstate", false),
+ m_cache_sizes.block_tree_db,
+ m_cache_sizes.coins_db,
+ m_cache_sizes.coins,
+ /*block_tree_db_in_memory=*/true,
+ /*coins_db_in_memory=*/true);
+ assert(!maybe_load_error.has_value());
+
+ auto maybe_verify_error = VerifyLoadedChainstate(
*Assert(m_node.chainman),
fReindex.load(),
m_args.GetBoolArg("-reindex-chainstate", false),
chainparams.GetConsensus(),
m_args.GetIntArg("-checkblocks", DEFAULT_CHECKBLOCKS),
m_args.GetIntArg("-checklevel", DEFAULT_CHECKLEVEL),
- static_cast<int64_t(*)()>(GetTime));
- assert(!maybe_verify_failure.has_value());
+ /*get_unix_time_seconds=*/static_cast<int64_t(*)()>(GetTime));
+ assert(!maybe_verify_error.has_value());
BlockValidationState state;
if (!m_node.chainman->ActiveChainstate().ActivateBestChain(state)) {
diff --git a/src/test/util_tests.cpp b/src/test/util_tests.cpp
index 077d6e2d37..04c4524911 100644
--- a/src/test/util_tests.cpp
+++ b/src/test/util_tests.cpp
@@ -15,6 +15,7 @@
#include <util/getuniquepath.h>
#include <util/message.h> // For MessageSign(), MessageVerify(), MESSAGE_MAGIC
#include <util/moneystr.h>
+#include <util/overflow.h>
#include <util/spanparsing.h>
#include <util/strencodings.h>
#include <util/string.h>
@@ -1463,6 +1464,38 @@ BOOST_AUTO_TEST_CASE(test_IsDigit)
BOOST_CHECK_EQUAL(IsDigit(9), false);
}
+/* Check for overflow */
+template <typename T>
+static void TestAddMatrixOverflow()
+{
+ constexpr T MAXI{std::numeric_limits<T>::max()};
+ BOOST_CHECK(!CheckedAdd(T{1}, MAXI));
+ BOOST_CHECK(!CheckedAdd(MAXI, MAXI));
+ BOOST_CHECK_EQUAL(0, CheckedAdd(T{0}, T{0}).value());
+ BOOST_CHECK_EQUAL(MAXI, CheckedAdd(T{0}, MAXI).value());
+ BOOST_CHECK_EQUAL(MAXI, CheckedAdd(T{1}, MAXI - 1).value());
+}
+
+/* Check for overflow or underflow */
+template <typename T>
+static void TestAddMatrix()
+{
+ TestAddMatrixOverflow<T>();
+ constexpr T MINI{std::numeric_limits<T>::min()};
+ constexpr T MAXI{std::numeric_limits<T>::max()};
+ BOOST_CHECK(!CheckedAdd(T{-1}, MINI));
+ BOOST_CHECK(!CheckedAdd(MINI, MINI));
+ BOOST_CHECK_EQUAL(MINI, CheckedAdd(T{0}, MINI).value());
+ BOOST_CHECK_EQUAL(MINI, CheckedAdd(T{-1}, MINI + 1).value());
+ BOOST_CHECK_EQUAL(-1, CheckedAdd(MINI, MAXI).value());
+}
+
+BOOST_AUTO_TEST_CASE(util_overflow)
+{
+ TestAddMatrixOverflow<unsigned>();
+ TestAddMatrix<signed>();
+}
+
BOOST_AUTO_TEST_CASE(test_ParseInt32)
{
int32_t n;
diff --git a/src/txmempool.cpp b/src/txmempool.cpp
index 66beb0a9b3..dc2769b81e 100644
--- a/src/txmempool.cpp
+++ b/src/txmempool.cpp
@@ -64,16 +64,6 @@ private:
int64_t feeDelta;
};
-struct update_lock_points
-{
- explicit update_lock_points(const LockPoints& _lp) : lp(_lp) { }
-
- void operator() (CTxMemPoolEntry &e) { e.UpdateLockPoints(lp); }
-
-private:
- const LockPoints& lp;
-};
-
bool TestLockPointValidity(CChain& active_chain, const LockPoints& lp)
{
AssertLockHeld(cs_main);
@@ -649,10 +639,7 @@ void CTxMemPool::removeForReorg(CChain& chain, std::function<bool(txiter)> check
}
RemoveStaged(setAllRemoves, false, MemPoolRemovalReason::REORG);
for (indexed_transaction_set::const_iterator it = mapTx.begin(); it != mapTx.end(); it++) {
- const LockPoints lp{it->GetLockPoints()};
- if (!TestLockPointValidity(chain, lp)) {
- mapTx.modify(it, update_lock_points(lp));
- }
+ assert(TestLockPointValidity(chain, it->GetLockPoints()));
}
}
diff --git a/src/txmempool.h b/src/txmempool.h
index df578d5111..b8c508fd90 100644
--- a/src/txmempool.h
+++ b/src/txmempool.h
@@ -312,6 +312,16 @@ public:
}
};
+struct update_lock_points
+{
+ explicit update_lock_points(const LockPoints& _lp) : lp(_lp) { }
+
+ void operator() (CTxMemPoolEntry &e) { e.UpdateLockPoints(lp); }
+
+private:
+ const LockPoints& lp;
+};
+
// Multi_index tag names
struct descendant_score {};
struct entry_time {};
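Moving update_lock_points out of txmempool.cpp and into the header is what lets the reorg handling in validation.cpp (see the validation.cpp hunk further down) patch a mempool entry's cached LockPoints from inside the filter lambda, instead of sweeping the whole mempool afterwards; the loop in removeForReorg can then simply assert that every surviving entry still has valid lock points. As a minimal illustration of how such a Boost.MultiIndex modifier functor is applied (hypothetical variable names, not code taken from the commit):

// `it` is an iterator into mapTx and `lp` holds lock points freshly computed by
// CheckSequenceLocks(); modify() hands the stored CTxMemPoolEntry to the functor
// so it can update the cached LockPoints in place.
mapTx.modify(it, update_lock_points(lp));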
diff --git a/src/util/golombrice.h b/src/util/golombrice.h
index 67d262406f..f14cb594db 100644
--- a/src/util/golombrice.h
+++ b/src/util/golombrice.h
@@ -40,4 +40,35 @@ uint64_t GolombRiceDecode(BitStreamReader<IStream>& bitreader, uint8_t P)
return (q << P) + r;
}
+// Map a value x that is uniformly distributed in the range [0, 2^64) to a
+// value uniformly distributed in [0, n) by returning the upper 64 bits of
+// x * n.
+//
+// See: https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/
+static inline uint64_t MapIntoRange(uint64_t x, uint64_t n)
+{
+#ifdef __SIZEOF_INT128__
+ return (static_cast<unsigned __int128>(x) * static_cast<unsigned __int128>(n)) >> 64;
+#else
+ // To perform the calculation on 64-bit numbers without losing the
+ // result to overflow, split the numbers into the most significant and
+ // least significant 32 bits and perform multiplication piece-wise.
+ //
+ // See: https://stackoverflow.com/a/26855440
+ const uint64_t x_hi = x >> 32;
+ const uint64_t x_lo = x & 0xFFFFFFFF;
+ const uint64_t n_hi = n >> 32;
+ const uint64_t n_lo = n & 0xFFFFFFFF;
+
+ const uint64_t ac = x_hi * n_hi;
+ const uint64_t ad = x_hi * n_lo;
+ const uint64_t bc = x_lo * n_hi;
+ const uint64_t bd = x_lo * n_lo;
+
+ const uint64_t mid34 = (bd >> 32) + (bc & 0xFFFFFFFF) + (ad & 0xFFFFFFFF);
+ const uint64_t upper64 = ac + (bc >> 32) + (ad >> 32) + (mid34 >> 32);
+ return upper64;
+#endif
+}
+
#endif // BITCOIN_UTIL_GOLOMBRICE_H
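MapIntoRange() is Lemire's multiply-then-shift reduction: because x is uniform in [0, 2^64), the upper 64 bits of the 128-bit product x * n are uniform in [0, n), which gives the Golomb-Rice coded filters a fast alternative to a modulo. The portable branch simply computes that upper half with 32-bit limbs. Below is a small stand-alone check of the two branches against each other; it is illustrative only, not part of the change, and assumes a compiler with the __int128 extension (e.g. GCC or Clang).

#include <cassert>
#include <cstdint>

// Mirror of the portable #else branch above.
static uint64_t MapIntoRangePortable(uint64_t x, uint64_t n)
{
    const uint64_t x_hi = x >> 32, x_lo = x & 0xFFFFFFFF;
    const uint64_t n_hi = n >> 32, n_lo = n & 0xFFFFFFFF;
    const uint64_t ac = x_hi * n_hi, ad = x_hi * n_lo;
    const uint64_t bc = x_lo * n_hi, bd = x_lo * n_lo;
    const uint64_t mid34 = (bd >> 32) + (bc & 0xFFFFFFFF) + (ad & 0xFFFFFFFF);
    return ac + (bc >> 32) + (ad >> 32) + (mid34 >> 32);
}

int main()
{
    const uint64_t xs[]{0, 1, 0xFFFFFFFFULL, 0xDEADBEEFCAFEBABEULL, UINT64_MAX};
    const uint64_t ns[]{1, 2, 1000, 0x100000000ULL, UINT64_MAX};
    for (const uint64_t x : xs) {
        for (const uint64_t n : ns) {
            const uint64_t expected = (static_cast<unsigned __int128>(x) * n) >> 64;
            assert(MapIntoRangePortable(x, n) == expected); // both branches agree
            assert(MapIntoRangePortable(x, n) < n);         // result lies in [0, n)
        }
    }
}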
diff --git a/src/util/overflow.h b/src/util/overflow.h
new file mode 100644
index 0000000000..5982af8d04
--- /dev/null
+++ b/src/util/overflow.h
@@ -0,0 +1,31 @@
+// Copyright (c) 2021 The Bitcoin Core developers
+// Distributed under the MIT software license, see the accompanying
+// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+
+#ifndef BITCOIN_UTIL_OVERFLOW_H
+#define BITCOIN_UTIL_OVERFLOW_H
+
+#include <limits>
+#include <optional>
+#include <type_traits>
+
+template <class T>
+[[nodiscard]] bool AdditionOverflow(const T i, const T j) noexcept
+{
+ static_assert(std::is_integral<T>::value, "Integral required.");
+ if (std::numeric_limits<T>::is_signed) {
+ return (i > 0 && j > std::numeric_limits<T>::max() - i) ||
+ (i < 0 && j < std::numeric_limits<T>::min() - i);
+ }
+ return std::numeric_limits<T>::max() - i < j;
+}
+
+template <class T>
+[[nodiscard]] std::optional<T> CheckedAdd(const T i, const T j) noexcept
+{
+ if (AdditionOverflow(i, j)) {
+ return std::nullopt;
+ }
+ return i + j;
+}
+
+#endif // BITCOIN_UTIL_OVERFLOW_H
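CheckedAdd() is the non-throwing counterpart of AdditionOverflow(): it returns std::nullopt rather than ever evaluating an addition whose result would not be representable (which for signed types would be undefined behaviour). A minimal stand-alone usage sketch, not part of the commit, assuming the header is on the include path:

#include <util/overflow.h>

#include <cstdint>
#include <iostream>
#include <limits>
#include <optional>

int main()
{
    // Representable sum: the optional holds the value 3.
    const auto ok{CheckedAdd(int32_t{1}, int32_t{2})};
    std::cout << ok.value() << '\n'; // prints 3

    // INT32_MAX + 1 would overflow, so no addition is performed and the
    // optional is empty.
    const auto bad{CheckedAdd(std::numeric_limits<int32_t>::max(), int32_t{1})};
    std::cout << bad.has_value() << '\n'; // prints 0
}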
diff --git a/src/util/sock.cpp b/src/util/sock.cpp
index 1a4d67a65e..2029d70a37 100644
--- a/src/util/sock.cpp
+++ b/src/util/sock.cpp
@@ -10,6 +10,7 @@
#include <util/system.h>
#include <util/time.h>
+#include <memory>
#include <stdexcept>
#include <string>
@@ -73,6 +74,32 @@ int Sock::Connect(const sockaddr* addr, socklen_t addr_len) const
return connect(m_socket, addr, addr_len);
}
+std::unique_ptr<Sock> Sock::Accept(sockaddr* addr, socklen_t* addr_len) const
+{
+#ifdef WIN32
+ static constexpr auto ERR = INVALID_SOCKET;
+#else
+ static constexpr auto ERR = SOCKET_ERROR;
+#endif
+
+ std::unique_ptr<Sock> sock;
+
+ const auto socket = accept(m_socket, addr, addr_len);
+ if (socket != ERR) {
+ try {
+ sock = std::make_unique<Sock>(socket);
+ } catch (const std::exception&) {
+#ifdef WIN32
+ closesocket(socket);
+#else
+ close(socket);
+#endif
+ }
+ }
+
+ return sock;
+}
+
int Sock::GetSockOpt(int level, int opt_name, void* opt_val, socklen_t* opt_len) const
{
return getsockopt(m_socket, level, opt_name, static_cast<char*>(opt_val), opt_len);
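The point of routing accept(2) through the virtual Sock::Accept() is the same as for the other wrappers: callers hold a Sock (or a mock such as StaticContentsSock or FuzzedSock elsewhere in this diff) and never touch the raw descriptor, so the accept path becomes unit- and fuzz-testable. A minimal sketch of a call site, illustrative only and not from the commit; it assumes listen_sock is already bound and listening, and that the socket types come in via sock.h/compat.h.

#include <util/sock.h>

#include <memory>

std::unique_ptr<Sock> AcceptOneConnection(const Sock& listen_sock)
{
    sockaddr_storage peer_addr;
    socklen_t peer_addr_len{sizeof(peer_addr)};
    // An empty unique_ptr means accept() failed and errno describes the error;
    // a mock override can return a canned Sock here without any real networking.
    return listen_sock.Accept(reinterpret_cast<sockaddr*>(&peer_addr), &peer_addr_len);
}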
diff --git a/src/util/sock.h b/src/util/sock.h
index 59cc8c0b1d..7510482857 100644
--- a/src/util/sock.h
+++ b/src/util/sock.h
@@ -10,6 +10,7 @@
#include <util/time.h>
#include <chrono>
+#include <memory>
#include <string>
/**
@@ -97,6 +98,14 @@ public:
[[nodiscard]] virtual int Connect(const sockaddr* addr, socklen_t addr_len) const;
/**
+ * accept(2) wrapper. Equivalent to `std::make_unique<Sock>(accept(this->Get(), addr, addr_len))`.
+ * Code that uses this wrapper can be unit tested if this method is overridden by a mock Sock
+ * implementation.
+ * The returned unique_ptr is empty if `accept()` failed in which case errno will be set.
+ */
+ [[nodiscard]] virtual std::unique_ptr<Sock> Accept(sockaddr* addr, socklen_t* addr_len) const;
+
+ /**
* getsockopt(2) wrapper. Equivalent to
* `getsockopt(this->Get(), level, opt_name, opt_val, opt_len)`. Code that uses this
* wrapper can be unit tested if this method is overridden by a mock Sock implementation.
diff --git a/src/validation.cpp b/src/validation.cpp
index cb2b60b481..a98ffe006d 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -143,13 +143,6 @@ extern std::set<int> setDirtyFileInfo;
void FlushBlockFile(bool fFinalize = false, bool finalize_undo = false);
// ... TODO move fully to blockstorage
-CBlockIndex* BlockManager::LookupBlockIndex(const uint256& hash) const
-{
- AssertLockHeld(cs_main);
- BlockMap::const_iterator it = m_block_index.find(hash);
- return it == m_block_index.end() ? nullptr : it->second;
-}
-
CBlockIndex* CChainState::FindForkInGlobalIndex(const CBlockLocator& locator) const
{
AssertLockHeld(cs_main);
@@ -375,6 +368,8 @@ void CChainState::MaybeUpdateMempoolForReorg(
}
}
}
+ // CheckSequenceLocks updates lp. Update the mempool entry LockPoints.
+ if (!validLP) m_mempool->mapTx.modify(it, update_lock_points(lp));
return should_remove;
};
@@ -3123,42 +3118,6 @@ void CChainState::ResetBlockFailureFlags(CBlockIndex *pindex) {
}
}
-CBlockIndex* BlockManager::AddToBlockIndex(const CBlockHeader& block)
-{
- AssertLockHeld(cs_main);
-
- // Check for duplicate
- uint256 hash = block.GetHash();
- BlockMap::iterator it = m_block_index.find(hash);
- if (it != m_block_index.end())
- return it->second;
-
- // Construct new block index object
- CBlockIndex* pindexNew = new CBlockIndex(block);
- // We assign the sequence id to blocks only when the full data is available,
- // to avoid miners withholding blocks but broadcasting headers, to get a
- // competitive advantage.
- pindexNew->nSequenceId = 0;
- BlockMap::iterator mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
- pindexNew->phashBlock = &((*mi).first);
- BlockMap::iterator miPrev = m_block_index.find(block.hashPrevBlock);
- if (miPrev != m_block_index.end())
- {
- pindexNew->pprev = (*miPrev).second;
- pindexNew->nHeight = pindexNew->pprev->nHeight + 1;
- pindexNew->BuildSkip();
- }
- pindexNew->nTimeMax = (pindexNew->pprev ? std::max(pindexNew->pprev->nTimeMax, pindexNew->nTime) : pindexNew->nTime);
- pindexNew->nChainWork = (pindexNew->pprev ? pindexNew->pprev->nChainWork : 0) + GetBlockProof(*pindexNew);
- pindexNew->RaiseValidity(BLOCK_VALID_TREE);
- if (pindexBestHeader == nullptr || pindexBestHeader->nChainWork < pindexNew->nChainWork)
- pindexBestHeader = pindexNew;
-
- setDirtyBlockIndex.insert(pindexNew);
-
- return pindexNew;
-}
-
/** Mark a block as having its data received and checked (up to BLOCK_VALID_TRANSACTIONS). */
void CChainState::ReceivedBlockTransactions(const CBlock& block, CBlockIndex* pindexNew, const FlatFilePos& pos)
{
@@ -3325,21 +3284,6 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
return commitment;
}
-CBlockIndex* BlockManager::GetLastCheckpoint(const CCheckpointData& data)
-{
- const MapCheckpoints& checkpoints = data.mapCheckpoints;
-
- for (const MapCheckpoints::value_type& i : reverse_iterate(checkpoints))
- {
- const uint256& hash = i.second;
- CBlockIndex* pindex = LookupBlockIndex(hash);
- if (pindex) {
- return pindex;
- }
- }
- return nullptr;
-}
-
/** Context-dependent validity checks.
* By "context", we mean only the previous block headers, but not the UTXO
* set; UTXO-related validity checks are done in ConnectBlock().
@@ -3761,67 +3705,6 @@ bool TestBlockValidity(BlockValidationState& state,
return true;
}
-/**
- * BLOCK PRUNING CODE
- */
-
-void BlockManager::PruneOneBlockFile(const int fileNumber)
-{
- AssertLockHeld(cs_main);
- LOCK(cs_LastBlockFile);
-
- for (const auto& entry : m_block_index) {
- CBlockIndex* pindex = entry.second;
- if (pindex->nFile == fileNumber) {
- pindex->nStatus &= ~BLOCK_HAVE_DATA;
- pindex->nStatus &= ~BLOCK_HAVE_UNDO;
- pindex->nFile = 0;
- pindex->nDataPos = 0;
- pindex->nUndoPos = 0;
- setDirtyBlockIndex.insert(pindex);
-
- // Prune from m_blocks_unlinked -- any block we prune would have
- // to be downloaded again in order to consider its chain, at which
- // point it would be considered as a candidate for
- // m_blocks_unlinked or setBlockIndexCandidates.
- auto range = m_blocks_unlinked.equal_range(pindex->pprev);
- while (range.first != range.second) {
- std::multimap<CBlockIndex *, CBlockIndex *>::iterator _it = range.first;
- range.first++;
- if (_it->second == pindex) {
- m_blocks_unlinked.erase(_it);
- }
- }
- }
- }
-
- vinfoBlockFile[fileNumber].SetNull();
- setDirtyFileInfo.insert(fileNumber);
-}
-
-void BlockManager::FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height)
-{
- assert(fPruneMode && nManualPruneHeight > 0);
-
- LOCK2(cs_main, cs_LastBlockFile);
- if (chain_tip_height < 0) {
- return;
- }
-
- // last block to prune is the lesser of (user-specified height, MIN_BLOCKS_TO_KEEP from the tip)
- unsigned int nLastBlockWeCanPrune = std::min((unsigned)nManualPruneHeight, chain_tip_height - MIN_BLOCKS_TO_KEEP);
- int count = 0;
- for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
- if (vinfoBlockFile[fileNumber].nSize == 0 || vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
- continue;
- }
- PruneOneBlockFile(fileNumber);
- setFilesToPrune.insert(fileNumber);
- count++;
- }
- LogPrintf("Prune (Manual): prune_height=%d removed %d blk/rev pairs\n", nLastBlockWeCanPrune, count);
-}
-
/* This function is called from the RPC code for pruneblockchain */
void PruneBlockFilesManual(CChainState& active_chainstate, int nManualPruneHeight)
{
@@ -3832,259 +3715,6 @@ void PruneBlockFilesManual(CChainState& active_chainstate, int nManualPruneHeigh
}
}
-void BlockManager::FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd)
-{
- LOCK2(cs_main, cs_LastBlockFile);
- if (chain_tip_height < 0 || nPruneTarget == 0) {
- return;
- }
- if ((uint64_t)chain_tip_height <= nPruneAfterHeight) {
- return;
- }
-
- unsigned int nLastBlockWeCanPrune{(unsigned)std::min(prune_height, chain_tip_height - static_cast<int>(MIN_BLOCKS_TO_KEEP))};
- uint64_t nCurrentUsage = CalculateCurrentUsage();
- // We don't check to prune until after we've allocated new space for files
- // So we should leave a buffer under our target to account for another allocation
- // before the next pruning.
- uint64_t nBuffer = BLOCKFILE_CHUNK_SIZE + UNDOFILE_CHUNK_SIZE;
- uint64_t nBytesToPrune;
- int count = 0;
-
- if (nCurrentUsage + nBuffer >= nPruneTarget) {
- // On a prune event, the chainstate DB is flushed.
- // To avoid excessive prune events negating the benefit of high dbcache
- // values, we should not prune too rapidly.
- // So when pruning in IBD, increase the buffer a bit to avoid a re-prune too soon.
- if (is_ibd) {
- // Since this is only relevant during IBD, we use a fixed 10%
- nBuffer += nPruneTarget / 10;
- }
-
- for (int fileNumber = 0; fileNumber < nLastBlockFile; fileNumber++) {
- nBytesToPrune = vinfoBlockFile[fileNumber].nSize + vinfoBlockFile[fileNumber].nUndoSize;
-
- if (vinfoBlockFile[fileNumber].nSize == 0) {
- continue;
- }
-
- if (nCurrentUsage + nBuffer < nPruneTarget) { // are we below our target?
- break;
- }
-
- // don't prune files that could have a block within MIN_BLOCKS_TO_KEEP of the main chain's tip but keep scanning
- if (vinfoBlockFile[fileNumber].nHeightLast > nLastBlockWeCanPrune) {
- continue;
- }
-
- PruneOneBlockFile(fileNumber);
- // Queue up the files for removal
- setFilesToPrune.insert(fileNumber);
- nCurrentUsage -= nBytesToPrune;
- count++;
- }
- }
-
- LogPrint(BCLog::PRUNE, "Prune: target=%dMiB actual=%dMiB diff=%dMiB max_prune_height=%d removed %d blk/rev pairs\n",
- nPruneTarget/1024/1024, nCurrentUsage/1024/1024,
- ((int64_t)nPruneTarget - (int64_t)nCurrentUsage)/1024/1024,
- nLastBlockWeCanPrune, count);
-}
-
-CBlockIndex * BlockManager::InsertBlockIndex(const uint256& hash)
-{
- AssertLockHeld(cs_main);
-
- if (hash.IsNull())
- return nullptr;
-
- // Return existing
- BlockMap::iterator mi = m_block_index.find(hash);
- if (mi != m_block_index.end())
- return (*mi).second;
-
- // Create new
- CBlockIndex* pindexNew = new CBlockIndex();
- mi = m_block_index.insert(std::make_pair(hash, pindexNew)).first;
- pindexNew->phashBlock = &((*mi).first);
-
- return pindexNew;
-}
-
-bool BlockManager::LoadBlockIndex(
- const Consensus::Params& consensus_params,
- ChainstateManager& chainman)
-{
- if (!m_block_tree_db->LoadBlockIndexGuts(consensus_params, [this](const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) { return this->InsertBlockIndex(hash); })) {
- return false;
- }
-
- // Calculate nChainWork
- std::vector<std::pair<int, CBlockIndex*> > vSortedByHeight;
- vSortedByHeight.reserve(m_block_index.size());
- for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index)
- {
- CBlockIndex* pindex = item.second;
- vSortedByHeight.push_back(std::make_pair(pindex->nHeight, pindex));
- }
- sort(vSortedByHeight.begin(), vSortedByHeight.end());
-
- // Find start of assumed-valid region.
- int first_assumed_valid_height = std::numeric_limits<int>::max();
-
- for (const auto& [height, block] : vSortedByHeight) {
- if (block->IsAssumedValid()) {
- auto chainstates = chainman.GetAll();
-
- // If we encounter an assumed-valid block index entry, ensure that we have
- // one chainstate that tolerates assumed-valid entries and another that does
- // not (i.e. the background validation chainstate), since assumed-valid
- // entries should always be pending validation by a fully-validated chainstate.
- auto any_chain = [&](auto fnc) { return std::any_of(chainstates.cbegin(), chainstates.cend(), fnc); };
- assert(any_chain([](auto chainstate) { return chainstate->reliesOnAssumedValid(); }));
- assert(any_chain([](auto chainstate) { return !chainstate->reliesOnAssumedValid(); }));
-
- first_assumed_valid_height = height;
- break;
- }
- }
-
- for (const std::pair<int, CBlockIndex*>& item : vSortedByHeight)
- {
- if (ShutdownRequested()) return false;
- CBlockIndex* pindex = item.second;
- pindex->nChainWork = (pindex->pprev ? pindex->pprev->nChainWork : 0) + GetBlockProof(*pindex);
- pindex->nTimeMax = (pindex->pprev ? std::max(pindex->pprev->nTimeMax, pindex->nTime) : pindex->nTime);
-
- // We can link the chain of blocks for which we've received transactions at some point, or
- // blocks that are assumed-valid on the basis of snapshot load (see
- // PopulateAndValidateSnapshot()).
- // Pruned nodes may have deleted the block.
- if (pindex->nTx > 0) {
- if (pindex->pprev) {
- if (pindex->pprev->nChainTx > 0) {
- pindex->nChainTx = pindex->pprev->nChainTx + pindex->nTx;
- } else {
- pindex->nChainTx = 0;
- m_blocks_unlinked.insert(std::make_pair(pindex->pprev, pindex));
- }
- } else {
- pindex->nChainTx = pindex->nTx;
- }
- }
- if (!(pindex->nStatus & BLOCK_FAILED_MASK) && pindex->pprev && (pindex->pprev->nStatus & BLOCK_FAILED_MASK)) {
- pindex->nStatus |= BLOCK_FAILED_CHILD;
- setDirtyBlockIndex.insert(pindex);
- }
- if (pindex->IsAssumedValid() ||
- (pindex->IsValid(BLOCK_VALID_TRANSACTIONS) &&
- (pindex->HaveTxsDownloaded() || pindex->pprev == nullptr))) {
-
- // Fill each chainstate's block candidate set. Only add assumed-valid
- // blocks to the tip candidate set if the chainstate is allowed to rely on
- // assumed-valid blocks.
- //
- // If all setBlockIndexCandidates contained the assumed-valid blocks, the
- // background chainstate's ActivateBestChain() call would add assumed-valid
- // blocks to the chain (based on how FindMostWorkChain() works). Obviously
- // we don't want this since the purpose of the background validation chain
- // is to validate assued-valid blocks.
- //
- // Note: This is considering all blocks whose height is greater or equal to
- // the first assumed-valid block to be assumed-valid blocks, and excluding
- // them from the background chainstate's setBlockIndexCandidates set. This
- // does mean that some blocks which are not technically assumed-valid
- // (later blocks on a fork beginning before the first assumed-valid block)
- // might not get added to the the background chainstate, but this is ok,
- // because they will still be attached to the active chainstate if they
- // actually contain more work.
- //
- // Instad of this height-based approach, an earlier attempt was made at
- // detecting "holistically" whether the block index under consideration
- // relied on an assumed-valid ancestor, but this proved to be too slow to
- // be practical.
- for (CChainState* chainstate : chainman.GetAll()) {
- if (chainstate->reliesOnAssumedValid() ||
- pindex->nHeight < first_assumed_valid_height) {
- chainstate->setBlockIndexCandidates.insert(pindex);
- }
- }
- }
- if (pindex->nStatus & BLOCK_FAILED_MASK && (!chainman.m_best_invalid || pindex->nChainWork > chainman.m_best_invalid->nChainWork)) {
- chainman.m_best_invalid = pindex;
- }
- if (pindex->pprev)
- pindex->BuildSkip();
- if (pindex->IsValid(BLOCK_VALID_TREE) && (pindexBestHeader == nullptr || CBlockIndexWorkComparator()(pindexBestHeader, pindex)))
- pindexBestHeader = pindex;
- }
-
- return true;
-}
-
-void BlockManager::Unload() {
- m_blocks_unlinked.clear();
-
- for (const BlockMap::value_type& entry : m_block_index) {
- delete entry.second;
- }
-
- m_block_index.clear();
-}
-
-bool BlockManager::LoadBlockIndexDB(ChainstateManager& chainman)
-{
- if (!LoadBlockIndex(::Params().GetConsensus(), chainman)) {
- return false;
- }
-
- // Load block file info
- m_block_tree_db->ReadLastBlockFile(nLastBlockFile);
- vinfoBlockFile.resize(nLastBlockFile + 1);
- LogPrintf("%s: last block file = %i\n", __func__, nLastBlockFile);
- for (int nFile = 0; nFile <= nLastBlockFile; nFile++) {
- m_block_tree_db->ReadBlockFileInfo(nFile, vinfoBlockFile[nFile]);
- }
- LogPrintf("%s: last block file info: %s\n", __func__, vinfoBlockFile[nLastBlockFile].ToString());
- for (int nFile = nLastBlockFile + 1; true; nFile++) {
- CBlockFileInfo info;
- if (m_block_tree_db->ReadBlockFileInfo(nFile, info)) {
- vinfoBlockFile.push_back(info);
- } else {
- break;
- }
- }
-
- // Check presence of blk files
- LogPrintf("Checking all blk files are present...\n");
- std::set<int> setBlkDataFiles;
- for (const std::pair<const uint256, CBlockIndex*>& item : m_block_index) {
- CBlockIndex* pindex = item.second;
- if (pindex->nStatus & BLOCK_HAVE_DATA) {
- setBlkDataFiles.insert(pindex->nFile);
- }
- }
- for (std::set<int>::iterator it = setBlkDataFiles.begin(); it != setBlkDataFiles.end(); it++)
- {
- FlatFilePos pos(*it, 0);
- if (CAutoFile(OpenBlockFile(pos, true), SER_DISK, CLIENT_VERSION).IsNull()) {
- return false;
- }
- }
-
- // Check whether we have ever pruned block & undo files
- m_block_tree_db->ReadFlag("prunedblockfiles", fHavePruned);
- if (fHavePruned)
- LogPrintf("LoadBlockIndexDB(): Block files have previously been pruned\n");
-
- // Check whether we need to continue reindexing
- bool fReindexing = false;
- m_block_tree_db->ReadReindexing(fReindexing);
- if(fReindexing) fReindex = true;
-
- return true;
-}
-
void CChainState::LoadMempool(const ArgsManager& args)
{
if (!m_mempool) return;
diff --git a/src/validation.h b/src/validation.h
index 68649a3d23..16f4bfe741 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -15,6 +15,7 @@
#include <chain.h>
#include <consensus/amount.h>
#include <fs.h>
+#include <node/blockstorage.h>
#include <policy/feerate.h>
#include <policy/packages.h>
#include <script/script_error.h>
@@ -40,7 +41,6 @@
class CChainState;
class CBlockTreeDB;
class CChainParams;
-struct CCheckpointData;
class CTxMemPool;
class ChainstateManager;
class SnapshotMetadata;
@@ -107,7 +107,6 @@ enum class SynchronizationState {
};
extern RecursiveMutex cs_main;
-typedef std::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap;
extern Mutex g_best_block_mutex;
extern std::condition_variable g_best_block_cv;
/** Used to notify getblocktemplate RPC of new tips. */
@@ -381,85 +380,6 @@ enum class FlushStateMode {
ALWAYS
};
-struct CBlockIndexWorkComparator
-{
- bool operator()(const CBlockIndex *pa, const CBlockIndex *pb) const;
-};
-
-/**
- * Maintains a tree of blocks (stored in `m_block_index`) which is consulted
- * to determine where the most-work tip is.
- *
- * This data is used mostly in `CChainState` - information about, e.g.,
- * candidate tips is not maintained here.
- */
-class BlockManager
-{
- friend CChainState;
-
-private:
- /* Calculate the block/rev files to delete based on height specified by user with RPC command pruneblockchain */
- void FindFilesToPruneManual(std::set<int>& setFilesToPrune, int nManualPruneHeight, int chain_tip_height);
-
- /**
- * Prune block and undo files (blk???.dat and rev???.dat) so that the disk space used is less than a user-defined target.
- * The user sets the target (in MB) on the command line or in config file. This will be run on startup and whenever new
- * space is allocated in a block or undo file, staying below the target. Changing back to unpruned requires a reindex
- * (which in this case means the blockchain must be re-downloaded.)
- *
- * Pruning functions are called from FlushStateToDisk when the global fCheckForPruning flag has been set.
- * Block and undo files are deleted in lock-step (when blk00003.dat is deleted, so is rev00003.dat.)
- * Pruning cannot take place until the longest chain is at least a certain length (100000 on mainnet, 1000 on testnet, 1000 on regtest).
- * Pruning will never delete a block within a defined distance (currently 288) from the active chain's tip.
- * The block index is updated by unsetting HAVE_DATA and HAVE_UNDO for any blocks that were stored in the deleted files.
- * A db flag records the fact that at least some block files have been pruned.
- *
- * @param[out] setFilesToPrune The set of file indices that can be unlinked will be returned
- */
- void FindFilesToPrune(std::set<int>& setFilesToPrune, uint64_t nPruneAfterHeight, int chain_tip_height, int prune_height, bool is_ibd);
-
-public:
- BlockMap m_block_index GUARDED_BY(cs_main);
-
- /**
- * All pairs A->B, where A (or one of its ancestors) misses transactions, but B has transactions.
- * Pruned nodes may have entries where B is missing data.
- */
- std::multimap<CBlockIndex*, CBlockIndex*> m_blocks_unlinked;
-
- std::unique_ptr<CBlockTreeDB> m_block_tree_db GUARDED_BY(::cs_main);
-
- bool LoadBlockIndexDB(ChainstateManager& chainman) EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
-
- /**
- * Load the blocktree off disk and into memory. Populate certain metadata
- * per index entry (nStatus, nChainWork, nTimeMax, etc.) as well as peripheral
- * collections like setDirtyBlockIndex.
- */
- bool LoadBlockIndex(
- const Consensus::Params& consensus_params,
- ChainstateManager& chainman) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
- /** Clear all data members. */
- void Unload() EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
- CBlockIndex* AddToBlockIndex(const CBlockHeader& block) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
- /** Create a new block index entry for a given block hash */
- CBlockIndex* InsertBlockIndex(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
- //! Mark one block file as pruned (modify associated database entries)
- void PruneOneBlockFile(const int fileNumber) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
- CBlockIndex* LookupBlockIndex(const uint256& hash) const EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
- //! Returns last CBlockIndex* that is a checkpoint
- CBlockIndex* GetLastCheckpoint(const CCheckpointData& data) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
-
- ~BlockManager() {
- Unload();
- }
-};
-
/**
* A convenience class for constructing the CCoinsView* hierarchy used
* to facilitate access to the UTXO set.
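The FindFilesToPrune documentation in the removed BlockManager declaration describes pruning as staying under a user-defined disk-space target while never touching blocks within 288 blocks of the active tip, with blk and rev files deleted in lock-step. A rough Python sketch of that selection policy, under the simplifying assumption that each file records its size and the highest block it contains (the real code works on CBlockFileInfo and also unsets HAVE_DATA/HAVE_UNDO flags):

    MIN_BLOCKS_TO_KEEP = 288  # never prune a file holding blocks this close to the active tip

    def find_files_to_prune(file_infos, prune_target_bytes, chain_tip_height):
        # file_infos: list of dicts with 'size' and 'highest_block' (a simplified
        # stand-in for CBlockFileInfo). Walk the files oldest-first and mark them
        # prunable until total usage drops to the target; blk?????.dat and the
        # matching rev?????.dat are removed together, so one index covers both.
        total = sum(info["size"] for info in file_infos)
        to_prune = []
        for file_number, info in enumerate(file_infos):
            if total <= prune_target_bytes:
                break
            if info["highest_block"] > chain_tip_height - MIN_BLOCKS_TO_KEEP:
                continue
            total -= info["size"]
            to_prune.append(file_number)
        return to_prune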
diff --git a/src/wallet/dump.cpp b/src/wallet/dump.cpp
index 645576ef91..9a4c201c4a 100644
--- a/src/wallet/dump.cpp
+++ b/src/wallet/dump.cpp
@@ -214,6 +214,11 @@ bool CreateFromDump(const std::string& name, const fs::path& wallet_path, biling
if (key == "checksum") {
std::vector<unsigned char> parsed_checksum = ParseHex(value);
+ if (parsed_checksum.size() != checksum.size()) {
+ error = Untranslated("Error: Checksum is not the correct size");
+ ret = false;
+ break;
+ }
std::copy(parsed_checksum.begin(), parsed_checksum.end(), checksum.begin());
break;
}
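The added check rejects a checksum record of the wrong length before copying it into the fixed-size buffer. A small Python restatement of the same guard, assuming the 32-byte (64 hex character) checksum the dump format uses:

    CHECKSUM_BYTES = 32  # the dump file checksum is a 32-byte digest (64 hex characters)

    def parse_checksum(value_hex):
        # Mirror the added dump.cpp guard: refuse a checksum whose parsed size does
        # not match the expected digest size instead of copying a short or long value.
        parsed = bytes.fromhex(value_hex)
        if len(parsed) != CHECKSUM_BYTES:
            raise ValueError("Error: Checksum is not the correct size")
        return parsed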
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
index 46d5bcf1a6..233ffd60da 100755
--- a/test/functional/feature_fee_estimation.py
+++ b/test/functional/feature_fee_estimation.py
@@ -17,7 +17,6 @@ from test_framework.messages import (
from test_framework.script import (
CScript,
OP_1,
- OP_2,
OP_DROP,
OP_TRUE,
)
@@ -36,16 +35,14 @@ from test_framework.util import (
# Construct 2 trivial P2SH's and the ScriptSigs that spend them
# So we can create many transactions without needing to spend
# time signing.
-REDEEM_SCRIPT_1 = CScript([OP_1, OP_DROP])
-REDEEM_SCRIPT_2 = CScript([OP_2, OP_DROP])
-P2SH_1 = script_to_p2sh_script(REDEEM_SCRIPT_1)
-P2SH_2 = script_to_p2sh_script(REDEEM_SCRIPT_2)
+SCRIPT = CScript([OP_1, OP_DROP])
+P2SH = script_to_p2sh_script(SCRIPT)
+REDEEM_SCRIPT = CScript([OP_TRUE, SCRIPT])
-# Associated ScriptSig's to spend satisfy P2SH_1 and P2SH_2
-SCRIPT_SIG = [CScript([OP_TRUE, REDEEM_SCRIPT_1]), CScript([OP_TRUE, REDEEM_SCRIPT_2])]
-
-def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee_increment):
+def small_txpuzzle_randfee(
+ from_node, conflist, unconflist, amount, min_fee, fee_increment
+):
"""Create and send a transaction with a random fee.
The transaction pays to a trivial P2SH script, and assumes that its inputs
@@ -66,20 +63,15 @@ def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee
while total_in <= (amount + fee) and len(conflist) > 0:
t = conflist.pop(0)
total_in += t["amount"]
- tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
+ tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), REDEEM_SCRIPT))
+ while total_in <= (amount + fee) and len(unconflist) > 0:
+ t = unconflist.pop(0)
+ total_in += t["amount"]
+ tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), REDEEM_SCRIPT))
if total_in <= amount + fee:
- while total_in <= (amount + fee) and len(unconflist) > 0:
- t = unconflist.pop(0)
- total_in += t["amount"]
- tx.vin.append(CTxIn(COutPoint(int(t["txid"], 16), t["vout"]), b""))
- if total_in <= amount + fee:
- raise RuntimeError(f"Insufficient funds: need {amount + fee}, have {total_in}")
- tx.vout.append(CTxOut(int((total_in - amount - fee) * COIN), P2SH_1))
- tx.vout.append(CTxOut(int(amount * COIN), P2SH_2))
- # These transactions don't need to be signed, but we still have to insert
- # the ScriptSig that will satisfy the ScriptPubKey.
- for inp in tx.vin:
- inp.scriptSig = SCRIPT_SIG[inp.prevout.n]
+ raise RuntimeError(f"Insufficient funds: need {amount + fee}, have {total_in}")
+ tx.vout.append(CTxOut(int((total_in - amount - fee) * COIN), P2SH))
+ tx.vout.append(CTxOut(int(amount * COIN), P2SH))
txid = from_node.sendrawtransaction(hexstring=tx.serialize().hex(), maxfeerate=0)
unconflist.append({"txid": txid, "vout": 0, "amount": total_in - amount - fee})
unconflist.append({"txid": txid, "vout": 1, "amount": amount})
@@ -87,34 +79,6 @@ def small_txpuzzle_randfee(from_node, conflist, unconflist, amount, min_fee, fee
return (tx.serialize().hex(), fee)
-def split_inputs(from_node, txins, txouts, initial_split=False):
- """Generate a lot of inputs so we can generate a ton of transactions.
-
- This function takes an input from txins, and creates and sends a transaction
- which splits the value into 2 outputs which are appended to txouts.
- Previously this was designed to be small inputs so they wouldn't have
- a high coin age when the notion of priority still existed."""
-
- prevtxout = txins.pop()
- tx = CTransaction()
- tx.vin.append(CTxIn(COutPoint(int(prevtxout["txid"], 16), prevtxout["vout"]), b""))
-
- half_change = satoshi_round(prevtxout["amount"] / 2)
- rem_change = prevtxout["amount"] - half_change - Decimal("0.00001000")
- tx.vout.append(CTxOut(int(half_change * COIN), P2SH_1))
- tx.vout.append(CTxOut(int(rem_change * COIN), P2SH_2))
-
- # If this is the initial split we actually need to sign the transaction
- # Otherwise we just need to insert the proper ScriptSig
- if (initial_split):
- completetx = from_node.signrawtransactionwithwallet(tx.serialize().hex())["hex"]
- else:
- tx.vin[0].scriptSig = SCRIPT_SIG[prevtxout["vout"]]
- completetx = tx.serialize().hex()
- txid = from_node.sendrawtransaction(hexstring=completetx, maxfeerate=0)
- txouts.append({"txid": txid, "vout": 0, "amount": half_change})
- txouts.append({"txid": txid, "vout": 1, "amount": rem_change})
-
def check_raw_estimates(node, fees_seen):
"""Call estimaterawfee and verify that the estimates meet certain invariants."""
@@ -125,7 +89,10 @@ def check_raw_estimates(node, fees_seen):
assert_greater_than(feerate, 0)
if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
- raise AssertionError(f"Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})")
+ raise AssertionError(
+ f"Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})"
+ )
+
def check_smart_estimates(node, fees_seen):
"""Call estimatesmartfee and verify that the estimates meet certain invariants."""
@@ -133,8 +100,8 @@ def check_smart_estimates(node, fees_seen):
delta = 1.0e-6 # account for rounding error
last_feerate = float(max(fees_seen))
all_smart_estimates = [node.estimatesmartfee(i) for i in range(1, 26)]
- mempoolMinFee = node.getmempoolinfo()['mempoolminfee']
- minRelaytxFee = node.getmempoolinfo()['minrelaytxfee']
+ mempoolMinFee = node.getmempoolinfo()["mempoolminfee"]
+ minRelaytxFee = node.getmempoolinfo()["minrelaytxfee"]
for i, e in enumerate(all_smart_estimates): # estimate is for i+1
feerate = float(e["feerate"])
assert_greater_than(feerate, 0)
@@ -142,9 +109,13 @@ def check_smart_estimates(node, fees_seen):
assert_greater_than_or_equal(feerate, float(minRelaytxFee))
if feerate + delta < min(fees_seen) or feerate - delta > max(fees_seen):
- raise AssertionError(f"Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})")
+ raise AssertionError(
+ f"Estimated fee ({feerate}) out of range ({min(fees_seen)},{max(fees_seen)})"
+ )
if feerate - delta > last_feerate:
- raise AssertionError(f"Estimated fee ({feerate}) larger than last fee ({last_feerate}) for lower number of confirms")
+ raise AssertionError(
+ f"Estimated fee ({feerate}) larger than last fee ({last_feerate}) for lower number of confirms"
+ )
last_feerate = feerate
if i == 0:
@@ -152,6 +123,7 @@ def check_smart_estimates(node, fees_seen):
else:
assert_greater_than_or_equal(i + 1, e["blocks"])
+
def check_estimates(node, fees_seen):
check_raw_estimates(node, fees_seen)
check_smart_estimates(node, fees_seen)
@@ -159,27 +131,25 @@ def check_estimates(node, fees_seen):
def send_tx(node, utxo, feerate):
"""Broadcast a 1in-1out transaction with a specific input and feerate (sat/vb)."""
- overhead, op, scriptsig, nseq, value, spk = 10, 36, 5, 4, 8, 24
- tx_size = overhead + op + scriptsig + nseq + value + spk
- fee = tx_size * feerate
-
tx = CTransaction()
- tx.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), SCRIPT_SIG[utxo["vout"]])]
- tx.vout = [CTxOut(int(utxo["amount"] * COIN) - fee, P2SH_1)]
- txid = node.sendrawtransaction(tx.serialize().hex())
+ tx.vin = [CTxIn(COutPoint(int(utxo["txid"], 16), utxo["vout"]), REDEEM_SCRIPT)]
+ tx.vout = [CTxOut(int(utxo["amount"] * COIN), P2SH)]
+
+ # vbytes == bytes as we are using legacy transactions
+ fee = tx.get_vsize() * feerate
+ tx.vout[0].nValue -= fee
- return txid
+ return node.sendrawtransaction(tx.serialize().hex())
class EstimateFeeTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 3
- # mine non-standard txs (e.g. txs with "dust" outputs)
# Force fSendTrickle to true (via whitelist.noban)
self.extra_args = [
- ["-acceptnonstdtxn", "-whitelist=noban@127.0.0.1"],
- ["-acceptnonstdtxn", "-whitelist=noban@127.0.0.1", "-blockmaxweight=68000"],
- ["-acceptnonstdtxn", "-whitelist=noban@127.0.0.1", "-blockmaxweight=32000"],
+ ["-whitelist=noban@127.0.0.1"],
+ ["-whitelist=noban@127.0.0.1", "-blockmaxweight=68000"],
+ ["-whitelist=noban@127.0.0.1", "-blockmaxweight=32000"],
]
def skip_test_if_missing_module(self):
@@ -212,11 +182,17 @@ class EstimateFeeTest(BitcoinTestFramework):
random.shuffle(self.confutxo)
for _ in range(random.randrange(100 - 50, 100 + 50)):
from_index = random.randint(1, 2)
- (txhex, fee) = small_txpuzzle_randfee(self.nodes[from_index], self.confutxo,
- self.memutxo, Decimal("0.005"), min_fee, min_fee)
+ (txhex, fee) = small_txpuzzle_randfee(
+ self.nodes[from_index],
+ self.confutxo,
+ self.memutxo,
+ Decimal("0.005"),
+ min_fee,
+ min_fee,
+ )
tx_kbytes = (len(txhex) // 2) / 1000.0
self.fees_per_kb.append(float(fee) / tx_kbytes)
- self.sync_mempools(wait=.1)
+ self.sync_mempools(wait=0.1)
mined = mining_node.getblock(self.generate(mining_node, 1)[0], True)["tx"]
# update which txouts are confirmed
newmem = []
@@ -229,46 +205,45 @@ class EstimateFeeTest(BitcoinTestFramework):
def initial_split(self, node):
"""Split two coinbase UTxOs into many small coins"""
- self.txouts = []
- self.txouts2 = []
- # Split a coinbase into two transaction puzzle outputs
- split_inputs(node, node.listunspent(0), self.txouts, True)
-
- # Mine
+ utxo_count = 2048
+ self.confutxo = []
+ splitted_amount = Decimal("0.04")
+ fee = Decimal("0.1")
+ change = Decimal("100") - splitted_amount * utxo_count - fee
+ tx = CTransaction()
+ tx.vin = [
+ CTxIn(COutPoint(int(cb["txid"], 16), cb["vout"]))
+ for cb in node.listunspent()[:2]
+ ]
+ tx.vout = [CTxOut(int(splitted_amount * COIN), P2SH) for _ in range(utxo_count)]
+ tx.vout.append(CTxOut(int(change * COIN), P2SH))
+ txhex = node.signrawtransactionwithwallet(tx.serialize().hex())["hex"]
+ txid = node.sendrawtransaction(txhex)
+ self.confutxo = [
+ {"txid": txid, "vout": i, "amount": splitted_amount}
+ for i in range(utxo_count)
+ ]
while len(node.getrawmempool()) > 0:
self.generate(node, 1, sync_fun=self.no_op)
- # Repeatedly split those 2 outputs, doubling twice for each rep
- # Use txouts to monitor the available utxo, since these won't be tracked in wallet
- reps = 0
- while reps < 5:
- # Double txouts to txouts2
- while len(self.txouts) > 0:
- split_inputs(node, self.txouts, self.txouts2)
- while len(node.getrawmempool()) > 0:
- self.generate(node, 1, sync_fun=self.no_op)
- # Double txouts2 to txouts
- while len(self.txouts2) > 0:
- split_inputs(node, self.txouts2, self.txouts)
- while len(node.getrawmempool()) > 0:
- self.generate(node, 1, sync_fun=self.no_op)
- reps += 1
-
def sanity_check_estimates_range(self):
"""Populate estimation buckets, assert estimates are in a sane range and
are strictly increasing as the target decreases."""
self.fees_per_kb = []
self.memutxo = []
- self.confutxo = self.txouts # Start with the set of confirmed txouts after splitting
self.log.info("Will output estimates for 1/2/3/6/15/25 blocks")
for _ in range(2):
- self.log.info("Creating transactions and mining them with a block size that can't keep up")
+ self.log.info(
+ "Creating transactions and mining them with a block size that can't keep up"
+ )
# Create transactions and mine 10 small blocks with node 2, but create txs faster than we can mine
self.transact_and_mine(10, self.nodes[2])
check_estimates(self.nodes[1], self.fees_per_kb)
- self.log.info("Creating transactions and mining them at a block size that is just big enough")
+ self.log.info(
+ "Creating transactions and mining them at a block size that is just big enough"
+ )
# Generate transactions while mining 10 more blocks, this time with node1
# which mines blocks with capacity just above the rate that transactions are being created
self.transact_and_mine(10, self.nodes[1])
@@ -277,12 +252,13 @@ class EstimateFeeTest(BitcoinTestFramework):
# Finish by mining a normal-sized block:
while len(self.nodes[1].getrawmempool()) > 0:
self.generate(self.nodes[1], 1)
+
self.log.info("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb)
def test_feerate_mempoolminfee(self):
- high_val = 3*self.nodes[1].estimatesmartfee(1)['feerate']
- self.restart_node(1, extra_args=[f'-minrelaytxfee={high_val}'])
+ high_val = 3 * self.nodes[1].estimatesmartfee(1)["feerate"]
+ self.restart_node(1, extra_args=[f"-minrelaytxfee={high_val}"])
check_estimates(self.nodes[1], self.fees_per_kb)
self.restart_node(1)
@@ -303,7 +279,7 @@ class EstimateFeeTest(BitcoinTestFramework):
utxos_to_respend = []
txids_to_replace = []
- assert len(utxos) >= 250
+ assert_greater_than_or_equal(len(utxos), 250)
for _ in range(5):
# Broadcast 45 low fee transactions that will need to be RBF'd
for _ in range(45):
@@ -315,27 +291,24 @@ class EstimateFeeTest(BitcoinTestFramework):
for _ in range(5):
send_tx(node, utxos.pop(0), low_feerate)
# Mine the transactions on another node
- self.sync_mempools(wait=.1, nodes=[node, miner])
+ self.sync_mempools(wait=0.1, nodes=[node, miner])
for txid in txids_to_replace:
miner.prioritisetransaction(txid=txid, fee_delta=-COIN)
self.generate(miner, 1)
# RBF the low-fee transactions
- while True:
- try:
- u = utxos_to_respend.pop(0)
- send_tx(node, u, high_feerate)
- except IndexError:
- break
+ while len(utxos_to_respend) > 0:
+ u = utxos_to_respend.pop(0)
+ send_tx(node, u, high_feerate)
# Mine the last replacement txs
- self.sync_mempools(wait=.1, nodes=[node, miner])
+ self.sync_mempools(wait=0.1, nodes=[node, miner])
self.generate(miner, 1)
# Only 10% of the transactions were really confirmed with a low feerate,
# the rest needed to be RBF'd. We must return the 90% conf rate feerate.
- high_feerate_kvb = Decimal(high_feerate) / COIN * 10**3
+ high_feerate_kvb = Decimal(high_feerate) / COIN * 10 ** 3
est_feerate = node.estimatesmartfee(2)["feerate"]
- assert est_feerate == high_feerate_kvb
+ assert_equal(est_feerate, high_feerate_kvb)
def run_test(self):
self.log.info("This test is time consuming, please be patient")
@@ -359,7 +332,9 @@ class EstimateFeeTest(BitcoinTestFramework):
self.sanity_check_estimates_range()
# check that the effective feerate is greater than or equal to the mempoolminfee even for high mempoolminfee
- self.log.info("Test fee rate estimation after restarting node with high MempoolMinFee")
+ self.log.info(
+ "Test fee rate estimation after restarting node with high MempoolMinFee"
+ )
self.test_feerate_mempoolminfee()
self.log.info("Restarting node with fresh estimation")
@@ -375,9 +350,10 @@ class EstimateFeeTest(BitcoinTestFramework):
self.log.info("Testing that fee estimation is disabled in blocksonly.")
self.restart_node(0, ["-blocksonly"])
- assert_raises_rpc_error(-32603, "Fee estimation disabled",
- self.nodes[0].estimatesmartfee, 2)
+ assert_raises_rpc_error(
+ -32603, "Fee estimation disabled", self.nodes[0].estimatesmartfee, 2
+ )
-if __name__ == '__main__':
+if __name__ == "__main__":
EstimateFeeTest().main()
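The reworked send_tx derives its fee from the transaction's virtual size rather than a hand-assembled byte count, and the helpers now always spend via the single anyone-can-spend P2SH script. A standalone sketch of the fee arithmetic only (plain Python, no test framework; for the legacy transactions used here vsize equals the serialized size):

    from decimal import Decimal

    COIN = 100_000_000  # satoshis per BTC

    def output_value_after_fee(input_amount_btc, vsize, feerate_sat_per_vb):
        # Same arithmetic as send_tx: fund one output with the whole input, then
        # subtract vsize * feerate (in satoshis) from its value.
        value = int(Decimal(input_amount_btc) * COIN)
        fee = vsize * feerate_sat_per_vb
        if fee >= value:
            raise RuntimeError("fee would consume the whole input")
        return value - fee

    # Example: a 0.04 BTC input spent in a ~110 vbyte transaction at 10 sat/vb.
    assert output_value_after_fee("0.04", 110, 10) == 4_000_000 - 1_100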
diff --git a/test/functional/feature_init.py b/test/functional/feature_init.py
index 40468c66e2..16815c1afc 100755
--- a/test/functional/feature_init.py
+++ b/test/functional/feature_init.py
@@ -44,14 +44,9 @@ class InitStressTest(BitcoinTestFramework):
def sigterm_node():
node.process.terminate()
node.process.wait()
- node.debug_log_path.unlink()
- node.debug_log_path.touch()
def check_clean_start():
"""Ensure that node restarts successfully after various interrupts."""
- # TODO: add -txindex=1 to fully test index initialization.
- # See https://github.com/bitcoin/bitcoin/pull/23289#discussion_r735159180 for
- # a discussion of the related bug.
node.start()
node.wait_for_rpc_connection()
assert_equal(200, node.getblockcount())
@@ -71,56 +66,33 @@ class InitStressTest(BitcoinTestFramework):
'net thread start',
'addcon thread start',
'loadblk thread start',
- # TODO: reenable - see above TODO
- # 'txindex thread start',
- 'msghand thread start'
+ 'txindex thread start',
+ 'msghand thread start',
+ 'net thread start',
+ 'addcon thread start',
]
if self.is_wallet_compiled():
lines_to_terminate_after.append('Verifying wallet')
for terminate_line in lines_to_terminate_after:
self.log.info(f"Starting node and will exit after line '{terminate_line}'")
- node.start(
- # TODO: add -txindex=1 to fully test index initialization.
- # extra_args=['-txindex=1'],
- )
- logfile = open(node.debug_log_path, 'r', encoding='utf8')
-
- MAX_SECS_TO_WAIT = 30
- start = time.time()
- num_lines = 0
-
- while True:
- line = logfile.readline()
- if line:
- num_lines += 1
-
- if line and terminate_line.lower() in line.lower():
- self.log.debug(f"Terminating node after {num_lines} log lines seen")
- sigterm_node()
- break
-
- if (time.time() - start) > MAX_SECS_TO_WAIT:
- raise AssertionError(
- f"missed line {terminate_line}; terminating now after {num_lines} lines")
+ node.start(extra_args=['-txindex=1'])
- if node.process.poll() is not None:
- raise AssertionError(f"node failed to start (line: '{terminate_line}')")
+ num_total_logs = node.wait_for_debug_log([terminate_line], ignore_case=True)
+ self.log.debug(f"Terminating node after {num_total_logs} log lines seen")
+ sigterm_node()
check_clean_start()
- num_total_logs = len(node.debug_log_path.read_text().splitlines())
self.stop_node(0)
self.log.info(
f"Terminate at some random point in the init process (max logs: {num_total_logs})")
for _ in range(40):
- terminate_after = random.randint(1, num_total_logs)
- self.log.debug(f"Starting node and will exit after {terminate_after} lines")
- node.start(
- # TODO: add -txindex=1 to fully test index initialization.
- # extra_args=['-txindex=1'],
- )
+ num_logs = len(Path(node.debug_log_path).read_text().splitlines())
+ additional_lines = random.randint(1, num_total_logs)
+ self.log.debug(f"Starting node and will exit after {additional_lines} lines")
+ node.start(extra_args=['-txindex=1'])
logfile = open(node.debug_log_path, 'r', encoding='utf8')
MAX_SECS_TO_WAIT = 10
@@ -132,7 +104,8 @@ class InitStressTest(BitcoinTestFramework):
if line:
num_lines += 1
- if num_lines >= terminate_after or (time.time() - start) > MAX_SECS_TO_WAIT:
+ if num_lines >= (num_logs + additional_lines) or \
+ (time.time() - start) > MAX_SECS_TO_WAIT:
self.log.debug(f"Terminating node after {num_lines} log lines seen")
sigterm_node()
break
@@ -152,11 +125,12 @@ class InitStressTest(BitcoinTestFramework):
}
for file_patt, err_fragment in files_to_disturb.items():
- target_file = list(node.chain_path.glob(file_patt))[0]
+ target_files = list(node.chain_path.glob(file_patt))
- self.log.info(f"Tweaking file to ensure failure {target_file}")
- bak_path = str(target_file) + ".bak"
- target_file.rename(bak_path)
+ for target_file in target_files:
+ self.log.info(f"Tweaking file to ensure failure {target_file}")
+ bak_path = str(target_file) + ".bak"
+ target_file.rename(bak_path)
# TODO: at some point, we should test perturbing the files instead of removing
# them, e.g.
@@ -170,14 +144,16 @@ class InitStressTest(BitcoinTestFramework):
# investigate doing this later.
node.assert_start_raises_init_error(
- # TODO: add -txindex=1 to fully test index initialization.
- # extra_args=['-txindex=1'],
+ extra_args=['-txindex=1'],
expected_msg=err_fragment,
match=ErrorMatch.PARTIAL_REGEX,
)
- self.log.info(f"Restoring file from {bak_path} and restarting")
- Path(bak_path).rename(target_file)
+ for target_file in target_files:
+ bak_path = str(target_file) + ".bak"
+ self.log.debug(f"Restoring file from {bak_path} and restarting")
+ Path(bak_path).rename(target_file)
+
check_clean_start()
self.stop_node(0)
diff --git a/test/functional/feature_pruning.py b/test/functional/feature_pruning.py
index 39cdb2750b..ba3c5053cb 100755
--- a/test/functional/feature_pruning.py
+++ b/test/functional/feature_pruning.py
@@ -96,9 +96,6 @@ class PruneTest(BitcoinTestFramework):
]
self.rpc_timeout = 120
- def skip_test_if_missing_module(self):
- self.skip_if_no_wallet()
-
def setup_network(self):
self.setup_nodes()
@@ -114,7 +111,8 @@ class PruneTest(BitcoinTestFramework):
def setup_nodes(self):
self.add_nodes(self.num_nodes, self.extra_args)
self.start_nodes()
- self.import_deterministic_coinbase_privkeys()
+ if self.is_wallet_compiled():
+ self.import_deterministic_coinbase_privkeys()
def create_big_chain(self):
# Start by creating some coinbases we can spend later
@@ -474,8 +472,9 @@ class PruneTest(BitcoinTestFramework):
self.log.info("Test manual pruning with timestamps")
self.manual_test(4, use_timestamp=True)
- self.log.info("Test wallet re-scan")
- self.wallet_test()
+ if self.is_wallet_compiled():
+ self.log.info("Test wallet re-scan")
+ self.wallet_test()
self.log.info("Test invalid pruning command line options")
self.test_invalid_command_line_options()
diff --git a/test/functional/feature_startupnotify.py b/test/functional/feature_startupnotify.py
new file mode 100755
index 0000000000..c6aa837768
--- /dev/null
+++ b/test/functional/feature_startupnotify.py
@@ -0,0 +1,41 @@
+#!/usr/bin/env python3
+# Copyright (c) 2020 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test -startupnotify."""
+
+import os
+
+from test_framework.test_framework import BitcoinTestFramework
+from test_framework.util import (
+ assert_equal,
+)
+
+NODE_DIR = "node0"
+FILE_NAME = "test.txt"
+
+
+class StartupNotifyTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+ self.disable_syscall_sandbox = True
+
+ def run_test(self):
+ tmpdir_file = os.path.join(self.options.tmpdir, NODE_DIR, FILE_NAME)
+ assert not os.path.exists(tmpdir_file)
+
+ self.log.info("Test -startupnotify command is run when node starts")
+ self.restart_node(0, extra_args=[f"-startupnotify=echo '{FILE_NAME}' >> {NODE_DIR}/{FILE_NAME}"])
+ self.wait_until(lambda: os.path.exists(tmpdir_file))
+
+ self.log.info("Test -startupnotify is executed once")
+ with open(tmpdir_file, "r", encoding="utf8") as f:
+ file_content = f.read()
+ assert_equal(file_content.count(FILE_NAME), 1)
+
+ self.log.info("Test node is fully started")
+ assert_equal(self.nodes[0].getblockcount(), 200)
+
+
+if __name__ == '__main__':
+ StartupNotifyTest().main()
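The single-execution check in the new test boils down to counting occurrences of a marker in the file the notification command appends to. The same check as a tiny standalone helper (illustrative only):

    def ran_exactly_once(path, marker):
        # -startupnotify should have fired once, so the marker it writes must
        # appear exactly one time in the output file.
        with open(path, encoding="utf8") as f:
            return f.read().count(marker) == 1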
diff --git a/test/functional/mining_basic.py b/test/functional/mining_basic.py
index f8d002e664..9c64bb1945 100755
--- a/test/functional/mining_basic.py
+++ b/test/functional/mining_basic.py
@@ -29,6 +29,8 @@ from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
)
+from test_framework.wallet import MiniWallet
+
VERSIONBITS_TOP_BITS = 0x20000000
VERSIONBITS_DEPLOYMENT_TESTDUMMY_BIT = 28
@@ -51,14 +53,11 @@ class MiningTest(BitcoinTestFramework):
self.setup_clean_chain = True
self.supports_cli = False
- def skip_test_if_missing_module(self):
- self.skip_if_no_wallet()
-
def mine_chain(self):
self.log.info('Create some old blocks')
for t in range(TIME_GENESIS_BLOCK, TIME_GENESIS_BLOCK + 200 * 600, 600):
self.nodes[0].setmocktime(t)
- self.generate(self.nodes[0], 1, sync_fun=self.no_op)
+ self.generate(self.wallet, 1, sync_fun=self.no_op)
mining_info = self.nodes[0].getmininginfo()
assert_equal(mining_info['blocks'], 200)
assert_equal(mining_info['currentblocktx'], 0)
@@ -75,8 +74,9 @@ class MiningTest(BitcoinTestFramework):
self.connect_nodes(0, 1)
def run_test(self):
- self.mine_chain()
node = self.nodes[0]
+ self.wallet = MiniWallet(node)
+ self.mine_chain()
def assert_submitblock(block, result_str_1, result_str_2=None):
block.solve()
@@ -95,7 +95,7 @@ class MiningTest(BitcoinTestFramework):
assert_equal(mining_info['pooledtx'], 0)
self.log.info("getblocktemplate: Test default witness commitment")
- txid = int(node.sendtoaddress(node.getnewaddress(), 1), 16)
+ txid = int(self.wallet.send_self_transfer(from_node=node)['wtxid'], 16)
tmpl = node.getblocktemplate(NORMAL_GBT_REQUEST_PARAMS)
# Check that default_witness_commitment is present.
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index ca6e341be8..0b9154a030 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -389,14 +389,17 @@ class TestNode():
def debug_log_path(self) -> Path:
return self.chain_path / 'debug.log'
+ def debug_log_bytes(self) -> int:
+ with open(self.debug_log_path, encoding='utf-8') as dl:
+ dl.seek(0, 2)
+ return dl.tell()
+
@contextlib.contextmanager
def assert_debug_log(self, expected_msgs, unexpected_msgs=None, timeout=2):
if unexpected_msgs is None:
unexpected_msgs = []
time_end = time.time() + timeout * self.timeout_factor
- with open(self.debug_log_path, encoding='utf-8') as dl:
- dl.seek(0, 2)
- prev_size = dl.tell()
+ prev_size = self.debug_log_bytes()
yield
@@ -419,6 +422,42 @@ class TestNode():
time.sleep(0.05)
self._raise_assertion_error('Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(str(expected_msgs), print_log))
+ def wait_for_debug_log(self, expected_msgs, timeout=10, ignore_case=False) -> int:
+ """
+ Block until we see a particular debug log message fragment or until we exceed the timeout.
+ Return:
+ the number of log lines we encountered when matching
+ """
+ time_end = time.time() + timeout * self.timeout_factor
+ prev_size = self.debug_log_bytes()
+ re_flags = re.MULTILINE | (re.IGNORECASE if ignore_case else 0)
+
+ while True:
+ found = True
+ with open(self.debug_log_path, encoding='utf-8') as dl:
+ dl.seek(prev_size)
+ log = dl.read()
+
+ for expected_msg in expected_msgs:
+ if re.search(re.escape(expected_msg), log, flags=re_flags) is None:
+ found = False
+
+ if found:
+ num_logs = len(log.splitlines())
+ return num_logs
+
+ if time.time() >= time_end:
+ print_log = " - " + "\n - ".join(log.splitlines())
+ break
+
+ # No sleep here because we want to detect the message fragment as fast as
+ # possible.
+
+ self._raise_assertion_error(
+ 'Expected messages "{}" does not partially match log:\n\n{}\n\n'.format(
+ str(expected_msgs), print_log))
+ return -1 # useless return to satisfy linter
+
@contextlib.contextmanager
def profile_with_perf(self, profile_name: str):
"""
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index 675f9aa8ff..eb2d030f4a 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -257,6 +257,7 @@ BASE_SCRIPTS = [
'wallet_bumpfee.py --descriptors',
'wallet_implicitsegwit.py --legacy-wallet',
'rpc_named_arguments.py',
+ 'feature_startupnotify.py',
'wallet_listsinceblock.py --legacy-wallet',
'wallet_listsinceblock.py --descriptors',
'wallet_listdescriptors.py --descriptors',
@@ -529,33 +530,35 @@ def run_tests(*, test_list, src_dir, build_dir, tmpdir, jobs=1, enable_coverage=
max_len_name = len(max(test_list, key=len))
test_count = len(test_list)
- for i in range(test_count):
- test_result, testdir, stdout, stderr = job_queue.get_next()
- test_results.append(test_result)
- done_str = "{}/{} - {}{}{}".format(i + 1, test_count, BOLD[1], test_result.name, BOLD[0])
- if test_result.status == "Passed":
- logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
- elif test_result.status == "Skipped":
- logging.debug("%s skipped" % (done_str))
- else:
- print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
- print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
- print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
- if combined_logs_len and os.path.isdir(testdir):
- # Print the final `combinedlogslen` lines of the combined logs
- print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
- print('\n============')
- print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
- print('============\n')
- combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
- if BOLD[0]:
- combined_logs_args += ['--color']
- combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
- print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
-
- if failfast:
- logging.debug("Early exiting after test failure")
- break
+ i = 0
+ while i < test_count:
+ for test_result, testdir, stdout, stderr in job_queue.get_next():
+ test_results.append(test_result)
+ i += 1
+ done_str = "{}/{} - {}{}{}".format(i, test_count, BOLD[1], test_result.name, BOLD[0])
+ if test_result.status == "Passed":
+ logging.debug("%s passed, Duration: %s s" % (done_str, test_result.time))
+ elif test_result.status == "Skipped":
+ logging.debug("%s skipped" % (done_str))
+ else:
+ print("%s failed, Duration: %s s\n" % (done_str, test_result.time))
+ print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
+ print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
+ if combined_logs_len and os.path.isdir(testdir):
+ # Print the final `combinedlogslen` lines of the combined logs
+ print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
+ print('\n============')
+ print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
+ print('============\n')
+ combined_logs_args = [sys.executable, os.path.join(tests_dir, 'combine_logs.py'), testdir]
+ if BOLD[0]:
+ combined_logs_args += ['--color']
+ combined_logs, _ = subprocess.Popen(combined_logs_args, universal_newlines=True, stdout=subprocess.PIPE).communicate()
+ print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
+
+ if failfast:
+ logging.debug("Early exiting after test failure")
+ break
print_results(test_results, max_len_name, (int(time.time() - start_time)))
@@ -649,8 +652,9 @@ class TestHandler:
dot_count = 0
while True:
- # Return first proc that finishes
+ # Return all procs that have finished, if any. Otherwise sleep until there is one.
time.sleep(.5)
+ ret = []
for job in self.jobs:
(name, start_time, proc, testdir, log_out, log_err) = job
if proc.poll() is not None:
@@ -669,7 +673,9 @@ class TestHandler:
clearline = '\r' + (' ' * dot_count) + '\r'
print(clearline, end='', flush=True)
dot_count = 0
- return TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr
+ ret.append((TestResult(name, status, int(time.time() - start_time)), testdir, stdout, stderr))
+ if ret:
+ return ret
if self.use_term_control:
print('.', end='', flush=True)
dot_count += 1
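The runner loop change depends on get_next() now returning every job that has finished since the last call, not just the first one. A compact sketch of that polling pattern, detached from the real TestHandler (names here are placeholders):

    import time

    def get_finished_jobs(jobs, poll_interval=0.5):
        # jobs: list of (name, popen) pairs for running test subprocesses. Sleep in
        # short intervals until at least one has exited, then hand back all finished
        # jobs in one batch and drop them from the running list.
        while True:
            time.sleep(poll_interval)
            finished = [(name, proc.returncode) for name, proc in jobs if proc.poll() is not None]
            if finished:
                done = {name for name, _ in finished}
                jobs[:] = [(n, p) for n, p in jobs if n not in done]
                return finished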
diff --git a/test/functional/tool_wallet.py b/test/functional/tool_wallet.py
index afe4dba7b4..2cb9dc4523 100755
--- a/test/functional/tool_wallet.py
+++ b/test/functional/tool_wallet.py
@@ -390,7 +390,11 @@ class ToolWalletTest(BitcoinTestFramework):
bad_sum_wallet_dump = os.path.join(self.nodes[0].datadir, "wallet-bad_sum3.dump")
dump_data["checksum"] = "2" * 10
self.write_dump(dump_data, bad_sum_wallet_dump)
- self.assert_raises_tool_error('Error: Dumpfile checksum does not match. Computed {}, expected {}{}'.format(checksum, "2" * 10, "0" * 54), '-wallet=badload', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump')
+ self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=badload', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump')
+ assert not os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest/wallets", "badload"))
+ dump_data["checksum"] = "3" * 66
+ self.write_dump(dump_data, bad_sum_wallet_dump)
+ self.assert_raises_tool_error('Error: Checksum is not the correct size', '-wallet=badload', '-dumpfile={}'.format(bad_sum_wallet_dump), 'createfromdump')
assert not os.path.isdir(os.path.join(self.nodes[0].datadir, "regtest/wallets", "badload"))
diff --git a/test/sanitizer_suppressions/lsan b/test/sanitizer_suppressions/lsan
index d2cb618d4e..828b1676f6 100644
--- a/test/sanitizer_suppressions/lsan
+++ b/test/sanitizer_suppressions/lsan
@@ -1,7 +1,4 @@
# Suppress warnings triggered in dependencies
-leak:libqminimal
-leak:libQt5Core
-leak:libQt5Gui
leak:libQt5Widgets
# false-positive due to use of secure_allocator<>
diff --git a/test/sanitizer_suppressions/ubsan b/test/sanitizer_suppressions/ubsan
index 6ff2008670..6e4ac80927 100644
--- a/test/sanitizer_suppressions/ubsan
+++ b/test/sanitizer_suppressions/ubsan
@@ -10,43 +10,57 @@ signed-integer-overflow:policy/feerate.cpp
# -fsanitize=integer suppressions
# ===============================
+# Dependencies
+# ------------
+# Suppressions in dependencies that are developed outside this repository.
+unsigned-integer-overflow:*/include/c++/
+unsigned-integer-overflow:bench/bench.h
+# unsigned-integer-overflow in FuzzedDataProvider's ConsumeIntegralInRange
+unsigned-integer-overflow:FuzzedDataProvider.h
+unsigned-integer-overflow:leveldb/
+unsigned-integer-overflow:minisketch/
+unsigned-integer-overflow:test/fuzz/crypto_diff_fuzz_chacha20.cpp
+implicit-integer-sign-change:*/include/boost/
+implicit-integer-sign-change:*/include/c++/
+implicit-integer-sign-change:*/new_allocator.h
+implicit-integer-sign-change:crc32c/
+# implicit-integer-sign-change in FuzzedDataProvider's ConsumeIntegralInRange
+implicit-integer-sign-change:FuzzedDataProvider.h
+implicit-integer-sign-change:minisketch/
+implicit-signed-integer-truncation:leveldb/
+implicit-unsigned-integer-truncation:*/include/c++/
+implicit-unsigned-integer-truncation:leveldb/
+implicit-unsigned-integer-truncation:test/fuzz/crypto_diff_fuzz_chacha20.cpp
+# std::variant warning fixed in https://github.com/gcc-mirror/gcc/commit/074436cf8cdd2a9ce75cadd36deb8301f00e55b9
+implicit-unsigned-integer-truncation:std::__detail::__variant::_Variant_storage
+shift-base:*/include/c++/
+shift-base:leveldb/
+shift-base:minisketch/
+shift-base:test/fuzz/crypto_diff_fuzz_chacha20.cpp
# Unsigned integer overflow occurs when the result of an unsigned integer
# computation cannot be represented in its type. Unlike signed integer overflow,
# this is not undefined behavior, but it is often unintentional. The list below
# contains files in which we expect unsigned integer overflows to occur. The
# list is used to suppress -fsanitize=integer warnings when running our CI UBSan
# job.
-unsigned-integer-overflow:*/include/c++/
unsigned-integer-overflow:addrman.cpp
unsigned-integer-overflow:arith_uint256.h
-unsigned-integer-overflow:basic_string.h
-unsigned-integer-overflow:bench/bench.h
unsigned-integer-overflow:bitcoin-tx.cpp
unsigned-integer-overflow:common/bloom.cpp
unsigned-integer-overflow:chain.cpp
unsigned-integer-overflow:chain.h
-unsigned-integer-overflow:coded_stream.h
unsigned-integer-overflow:coins.cpp
unsigned-integer-overflow:compressor.cpp
unsigned-integer-overflow:core_write.cpp
unsigned-integer-overflow:crypto/
-# unsigned-integer-overflow in FuzzedDataProvider's ConsumeIntegralInRange
-unsigned-integer-overflow:FuzzedDataProvider.h
unsigned-integer-overflow:hash.cpp
-unsigned-integer-overflow:leveldb/
-unsigned-integer-overflow:minisketch/
unsigned-integer-overflow:policy/fees.cpp
unsigned-integer-overflow:prevector.h
unsigned-integer-overflow:pubkey.h
unsigned-integer-overflow:script/interpreter.cpp
-unsigned-integer-overflow:stl_bvector.h
-unsigned-integer-overflow:test/fuzz/crypto_diff_fuzz_chacha20.cpp
unsigned-integer-overflow:txmempool.cpp
unsigned-integer-overflow:util/strencodings.cpp
unsigned-integer-overflow:validation.cpp
-implicit-integer-sign-change:*/include/boost/
-implicit-integer-sign-change:*/include/c++/
-implicit-integer-sign-change:*/new_allocator.h
implicit-integer-sign-change:addrman.h
implicit-integer-sign-change:arith_uint256.cpp
implicit-integer-sign-change:bech32.cpp
@@ -56,12 +70,8 @@ implicit-integer-sign-change:chain.h
implicit-integer-sign-change:coins.h
implicit-integer-sign-change:compat/stdin.cpp
implicit-integer-sign-change:compressor.h
-implicit-integer-sign-change:crc32c/
implicit-integer-sign-change:crypto/
-# implicit-integer-sign-change in FuzzedDataProvider's ConsumeIntegralInRange
-implicit-integer-sign-change:FuzzedDataProvider.h
implicit-integer-sign-change:key.cpp
-implicit-integer-sign-change:minisketch/
implicit-integer-sign-change:noui.cpp
implicit-integer-sign-change:policy/fees.cpp
implicit-integer-sign-change:prevector.h
@@ -84,7 +94,6 @@ implicit-signed-integer-truncation:addrman.cpp
implicit-signed-integer-truncation:addrman.h
implicit-signed-integer-truncation:chain.h
implicit-signed-integer-truncation:crypto/
-implicit-signed-integer-truncation:leveldb/
implicit-signed-integer-truncation:node/miner.cpp
implicit-signed-integer-truncation:net.cpp
implicit-signed-integer-truncation:net_processing.cpp
@@ -92,19 +101,9 @@ implicit-signed-integer-truncation:streams.h
implicit-signed-integer-truncation:test/arith_uint256_tests.cpp
implicit-signed-integer-truncation:test/skiplist_tests.cpp
implicit-signed-integer-truncation:torcontrol.cpp
-implicit-unsigned-integer-truncation:*/include/c++/
implicit-unsigned-integer-truncation:crypto/
-implicit-unsigned-integer-truncation:leveldb/
-implicit-unsigned-integer-truncation:test/fuzz/crypto_diff_fuzz_chacha20.cpp
-# std::variant warning fixed in https://github.com/gcc-mirror/gcc/commit/074436cf8cdd2a9ce75cadd36deb8301f00e55b9
-implicit-unsigned-integer-truncation:std::__detail::__variant::_Variant_storage
-shift-base:*/include/c++/
shift-base:arith_uint256.cpp
shift-base:crypto/
shift-base:hash.cpp
-shift-base:leveldb/
-shift-base:minisketch/
-shift-base:net_processing.cpp
shift-base:streams.h
-shift-base:test/fuzz/crypto_diff_fuzz_chacha20.cpp
shift-base:util/bip32.cpp
diff --git a/test/util/data/bitcoin-util-test.json b/test/util/data/bitcoin-util-test.json
index cca5732aa1..c9c64274c6 100644
--- a/test/util/data/bitcoin-util-test.json
+++ b/test/util/data/bitcoin-util-test.json
@@ -418,6 +418,29 @@
},
{ "exec": "./bitcoin-tx",
"args":
+ ["-create",
+ "in=00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff:0",
+ "set=privatekeys:[\"KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn\"]",
+ "set=prevtxs:[{\"txid\":\"00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff\",\"vout\":0,\"amount\":\"0\", \"scriptPubKey\":\"0014751e76e8199196d454941c45d1b3a323f1433bd6\"}]",
+ "sign=ALL",
+ "outaddr=0.001:193P6LtvS4nCnkDvM9uXn1gsSRqh4aDAz7"],
+ "output_cmp": "txcreatesignsegwit1.hex",
+ "description": "Creates a new transaction with a single witness input and a single output, and then signs the transaction"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
+ ["-create",
+ "in=00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff:0",
+ "set=privatekeys:[\"KwDiBf89QgGbjEhKnhXJuH7LrciVrZi3qYjgd9M7rFU73sVHnoWn\"]",
+ "set=prevtxs:[{\"txid\":\"00112233445566778899aabbccddeeff00112233445566778899aabbccddeeff\",\"vout\":0,\"scriptPubKey\":\"0014751e76e8199196d454941c45d1b3a323f1433bd6\"}]",
+ "sign=ALL",
+ "outaddr=0.001:193P6LtvS4nCnkDvM9uXn1gsSRqh4aDAz7"],
+ "return_code": 1,
+ "error_txt": "Missing amount for CTxOut with scriptPubKey=0014751e76e8199196d454941c45d1b3a323f1433bd6",
+ "description": "Tests the check for missing input amount for witness transactions"
+ },
+ { "exec": "./bitcoin-tx",
+ "args":
["-create", "outpubkey=0:02a5613bd857b7048924264d1e70e08fb2a7e6527d32b7ab1bb993ac59964ff397", "nversion=1"],
"output_cmp": "txcreateoutpubkey1.hex",
"description": "Creates a new transaction with a single pay-to-pubkey output"
diff --git a/test/util/data/txcreatesignsegwit1.hex b/test/util/data/txcreatesignsegwit1.hex
new file mode 100644
index 0000000000..45dd1f1dbf
--- /dev/null
+++ b/test/util/data/txcreatesignsegwit1.hex
@@ -0,0 +1 @@
+02000000000101ffeeddccbbaa99887766554433221100ffeeddccbbaa998877665544332211000000000000ffffffff01a0860100000000001976a9145834479edbbe0539b31ffd3a8f8ebadc2165ed0188ac0247304402202e8d8677912f73909ffbdb3ee87d10cce41d398ee206e534fa18330b566ece34022004f944f018a03c9f5b4cf0e9b0ae4f14049b55e7b6810a6ac26cd67cb4dcb31f01210279be667ef9dcbbac55a06295ce870b07029bfcdb2dce28d959f2815b16f8179800000000