-rw-r--r--  Makefile.am | 22
-rwxr-xr-x  ci/lint/06_script.sh | 2
-rw-r--r--  configure.ac | 30
-rwxr-xr-x  contrib/devtools/gen-manpages.sh | 16
-rwxr-xr-x  contrib/devtools/test-symbol-check.py | 37
-rw-r--r--  contrib/macdeploy/README.md | 8
-rwxr-xr-x  contrib/macdeploy/custom_dsstore.py | 58
-rw-r--r--  contrib/macdeploy/fancy.plist | 32
-rwxr-xr-x  contrib/macdeploy/macdeployqtplus | 562
-rw-r--r--  depends/funcs.mk | 4
-rw-r--r--  depends/packages/native_mac_alias.mk | 4
-rw-r--r--  depends/packages/qt.mk | 4
-rw-r--r--  doc/build-osx.md | 31
-rw-r--r--  doc/release-notes-19776.md | 9
-rw-r--r--  doc/tor.md | 7
-rw-r--r--  src/Makefile.crc32c.include | 2
-rw-r--r--  src/bench/wallet_balance.cpp | 6
-rw-r--r--  src/bitcoin-tx.cpp | 16
-rw-r--r--  src/bitcoin-wallet.cpp | 23
-rw-r--r--  src/bitcoind.cpp | 10
-rw-r--r--  src/core_read.cpp | 59
-rw-r--r--  src/crc32c/.appveyor.yml | 13
-rw-r--r--  src/crc32c/AUTHORS | 2
-rw-r--r--  src/crc32c/CMakeLists.txt | 43
-rw-r--r--  src/crc32c/Crc32cConfig.cmake.in (renamed from src/crc32c/Crc32cConfig.cmake) | 4
-rw-r--r--  src/crc32c/src/crc32c.cc | 6
-rw-r--r--  src/crc32c/src/crc32c_arm64.cc | 23
-rw-r--r--  src/crc32c/src/crc32c_arm64.h | 8
-rw-r--r--  src/crc32c/src/crc32c_arm64_check.h (renamed from src/crc32c/src/crc32c_arm64_linux_check.h) | 26
-rw-r--r--  src/crc32c/src/crc32c_benchmark.cc | 8
-rw-r--r--  src/crc32c/src/crc32c_read_le.h | 16
-rw-r--r--  src/init.cpp | 46
-rw-r--r--  src/interfaces/chain.h | 29
-rw-r--r--  src/interfaces/node.h | 3
-rw-r--r--  src/net.cpp | 57
-rw-r--r--  src/net.h | 34
-rw-r--r--  src/net_processing.cpp | 273
-rw-r--r--  src/net_processing.h | 86
-rw-r--r--  src/node/context.cpp | 1
-rw-r--r--  src/node/context.h | 2
-rw-r--r--  src/node/interfaces.cpp | 92
-rw-r--r--  src/policy/fees.cpp | 29
-rw-r--r--  src/policy/fees.h | 3
-rw-r--r--  src/protocol.cpp | 1
-rw-r--r--  src/protocol.h | 4
-rw-r--r--  src/qt/test/rpcnestedtests.cpp | 64
-rw-r--r--  src/randomenv.cpp | 2
-rw-r--r--  src/rpc/blockchain.cpp | 10
-rw-r--r--  src/rpc/blockchain.h | 2
-rw-r--r--  src/rpc/mining.cpp | 36
-rw-r--r--  src/rpc/net.cpp | 13
-rw-r--r--  src/sync.cpp | 32
-rw-r--r--  src/sync.h | 10
-rw-r--r--  src/test/denialofservice_tests.cpp | 12
-rw-r--r--  src/test/fuzz/connman.cpp | 2
-rw-r--r--  src/test/fuzz/net.cpp | 23
-rw-r--r--  src/test/interfaces_tests.cpp | 51
-rw-r--r--  src/test/reverselock_tests.cpp | 10
-rw-r--r--  src/test/sync_tests.cpp | 37
-rw-r--r--  src/test/util/setup_common.cpp | 74
-rw-r--r--  src/test/util/setup_common.h | 15
-rw-r--r--  src/test/validation_chainstatemanager_tests.cpp | 14
-rw-r--r--  src/util/time.h | 4
-rw-r--r--  src/validation.cpp | 3
-rw-r--r--  src/validation.h | 2
-rw-r--r--  src/wallet/bdb.cpp | 45
-rw-r--r--  src/wallet/bdb.h | 10
-rw-r--r--  src/wallet/db.cpp | 117
-rw-r--r--  src/wallet/db.h | 8
-rw-r--r--  src/wallet/interfaces.cpp | 2
-rw-r--r--  src/wallet/rpcwallet.cpp | 4
-rw-r--r--  src/wallet/salvage.cpp | 5
-rw-r--r--  src/wallet/sqlite.cpp | 45
-rw-r--r--  src/wallet/sqlite.h | 2
-rw-r--r--  src/wallet/test/coinselector_tests.cpp | 2
-rw-r--r--  src/wallet/test/db_tests.cpp | 7
-rw-r--r--  src/wallet/test/init_test_fixture.cpp | 2
-rw-r--r--  src/wallet/test/init_test_fixture.h | 1
-rw-r--r--  src/wallet/test/ismine_tests.cpp | 3
-rw-r--r--  src/wallet/test/scriptpubkeyman_tests.cpp | 4
-rw-r--r--  src/wallet/test/wallet_test_fixture.cpp | 4
-rw-r--r--  src/wallet/test/wallet_test_fixture.h | 3
-rw-r--r--  src/wallet/test/wallet_tests.cpp | 55
-rw-r--r--  src/wallet/wallet.cpp | 34
-rw-r--r--  src/wallet/walletdb.cpp | 8
-rw-r--r--  src/wallet/walletutil.cpp | 55
-rw-r--r--  src/wallet/walletutil.h | 3
-rwxr-xr-x  test/functional/feature_fee_estimation.py | 6
-rwxr-xr-x  test/functional/feature_taproot.py | 4
-rwxr-xr-x  test/functional/p2p_compactblocks.py | 31
-rwxr-xr-x  test/functional/p2p_invalid_messages.py | 9
-rwxr-xr-x  test/functional/p2p_leak.py | 19
-rwxr-xr-x  test/functional/p2p_timeouts.py | 6
-rwxr-xr-x  test/functional/rpc_estimatefee.py | 2
-rwxr-xr-x  test/functional/rpc_fundrawtransaction.py | 4
-rwxr-xr-x  test/functional/rpc_psbt.py | 4
-rwxr-xr-x  test/functional/rpc_rawtransaction.py | 7
-rwxr-xr-x  test/functional/test_framework/messages.py | 1
-rwxr-xr-x  test/functional/test_framework/p2p.py | 2
-rwxr-xr-x  test/functional/test_framework/test_node.py | 17
-rwxr-xr-x  test/functional/wallet_basic.py | 29
-rwxr-xr-x  test/functional/wallet_bumpfee.py | 2
-rwxr-xr-x  test/functional/wallet_send.py | 4
-rw-r--r--  test/lint/README.md | 15
-rwxr-xr-x  test/lint/git-subtree-check.sh | 57
-rwxr-xr-x  test/lint/lint-circular-dependencies.sh | 1
-rw-r--r--  test/sanitizer_suppressions/tsan | 1
107 files changed, 1489 insertions, 1318 deletions
diff --git a/Makefile.am b/Makefile.am
index ce8bff0f9a..958d68061f 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -25,6 +25,8 @@ BITCOIN_QT_BIN=$(top_builddir)/src/qt/$(BITCOIN_GUI_NAME)$(EXEEXT)
BITCOIN_CLI_BIN=$(top_builddir)/src/$(BITCOIN_CLI_NAME)$(EXEEXT)
BITCOIN_TX_BIN=$(top_builddir)/src/$(BITCOIN_TX_NAME)$(EXEEXT)
BITCOIN_WALLET_BIN=$(top_builddir)/src/$(BITCOIN_WALLET_TOOL_NAME)$(EXEEXT)
+BITCOIN_NODE_BIN=$(top_builddir)/src/$(BITCOIN_MP_NODE_NAME)$(EXEEXT)
+BITCOIN_GUI_BIN=$(top_builddir)/src/$(BITCOIN_MP_GUI_NAME)$(EXEEXT)
BITCOIN_WIN_INSTALLER=$(PACKAGE)-$(PACKAGE_VERSION)-win64-setup$(EXEEXT)
empty :=
@@ -36,12 +38,9 @@ OSX_DMG = $(OSX_VOLNAME).dmg
OSX_BACKGROUND_SVG=background.svg
OSX_BACKGROUND_IMAGE=background.tiff
OSX_BACKGROUND_IMAGE_DPIS=36 72
-OSX_DSSTORE_GEN=$(top_srcdir)/contrib/macdeploy/custom_dsstore.py
OSX_DEPLOY_SCRIPT=$(top_srcdir)/contrib/macdeploy/macdeployqtplus
-OSX_FANCY_PLIST=$(top_srcdir)/contrib/macdeploy/fancy.plist
OSX_INSTALLER_ICONS=$(top_srcdir)/src/qt/res/icons/bitcoin.icns
OSX_PLIST=$(top_builddir)/share/qt/Info.plist #not installed
-OSX_QT_TRANSLATIONS = ar,bg,ca,cs,da,de,es,fa,fi,fr,gd,gl,he,hu,it,ja,ko,lt,lv,pl,pt,ru,sk,sl,sv,uk,zh_CN,zh_TW
DIST_CONTRIB = \
$(top_srcdir)/contrib/linearize/linearize-data.py \
@@ -59,9 +58,8 @@ WINDOWS_PACKAGING = $(top_srcdir)/share/pixmaps/bitcoin.ico \
$(top_srcdir)/share/pixmaps/nsis-wizard.bmp \
$(top_srcdir)/doc/README_windows.txt
-OSX_PACKAGING = $(OSX_DEPLOY_SCRIPT) $(OSX_FANCY_PLIST) $(OSX_INSTALLER_ICONS) \
+OSX_PACKAGING = $(OSX_DEPLOY_SCRIPT) $(OSX_INSTALLER_ICONS) \
$(top_srcdir)/contrib/macdeploy/$(OSX_BACKGROUND_SVG) \
- $(OSX_DSSTORE_GEN) \
$(top_srcdir)/contrib/macdeploy/detached-sig-apply.sh \
$(top_srcdir)/contrib/macdeploy/detached-sig-create.sh
@@ -117,7 +115,7 @@ osx_volname:
if BUILD_DARWIN
$(OSX_DMG): $(OSX_APP_BUILT) $(OSX_PACKAGING) $(OSX_BACKGROUND_IMAGE)
- $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) -add-qt-tr $(OSX_QT_TRANSLATIONS) -translations-dir=$(QT_TRANSLATION_DIR) -dmg -fancy $(OSX_FANCY_PLIST) -verbose 2 -volname $(OSX_VOLNAME)
+ $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) $(OSX_VOLNAME) -translations-dir=$(QT_TRANSLATION_DIR) -dmg
$(OSX_BACKGROUND_IMAGE).png: contrib/macdeploy/$(OSX_BACKGROUND_SVG)
sed 's/PACKAGE_NAME/$(PACKAGE_NAME)/' < "$<" | $(RSVG_CONVERT) -f png -d 36 -p 36 -o $@
@@ -147,11 +145,8 @@ $(APP_DIST_DIR)/.background/$(OSX_BACKGROUND_IMAGE): $(OSX_BACKGROUND_IMAGE_DPIF
$(MKDIR_P) $(@D)
$(TIFFCP) -c none $(OSX_BACKGROUND_IMAGE_DPIFILES) $@
-$(APP_DIST_DIR)/.DS_Store: $(OSX_DSSTORE_GEN)
- $(PYTHON) $< "$@" "$(OSX_VOLNAME)"
-
$(APP_DIST_DIR)/$(OSX_APP)/Contents/MacOS/Bitcoin-Qt: $(OSX_APP_BUILT) $(OSX_PACKAGING)
- INSTALLNAMETOOL=$(INSTALLNAMETOOL) OTOOL=$(OTOOL) STRIP=$(STRIP) $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) -translations-dir=$(QT_TRANSLATION_DIR) -add-qt-tr $(OSX_QT_TRANSLATIONS) -verbose 2
+ INSTALLNAMETOOL=$(INSTALLNAMETOOL) OTOOL=$(OTOOL) STRIP=$(STRIP) $(PYTHON) $(OSX_DEPLOY_SCRIPT) $(OSX_APP) $(OSX_VOLNAME) -translations-dir=$(QT_TRANSLATION_DIR)
deploydir: $(APP_DIST_EXTRAS)
endif
@@ -179,6 +174,12 @@ $(BITCOIN_TX_BIN): FORCE
$(BITCOIN_WALLET_BIN): FORCE
$(MAKE) -C src $(@F)
+$(BITCOIN_NODE_BIN): FORCE
+ $(MAKE) -C src $(@F)
+
+$(BITCOIN_GUI_BIN): FORCE
+ $(MAKE) -C src $(@F)
+
if USE_LCOV
LCOV_FILTER_PATTERN = \
-p "/usr/local/" \
@@ -357,6 +358,7 @@ if TARGET_DARWIN
endif
if TARGET_WINDOWS
$(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_PE
+ $(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/test-symbol-check.py TestSymbolChecks.test_PE
endif
if TARGET_LINUX
$(AM_V_at) $(PYTHON) $(top_srcdir)/contrib/devtools/test-security-check.py TestSecurityChecks.test_ELF
diff --git a/ci/lint/06_script.sh b/ci/lint/06_script.sh
index dc0f9b923b..6f81ecb22e 100755
--- a/ci/lint/06_script.sh
+++ b/ci/lint/06_script.sh
@@ -14,6 +14,8 @@ if [ "$TRAVIS_EVENT_TYPE" = "pull_request" ]; then
test/lint/commit-script-check.sh $COMMIT_RANGE
fi
+# This only checks that the trees are pure subtrees; it does not do a full
+# check with -r, so that all the remotes do not have to be fetched.
test/lint/git-subtree-check.sh src/crypto/ctaes
test/lint/git-subtree-check.sh src/secp256k1
test/lint/git-subtree-check.sh src/univalue
diff --git a/configure.ac b/configure.ac
index 32d1138653..9d951afba1 100644
--- a/configure.ac
+++ b/configure.ac
@@ -24,6 +24,9 @@ BITCOIN_GUI_NAME=bitcoin-qt
BITCOIN_CLI_NAME=bitcoin-cli
BITCOIN_TX_NAME=bitcoin-tx
BITCOIN_WALLET_TOOL_NAME=bitcoin-wallet
+dnl Multi Process
+BITCOIN_MP_NODE_NAME=bitcoin-node
+BITCOIN_MP_GUI_NAME=bitcoin-gui
dnl Unless the user specified ARFLAGS, force it to be cr
AC_ARG_VAR(ARFLAGS, [Flags for the archiver, defaults to <cr> if not set])
@@ -642,16 +645,19 @@ case $host in
dnl It's safe to add these paths even if the functionality is disabled by
dnl the user (--without-wallet or --without-gui for example).
- qt5_prefix=$($BREW --prefix qt5 2>/dev/null)
- if $BREW list --versions berkeley-db4 >/dev/null && test "x$BDB_CFLAGS" = "x" && test "x$BDB_LIBS" = "x" && test "$use_bdb" != "no"; then
+ if test "x$use_bdb" != xno && $BREW list --versions berkeley-db4 >/dev/null && test "x$BDB_CFLAGS" = "x" && test "x$BDB_LIBS" = "x"; then
bdb_prefix=$($BREW --prefix berkeley-db4 2>/dev/null)
dnl This must precede the call to BITCOIN_FIND_BDB48 below.
BDB_CFLAGS="-I$bdb_prefix/include"
BDB_LIBS="-L$bdb_prefix/lib -ldb_cxx-4.8"
fi
- if test x$qt5_prefix != x; then
- PKG_CONFIG_PATH="$qt5_prefix/lib/pkgconfig:$PKG_CONFIG_PATH"
- export PKG_CONFIG_PATH
+
+ if test "x$use_sqlite" != xno && $BREW list --versions sqlite3 >/dev/null; then
+ export PKG_CONFIG_PATH="$($BREW --prefix sqlite3 2>/dev/null)/lib/pkgconfig:$PKG_CONFIG_PATH"
+ fi
+
+ if $BREW list --versions qt5 >/dev/null; then
+ export PKG_CONFIG_PATH="$($BREW --prefix qt5 2>/dev/null)/lib/pkgconfig:$PKG_CONFIG_PATH"
fi
fi
else
@@ -1585,7 +1591,11 @@ AM_CONDITIONAL([ENABLE_ZMQ], [test "x$use_zmq" = "xyes"])
AC_MSG_CHECKING([whether to build test_bitcoin])
if test x$use_tests = xyes; then
- AC_MSG_RESULT([yes])
+ if test "x$enable_fuzz" = "xyes"; then
+ AC_MSG_RESULT([no, because fuzzing is enabled])
+ else
+ AC_MSG_RESULT([yes])
+ fi
BUILD_TEST="yes"
else
AC_MSG_RESULT([no])
@@ -1650,6 +1660,8 @@ AC_SUBST(BITCOIN_GUI_NAME)
AC_SUBST(BITCOIN_CLI_NAME)
AC_SUBST(BITCOIN_TX_NAME)
AC_SUBST(BITCOIN_WALLET_TOOL_NAME)
+AC_SUBST(BITCOIN_MP_NODE_NAME)
+AC_SUBST(BITCOIN_MP_GUI_NAME)
AC_SUBST(RELDFLAGS)
AC_SUBST(DEBUG_CPPFLAGS)
@@ -1761,8 +1773,10 @@ if test x$bitcoin_enable_qt != xno; then
echo " with qr = $use_qr"
fi
echo " with zmq = $use_zmq"
-echo " with test = $use_tests"
-if test x$use_tests != xno; then
+if test x$enable_fuzz == xno; then
+ echo " with test = $use_tests"
+else
+ echo " with test = not building test_bitcoin because fuzzing is enabled"
echo " with fuzz = $enable_fuzz"
fi
echo " with bench = $use_bench"
diff --git a/contrib/devtools/gen-manpages.sh b/contrib/devtools/gen-manpages.sh
index aa65953d83..3fdcda4fd4 100755
--- a/contrib/devtools/gen-manpages.sh
+++ b/contrib/devtools/gen-manpages.sh
@@ -18,6 +18,22 @@ BITCOINQT=${BITCOINQT:-$BINDIR/qt/bitcoin-qt}
[ ! -x $BITCOIND ] && echo "$BITCOIND not found or not executable." && exit 1
+# Don't allow man pages to be generated for binaries built from a dirty tree
+DIRTY=""
+for cmd in $BITCOIND $BITCOINCLI $BITCOINTX $WALLET_TOOL $BITCOINQT; do
+ VERSION_OUTPUT=$($cmd --version)
+ if [[ $VERSION_OUTPUT == *"dirty"* ]]; then
+ DIRTY="${DIRTY}${cmd}\n"
+ fi
+done
+if [ -n "$DIRTY" ]
+then
+ echo -e "WARNING: the following binaries were built from a dirty tree:\n"
+ echo -e $DIRTY
+ echo "man pages generated from dirty binaries should NOT be committed."
+ echo "To properly generate man pages, please commit your changes to the above binaries, rebuild them, then run this script again."
+fi
+
# The autodetected version git tag can screw up manpage output a little bit
read -r -a BTCVER <<< "$($BITCOINCLI --version | head -n1 | awk -F'[ -]' '{ print $6, $7 }')"
diff --git a/contrib/devtools/test-symbol-check.py b/contrib/devtools/test-symbol-check.py
index b07ec2ffdf..18ed7d61e0 100755
--- a/contrib/devtools/test-symbol-check.py
+++ b/contrib/devtools/test-symbol-check.py
@@ -120,6 +120,43 @@ class TestSymbolChecks(unittest.TestCase):
self.assertEqual(call_symbol_check(cc, source, executable, ['-framework', 'CoreGraphics']),
(0, ''))
+ def test_PE(self):
+ source = 'test1.c'
+ executable = 'test1.exe'
+ cc = 'x86_64-w64-mingw32-gcc'
+
+ with open(source, 'w', encoding="utf8") as f:
+ f.write('''
+ #include <pdh.h>
+
+ int main()
+ {
+ PdhConnectMachineA(NULL);
+ return 0;
+ }
+ ''')
+
+ self.assertEqual(call_symbol_check(cc, source, executable, ['-lpdh']),
+ (1, 'pdh.dll is not in ALLOWED_LIBRARIES!\n' +
+ executable + ': failed DYNAMIC_LIBRARIES'))
+
+ source = 'test2.c'
+ executable = 'test2.exe'
+ with open(source, 'w', encoding="utf8") as f:
+ f.write('''
+ #include <windows.h>
+
+ int main()
+ {
+ CoFreeUnusedLibrariesEx(0,0);
+ return 0;
+ }
+ ''')
+
+ self.assertEqual(call_symbol_check(cc, source, executable, ['-lole32']),
+ (0, ''))
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/contrib/macdeploy/README.md b/contrib/macdeploy/README.md
index fe677e3a1f..6c3db2620b 100644
--- a/contrib/macdeploy/README.md
+++ b/contrib/macdeploy/README.md
@@ -6,11 +6,7 @@ The `macdeployqtplus` script should not be run manually. Instead, after building
make deploy
```
-During the deployment process, the disk image window will pop up briefly
-when the fancy settings are applied. This is normal, please do not interfere,
-the process will unmount the DMG and cleanup before finishing.
-
-When complete, it will have produced `Bitcoin-Qt.dmg`.
+When complete, it will have produced `Bitcoin-Core.dmg`.
## SDK Extraction
@@ -111,7 +107,7 @@ broken. Only the compression feature is currently used. Ideally, the creation co
and `genisoimage` would no longer be necessary.
Background images and other features can be added to DMG files by inserting a
-`.DS_Store` before creation. This is generated by the script `contrib/macdeploy/custom_dsstore.py`.
+`.DS_Store` during creation.
As of OS X 10.9 Mavericks, using an Apple-blessed key to sign binaries is a requirement in
order to satisfy the new Gatekeeper requirements. Because this private key cannot be
diff --git a/contrib/macdeploy/custom_dsstore.py b/contrib/macdeploy/custom_dsstore.py
deleted file mode 100755
index 7ab42ea5d4..0000000000
--- a/contrib/macdeploy/custom_dsstore.py
+++ /dev/null
@@ -1,58 +0,0 @@
-#!/usr/bin/env python3
-# Copyright (c) 2013-2018 The Bitcoin Core developers
-# Distributed under the MIT software license, see the accompanying
-# file COPYING or http://www.opensource.org/licenses/mit-license.php.
-from ds_store import DSStore
-from mac_alias import Alias
-import sys
-
-output_file = sys.argv[1]
-package_name_ns = sys.argv[2]
-
-ds = DSStore.open(output_file, 'w+')
-ds['.']['bwsp'] = {
- 'ShowStatusBar': False,
- 'WindowBounds': '{{300, 280}, {500, 343}}',
- 'ContainerShowSidebar': False,
- 'SidebarWidth': 0,
- 'ShowTabView': False,
- 'PreviewPaneVisibility': False,
- 'ShowToolbar': False,
- 'ShowSidebar': False,
- 'ShowPathbar': True
-}
-
-icvp = {
- 'gridOffsetX': 0.0,
- 'textSize': 12.0,
- 'viewOptionsVersion': 1,
- 'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
- 'backgroundColorBlue': 1.0,
- 'iconSize': 96.0,
- 'backgroundColorGreen': 1.0,
- 'arrangeBy': 'none',
- 'showIconPreview': True,
- 'gridSpacing': 100.0,
- 'gridOffsetY': 0.0,
- 'showItemInfo': False,
- 'labelOnBottom': True,
- 'backgroundType': 2,
- 'backgroundColorRed': 1.0
-}
-alias = Alias.from_bytes(icvp['backgroundImageAlias'])
-alias.volume.name = package_name_ns
-alias.volume.posix_path = '/Volumes/' + package_name_ns
-alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg'
-alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00bitcoinuser:\x00Documents:\x00bitcoin:\x00bitcoin:\x00' + package_name_ns + '.temp.dmg'
-alias.volume.disk_image_alias.target.posix_path = 'Users/bitcoinuser/Documents/bitcoin/bitcoin/' + package_name_ns + '.temp.dmg'
-alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff'
-icvp['backgroundImageAlias'] = alias.to_bytes()
-ds['.']['icvp'] = icvp
-
-ds['.']['vSrn'] = ('long', 1)
-
-ds['Applications']['Iloc'] = (370, 156)
-ds['Bitcoin-Qt.app']['Iloc'] = (128, 156)
-
-ds.flush()
-ds.close()
diff --git a/contrib/macdeploy/fancy.plist b/contrib/macdeploy/fancy.plist
deleted file mode 100644
index ef277a7f14..0000000000
--- a/contrib/macdeploy/fancy.plist
+++ /dev/null
@@ -1,32 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
- <key>window_bounds</key>
- <array>
- <integer>300</integer>
- <integer>300</integer>
- <integer>800</integer>
- <integer>620</integer>
- </array>
- <key>background_picture</key>
- <string>background.tiff</string>
- <key>icon_size</key>
- <integer>96</integer>
- <key>applications_symlink</key>
- <true/>
- <key>items_position</key>
- <dict>
- <key>Applications</key>
- <array>
- <integer>370</integer>
- <integer>156</integer>
- </array>
- <key>Bitcoin-Qt.app</key>
- <array>
- <integer>128</integer>
- <integer>156</integer>
- </array>
- </dict>
-</dict>
-</plist>
diff --git a/contrib/macdeploy/macdeployqtplus b/contrib/macdeploy/macdeployqtplus
index 524104398b..9bf3305288 100755
--- a/contrib/macdeploy/macdeployqtplus
+++ b/contrib/macdeploy/macdeployqtplus
@@ -16,9 +16,13 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
-import subprocess, sys, re, os, shutil, stat, os.path, time
-from string import Template
+import plistlib
+import sys, re, os, shutil, stat, os.path
from argparse import ArgumentParser
+from ds_store import DSStore
+from mac_alias import Alias
+from pathlib import Path
+from subprocess import PIPE, run
from typing import List, Optional
# This is ported from the original macdeployqt with modifications
@@ -49,28 +53,18 @@ class FrameworkInfo(object):
return False
def __str__(self):
- return """ Framework name: {}
- Framework directory: {}
- Framework path: {}
- Binary name: {}
- Binary directory: {}
- Binary path: {}
- Version: {}
- Install name: {}
- Deployed install name: {}
- Source file Path: {}
- Deployed Directory (relative to bundle): {}
-""".format(self.frameworkName,
- self.frameworkDirectory,
- self.frameworkPath,
- self.binaryName,
- self.binaryDirectory,
- self.binaryPath,
- self.version,
- self.installName,
- self.deployedInstallName,
- self.sourceFilePath,
- self.destinationDirectory)
+ return f""" Framework name: {frameworkName}
+ Framework directory: {self.frameworkDirectory}
+ Framework path: {self.frameworkPath}
+ Binary name: {self.binaryName}
+ Binary directory: {self.binaryDirectory}
+ Binary path: {self.binaryPath}
+ Version: {self.version}
+ Install name: {self.installName}
+ Deployed install name: {self.deployedInstallName}
+ Source file Path: {self.sourceFilePath}
+ Deployed Directory (relative to bundle): {self.destinationDirectory}
+"""
def isDylib(self):
return self.frameworkName.endswith(".dylib")
@@ -97,7 +91,7 @@ class FrameworkInfo(object):
m = cls.reOLine.match(line)
if m is None:
- raise RuntimeError("otool line could not be parsed: " + line)
+ raise RuntimeError(f"otool line could not be parsed: {line}")
path = m.group(1)
@@ -117,7 +111,7 @@ class FrameworkInfo(object):
info.version = "-"
info.installName = path
- info.deployedInstallName = "@executable_path/../Frameworks/" + info.binaryName
+ info.deployedInstallName = f"@executable_path/../Frameworks/{info.binaryName}"
info.sourceFilePath = path
info.destinationDirectory = cls.bundleFrameworkDirectory
else:
@@ -129,7 +123,7 @@ class FrameworkInfo(object):
break
i += 1
if i == len(parts):
- raise RuntimeError("Could not find .framework or .dylib in otool line: " + line)
+ raise RuntimeError(f"Could not find .framework or .dylib in otool line: {line}")
info.frameworkName = parts[i]
info.frameworkDirectory = "/".join(parts[:i])
@@ -140,7 +134,7 @@ class FrameworkInfo(object):
info.binaryPath = os.path.join(info.binaryDirectory, info.binaryName)
info.version = parts[i+2]
- info.deployedInstallName = "@executable_path/../Frameworks/" + os.path.join(info.frameworkName, info.binaryPath)
+ info.deployedInstallName = f"@executable_path/../Frameworks/{os.path.join(info.frameworkName, info.binaryPath)}"
info.destinationDirectory = os.path.join(cls.bundleFrameworkDirectory, info.frameworkName, info.binaryDirectory)
info.sourceResourcesDirectory = os.path.join(info.frameworkPath, "Resources")
@@ -154,10 +148,10 @@ class FrameworkInfo(object):
class ApplicationBundleInfo(object):
def __init__(self, path: str):
self.path = path
- appName = "Bitcoin-Qt"
- self.binaryPath = os.path.join(path, "Contents", "MacOS", appName)
+ # for backwards compatibility reasons, this must remain as Bitcoin-Qt
+ self.binaryPath = os.path.join(path, "Contents", "MacOS", "Bitcoin-Qt")
if not os.path.exists(self.binaryPath):
- raise RuntimeError("Could not find bundle binary for " + path)
+ raise RuntimeError(f"Could not find bundle binary for {path}")
self.resourcesPath = os.path.join(path, "Contents", "Resources")
self.pluginPath = os.path.join(path, "Contents", "PlugIns")
@@ -181,30 +175,26 @@ class DeploymentInfo(object):
self.pluginPath = pluginPath
def usesFramework(self, name: str) -> bool:
- nameDot = "{}.".format(name)
- libNameDot = "lib{}.".format(name)
for framework in self.deployedFrameworks:
if framework.endswith(".framework"):
- if framework.startswith(nameDot):
+ if framework.startswith(f"{name}."):
return True
elif framework.endswith(".dylib"):
- if framework.startswith(libNameDot):
+ if framework.startswith(f"lib{name}."):
return True
return False
def getFrameworks(binaryPath: str, verbose: int) -> List[FrameworkInfo]:
- if verbose >= 3:
- print("Inspecting with otool: " + binaryPath)
+ if verbose:
+ print(f"Inspecting with otool: {binaryPath}")
otoolbin=os.getenv("OTOOL", "otool")
- otool = subprocess.Popen([otoolbin, "-L", binaryPath], stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
- o_stdout, o_stderr = otool.communicate()
+ otool = run([otoolbin, "-L", binaryPath], stdout=PIPE, stderr=PIPE, universal_newlines=True)
if otool.returncode != 0:
- if verbose >= 1:
- sys.stderr.write(o_stderr)
- sys.stderr.flush()
- raise RuntimeError("otool failed with return code {}".format(otool.returncode))
+ sys.stderr.write(otool.stderr)
+ sys.stderr.flush()
+ raise RuntimeError(f"otool failed with return code {otool.returncode}")
- otoolLines = o_stdout.split("\n")
+ otoolLines = otool.stdout.split("\n")
otoolLines.pop(0) # First line is the inspected binary
if ".framework" in binaryPath or binaryPath.endswith(".dylib"):
otoolLines.pop(0) # Frameworks and dylibs list themselves as a dependency.
@@ -214,7 +204,7 @@ def getFrameworks(binaryPath: str, verbose: int) -> List[FrameworkInfo]:
line = line.replace("@loader_path", os.path.dirname(binaryPath))
info = FrameworkInfo.fromOtoolLibraryLine(line.strip())
if info is not None:
- if verbose >= 3:
+ if verbose:
print("Found framework:")
print(info)
libraries.append(info)
@@ -223,10 +213,10 @@ def getFrameworks(binaryPath: str, verbose: int) -> List[FrameworkInfo]:
def runInstallNameTool(action: str, *args):
installnametoolbin=os.getenv("INSTALLNAMETOOL", "install_name_tool")
- subprocess.check_call([installnametoolbin, "-"+action] + list(args))
+ run([installnametoolbin, "-"+action] + list(args), check=True)
def changeInstallName(oldName: str, newName: str, binaryPath: str, verbose: int):
- if verbose >= 3:
+ if verbose:
print("Using install_name_tool:")
print(" in", binaryPath)
print(" change reference", oldName)
@@ -234,7 +224,7 @@ def changeInstallName(oldName: str, newName: str, binaryPath: str, verbose: int)
runInstallNameTool("change", oldName, newName, binaryPath)
def changeIdentification(id: str, binaryPath: str, verbose: int):
- if verbose >= 3:
+ if verbose:
print("Using install_name_tool:")
print(" change identification in", binaryPath)
print(" to", id)
@@ -242,22 +232,22 @@ def changeIdentification(id: str, binaryPath: str, verbose: int):
def runStrip(binaryPath: str, verbose: int):
stripbin=os.getenv("STRIP", "strip")
- if verbose >= 3:
+ if verbose:
print("Using strip:")
print(" stripped", binaryPath)
- subprocess.check_call([stripbin, "-x", binaryPath])
+ run([stripbin, "-x", binaryPath], check=True)
def copyFramework(framework: FrameworkInfo, path: str, verbose: int) -> Optional[str]:
if framework.sourceFilePath.startswith("Qt"):
#standard place for Nokia Qt installer's frameworks
- fromPath = "/Library/Frameworks/" + framework.sourceFilePath
+ fromPath = f"/Library/Frameworks/{framework.sourceFilePath}"
else:
fromPath = framework.sourceFilePath
toDir = os.path.join(path, framework.destinationDirectory)
toPath = os.path.join(toDir, framework.binaryName)
if not os.path.exists(fromPath):
- raise RuntimeError("No file at " + fromPath)
+ raise RuntimeError(f"No file at {fromPath}")
if os.path.exists(toPath):
return None # Already there
@@ -266,7 +256,7 @@ def copyFramework(framework: FrameworkInfo, path: str, verbose: int) -> Optional
os.makedirs(toDir)
shutil.copy2(fromPath, toPath)
- if verbose >= 3:
+ if verbose:
print("Copied:", fromPath)
print(" to:", toPath)
@@ -280,13 +270,12 @@ def copyFramework(framework: FrameworkInfo, path: str, verbose: int) -> Optional
linkto = framework.version
if not os.path.exists(linkfrom):
os.symlink(linkto, linkfrom)
- if verbose >= 2:
- print("Linked:", linkfrom, "->", linkto)
+ print("Linked:", linkfrom, "->", linkto)
fromResourcesDir = framework.sourceResourcesDirectory
if os.path.exists(fromResourcesDir):
toResourcesDir = os.path.join(path, framework.destinationResourcesDirectory)
shutil.copytree(fromResourcesDir, toResourcesDir, symlinks=True)
- if verbose >= 3:
+ if verbose:
print("Copied resources:", fromResourcesDir)
print(" to:", toResourcesDir)
fromContentsDir = framework.sourceVersionContentsDirectory
@@ -295,7 +284,7 @@ def copyFramework(framework: FrameworkInfo, path: str, verbose: int) -> Optional
if os.path.exists(fromContentsDir):
toContentsDir = os.path.join(path, framework.destinationVersionContentsDirectory)
shutil.copytree(fromContentsDir, toContentsDir, symlinks=True)
- if verbose >= 3:
+ if verbose:
print("Copied Contents:", fromContentsDir)
print(" to:", toContentsDir)
elif framework.frameworkName.startswith("libQtGui"): # Copy qt_menu.nib (applies to non-framework layout)
@@ -303,7 +292,7 @@ def copyFramework(framework: FrameworkInfo, path: str, verbose: int) -> Optional
qtMenuNibDestinationPath = os.path.join(path, "Contents", "Resources", "qt_menu.nib")
if os.path.exists(qtMenuNibSourcePath) and not os.path.exists(qtMenuNibDestinationPath):
shutil.copytree(qtMenuNibSourcePath, qtMenuNibDestinationPath, symlinks=True)
- if verbose >= 3:
+ if verbose:
print("Copied for libQtGui:", qtMenuNibSourcePath)
print(" to:", qtMenuNibDestinationPath)
@@ -317,16 +306,14 @@ def deployFrameworks(frameworks: List[FrameworkInfo], bundlePath: str, binaryPat
framework = frameworks.pop(0)
deploymentInfo.deployedFrameworks.append(framework.frameworkName)
- if verbose >= 2:
- print("Processing", framework.frameworkName, "...")
+ print("Processing", framework.frameworkName, "...")
# Get the Qt path from one of the Qt frameworks
if deploymentInfo.qtPath is None and framework.isQtFramework():
deploymentInfo.detectQtPath(framework.frameworkDirectory)
if framework.installName.startswith("@executable_path") or framework.installName.startswith(bundlePath):
- if verbose >= 2:
- print(framework.frameworkName, "already deployed, skipping.")
+ print(framework.frameworkName, "already deployed, skipping.")
continue
# install_name_tool the new id into the binary
@@ -357,8 +344,8 @@ def deployFrameworks(frameworks: List[FrameworkInfo], bundlePath: str, binaryPat
def deployFrameworksForAppBundle(applicationBundle: ApplicationBundleInfo, strip: bool, verbose: int) -> DeploymentInfo:
frameworks = getFrameworks(applicationBundle.binaryPath, verbose)
- if len(frameworks) == 0 and verbose >= 1:
- print("Warning: Could not find any external frameworks to deploy in {}.".format(applicationBundle.path))
+ if len(frameworks) == 0:
+ print(f"Warning: Could not find any external frameworks to deploy in {applicationBundle.path}.")
return DeploymentInfo()
else:
return deployFrameworks(frameworks, applicationBundle.path, applicationBundle.binaryPath, strip, verbose)
@@ -477,8 +464,7 @@ def deployPlugins(appBundleInfo: ApplicationBundleInfo, deploymentInfo: Deployme
plugins.append((pluginDirectory, pluginName))
for pluginDirectory, pluginName in plugins:
- if verbose >= 2:
- print("Processing plugin", os.path.join(pluginDirectory, pluginName), "...")
+ print("Processing plugin", os.path.join(pluginDirectory, pluginName), "...")
sourcePath = os.path.join(deploymentInfo.pluginPath, pluginDirectory, pluginName)
destinationDirectory = os.path.join(appBundleInfo.pluginPath, pluginDirectory)
@@ -487,7 +473,7 @@ def deployPlugins(appBundleInfo: ApplicationBundleInfo, deploymentInfo: Deployme
destinationPath = os.path.join(destinationDirectory, pluginName)
shutil.copy2(sourcePath, destinationPath)
- if verbose >= 3:
+ if verbose:
print("Copied:", sourcePath)
print(" to:", destinationPath)
@@ -503,147 +489,50 @@ def deployPlugins(appBundleInfo: ApplicationBundleInfo, deploymentInfo: Deployme
if dependency.frameworkName not in deploymentInfo.deployedFrameworks:
deployFrameworks([dependency], appBundleInfo.path, destinationPath, strip, verbose, deploymentInfo)
-qt_conf="""[Paths]
-Translations=Resources
-Plugins=PlugIns
-"""
-
ap = ArgumentParser(description="""Improved version of macdeployqt.
Outputs a ready-to-deploy app in a folder "dist" and optionally wraps it in a .dmg file.
Note, that the "dist" folder will be deleted before deploying on each run.
-Optionally, Qt translation files (.qm) and additional resources can be added to the bundle.
-
-Also optionally signs the .app bundle; set the CODESIGNARGS environment variable to pass arguments
-to the codesign tool.
-E.g. CODESIGNARGS='--sign "Developer ID Application: ..." --keychain /encrypted/foo.keychain'""")
+Optionally, Qt translation files (.qm) can be added to the bundle.""")
ap.add_argument("app_bundle", nargs=1, metavar="app-bundle", help="application bundle to be deployed")
-ap.add_argument("-verbose", type=int, nargs=1, default=[1], metavar="<0-3>", help="0 = no output, 1 = error/warning (default), 2 = normal, 3 = debug")
+ap.add_argument("appname", nargs=1, metavar="appname", help="name of the app being deployed")
+ap.add_argument("-verbose", nargs="?", const=True, help="Output additional debugging information")
ap.add_argument("-no-plugins", dest="plugins", action="store_false", default=True, help="skip plugin deployment")
ap.add_argument("-no-strip", dest="strip", action="store_false", default=True, help="don't run 'strip' on the binaries")
-ap.add_argument("-sign", dest="sign", action="store_true", default=False, help="sign .app bundle with codesign tool")
-ap.add_argument("-dmg", nargs="?", const="", metavar="basename", help="create a .dmg disk image; if basename is not specified, a camel-cased version of the app name is used")
-ap.add_argument("-fancy", nargs=1, metavar="plist", default=[], help="make a fancy looking disk image using the given plist file with instructions; requires -dmg to work")
-ap.add_argument("-add-qt-tr", nargs=1, metavar="languages", default=[], help="add Qt translation files to the bundle's resources; the language list must be separated with commas, not with whitespace")
-ap.add_argument("-translations-dir", nargs=1, metavar="path", default=None, help="Path to Qt's translation files")
-ap.add_argument("-add-resources", nargs="+", metavar="path", default=[], help="list of additional files or folders to be copied into the bundle's resources; must be the last argument")
-ap.add_argument("-volname", nargs=1, metavar="volname", default=[], help="custom volume name for dmg")
+ap.add_argument("-dmg", nargs="?", const="", metavar="basename", help="create a .dmg disk image")
+ap.add_argument("-translations-dir", nargs=1, metavar="path", default=None, help="Path to Qt's translations. Base translations will automatically be added to the bundle's resources.")
config = ap.parse_args()
-verbose = config.verbose[0]
+verbose = config.verbose
# ------------------------------------------------
app_bundle = config.app_bundle[0]
+appname = config.appname[0]
if not os.path.exists(app_bundle):
- if verbose >= 1:
- sys.stderr.write("Error: Could not find app bundle \"{}\"\n".format(app_bundle))
+ sys.stderr.write(f"Error: Could not find app bundle \"{app_bundle}\"\n")
sys.exit(1)
-app_bundle_name = os.path.splitext(os.path.basename(app_bundle))[0]
-
-# ------------------------------------------------
-translations_dir = None
-if config.translations_dir and config.translations_dir[0]:
- if os.path.exists(config.translations_dir[0]):
- translations_dir = config.translations_dir[0]
- else:
- if verbose >= 1:
- sys.stderr.write("Error: Could not find translation dir \"{}\"\n".format(translations_dir))
- sys.exit(1)
-# ------------------------------------------------
-
-for p in config.add_resources:
- if verbose >= 3:
- print("Checking for \"%s\"..." % p)
- if not os.path.exists(p):
- if verbose >= 1:
- sys.stderr.write("Error: Could not find additional resource file \"{}\"\n".format(p))
- sys.exit(1)
-
-# ------------------------------------------------
-
-if len(config.fancy) == 1:
- if verbose >= 3:
- print("Fancy: Importing plistlib...")
- try:
- import plistlib
- except ImportError:
- if verbose >= 1:
- sys.stderr.write("Error: Could not import plistlib which is required for fancy disk images.\n")
- sys.exit(1)
-
- p = config.fancy[0]
- if verbose >= 3:
- print("Fancy: Loading \"{}\"...".format(p))
- if not os.path.exists(p):
- if verbose >= 1:
- sys.stderr.write("Error: Could not find fancy disk image plist at \"{}\"\n".format(p))
- sys.exit(1)
-
- try:
- with open(p, 'rb') as fp:
- fancy = plistlib.load(fp, fmt=plistlib.FMT_XML)
- except:
- if verbose >= 1:
- sys.stderr.write("Error: Could not parse fancy disk image plist at \"{}\"\n".format(p))
- sys.exit(1)
-
- try:
- assert "window_bounds" not in fancy or (isinstance(fancy["window_bounds"], list) and len(fancy["window_bounds"]) == 4)
- assert "background_picture" not in fancy or isinstance(fancy["background_picture"], str)
- assert "icon_size" not in fancy or isinstance(fancy["icon_size"], int)
- assert "applications_symlink" not in fancy or isinstance(fancy["applications_symlink"], bool)
- if "items_position" in fancy:
- assert isinstance(fancy["items_position"], dict)
- for key, value in fancy["items_position"].items():
- assert isinstance(value, list) and len(value) == 2 and isinstance(value[0], int) and isinstance(value[1], int)
- except:
- if verbose >= 1:
- sys.stderr.write("Error: Bad format of fancy disk image plist at \"{}\"\n".format(p))
- sys.exit(1)
-
- if "background_picture" in fancy:
- bp = fancy["background_picture"]
- if verbose >= 3:
- print("Fancy: Resolving background picture \"{}\"...".format(bp))
- if not os.path.exists(bp):
- bp = os.path.join(os.path.dirname(p), bp)
- if not os.path.exists(bp):
- if verbose >= 1:
- sys.stderr.write("Error: Could not find background picture at \"{}\" or \"{}\"\n".format(fancy["background_picture"], bp))
- sys.exit(1)
- else:
- fancy["background_picture"] = bp
-else:
- fancy = None
-
# ------------------------------------------------
if os.path.exists("dist"):
- if verbose >= 2:
- print("+ Removing old dist folder +")
-
+ print("+ Removing existing dist folder +")
shutil.rmtree("dist")
-# ------------------------------------------------
-
-if len(config.volname) == 1:
- volname = config.volname[0]
-else:
- volname = app_bundle_name
+if os.path.exists(appname + ".dmg"):
+ print("+ Removing existing DMG +")
+ os.unlink(appname + ".dmg")
# ------------------------------------------------
target = os.path.join("dist", "Bitcoin-Qt.app")
-if verbose >= 2:
- print("+ Copying source bundle +")
-if verbose >= 3:
+print("+ Copying source bundle +")
+if verbose:
print(app_bundle, "->", target)
os.mkdir("dist")
@@ -653,257 +542,154 @@ applicationBundle = ApplicationBundleInfo(target)
# ------------------------------------------------
-if verbose >= 2:
- print("+ Deploying frameworks +")
+print("+ Deploying frameworks +")
try:
deploymentInfo = deployFrameworksForAppBundle(applicationBundle, config.strip, verbose)
if deploymentInfo.qtPath is None:
deploymentInfo.qtPath = os.getenv("QTDIR", None)
if deploymentInfo.qtPath is None:
- if verbose >= 1:
- sys.stderr.write("Warning: Could not detect Qt's path, skipping plugin deployment!\n")
+ sys.stderr.write("Warning: Could not detect Qt's path, skipping plugin deployment!\n")
config.plugins = False
except RuntimeError as e:
- if verbose >= 1:
- sys.stderr.write("Error: {}\n".format(str(e)))
+ sys.stderr.write(f"Error: {str(e)}\n")
sys.exit(1)
# ------------------------------------------------
if config.plugins:
- if verbose >= 2:
- print("+ Deploying plugins +")
+ print("+ Deploying plugins +")
try:
deployPlugins(applicationBundle, deploymentInfo, config.strip, verbose)
except RuntimeError as e:
- if verbose >= 1:
- sys.stderr.write("Error: {}\n".format(str(e)))
+ sys.stderr.write(f"Error: {str(e)}\n")
sys.exit(1)
# ------------------------------------------------
-if len(config.add_qt_tr) == 0:
- add_qt_tr = []
-else:
- if translations_dir is not None:
- qt_tr_dir = translations_dir
- else:
- if deploymentInfo.qtPath is not None:
- qt_tr_dir = os.path.join(deploymentInfo.qtPath, "translations")
- else:
- sys.stderr.write("Error: Could not find Qt translation path\n")
- sys.exit(1)
- add_qt_tr = ["qt_{}.qm".format(lng) for lng in config.add_qt_tr[0].split(",")]
- for lng_file in add_qt_tr:
- p = os.path.join(qt_tr_dir, lng_file)
- if verbose >= 3:
- print("Checking for \"{}\"...".format(p))
- if not os.path.exists(p):
- if verbose >= 1:
- sys.stderr.write("Error: Could not find Qt translation file \"{}\"\n".format(lng_file))
- sys.exit(1)
+if config.translations_dir:
+ if not Path(config.translations_dir[0]).exists():
+ sys.stderr.write(f"Error: Could not find translation dir \"{config.translations_dir[0]}\"\n")
+ sys.exit(1)
+
+print("+ Adding Qt translations +")
+
+translations = Path(config.translations_dir[0])
+
+regex = re.compile('qt_[a-z]*(.qm|_[A-Z]*.qm)')
+
+lang_files = [x for x in translations.iterdir() if regex.match(x.name)]
+
+for file in lang_files:
+ if verbose:
+ print(file.as_posix(), "->", os.path.join(applicationBundle.resourcesPath, file.name))
+ shutil.copy2(file.as_posix(), os.path.join(applicationBundle.resourcesPath, file.name))
# ------------------------------------------------
-if verbose >= 2:
- print("+ Installing qt.conf +")
+print("+ Installing qt.conf +")
+
+qt_conf="""[Paths]
+Translations=Resources
+Plugins=PlugIns
+"""
with open(os.path.join(applicationBundle.resourcesPath, "qt.conf"), "wb") as f:
f.write(qt_conf.encode())
# ------------------------------------------------
-if len(add_qt_tr) > 0 and verbose >= 2:
- print("+ Adding Qt translations +")
-
-for lng_file in add_qt_tr:
- if verbose >= 3:
- print(os.path.join(qt_tr_dir, lng_file), "->", os.path.join(applicationBundle.resourcesPath, lng_file))
- shutil.copy2(os.path.join(qt_tr_dir, lng_file), os.path.join(applicationBundle.resourcesPath, lng_file))
+print("+ Generating .DS_Store +")
+
+output_file = os.path.join("dist", ".DS_Store")
+
+ds = DSStore.open(output_file, 'w+')
+
+ds['.']['bwsp'] = {
+ 'WindowBounds': '{{300, 280}, {500, 343}}',
+ 'PreviewPaneVisibility': False,
+}
+
+icvp = {
+ 'gridOffsetX': 0.0,
+ 'textSize': 12.0,
+ 'viewOptionsVersion': 1,
+ 'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00',
+ 'backgroundColorBlue': 1.0,
+ 'iconSize': 96.0,
+ 'backgroundColorGreen': 1.0,
+ 'arrangeBy': 'none',
+ 'showIconPreview': True,
+ 'gridSpacing': 100.0,
+ 'gridOffsetY': 0.0,
+ 'showItemInfo': False,
+ 'labelOnBottom': True,
+ 'backgroundType': 2,
+ 'backgroundColorRed': 1.0
+}
+alias = Alias().from_bytes(icvp['backgroundImageAlias'])
+alias.volume.name = appname
+alias.volume.posix_path = '/Volumes/' + appname
+icvp['backgroundImageAlias'] = alias.to_bytes()
+ds['.']['icvp'] = icvp
+
+ds['.']['vSrn'] = ('long', 1)
+
+ds['Applications']['Iloc'] = (370, 156)
+ds['Bitcoin-Qt.app']['Iloc'] = (128, 156)
+
+ds.flush()
+ds.close()
# ------------------------------------------------
-if len(config.add_resources) > 0 and verbose >= 2:
- print("+ Adding additional resources +")
+if config.dmg is not None:
-for p in config.add_resources:
- t = os.path.join(applicationBundle.resourcesPath, os.path.basename(p))
- if verbose >= 3:
- print(p, "->", t)
- if os.path.isdir(p):
- shutil.copytree(p, t, symlinks=True)
- else:
- shutil.copy2(p, t)
+ print("+ Preparing .dmg disk image +")
-# ------------------------------------------------
+ if verbose:
+ print("Determining size of \"dist\"...")
+ size = 0
+ for path, dirs, files in os.walk("dist"):
+ for file in files:
+ size += os.path.getsize(os.path.join(path, file))
+ size += int(size * 0.15)
-if config.sign and 'CODESIGNARGS' not in os.environ:
- print("You must set the CODESIGNARGS environment variable. Skipping signing.")
-elif config.sign:
- if verbose >= 1:
- print("Code-signing app bundle {}".format(target))
- subprocess.check_call("codesign --force {} {}".format(os.environ['CODESIGNARGS'], target), shell=True)
+ if verbose:
+ print("Creating temp image for modification...")
-# ------------------------------------------------
+ tempname: str = appname + ".temp.dmg"
-if config.dmg is not None:
+ run(["hdiutil", "create", tempname, "-srcfolder", "dist", "-format", "UDRW", "-size", str(size), "-volname", appname], check=True, universal_newlines=True)
- def runHDIUtil(verb: str, image_basename: str, **kwargs) -> int:
- hdiutil_args = ["hdiutil", verb, image_basename + ".dmg"]
- if "capture_stdout" in kwargs:
- del kwargs["capture_stdout"]
- run = subprocess.check_output
- else:
- if verbose < 2:
- hdiutil_args.append("-quiet")
- elif verbose >= 3:
- hdiutil_args.append("-verbose")
- run = subprocess.check_call
-
- for key, value in kwargs.items():
- hdiutil_args.append("-" + key)
- if value is not True:
- hdiutil_args.append(str(value))
-
- return run(hdiutil_args, universal_newlines=True)
-
- if verbose >= 2:
- if fancy is None:
- print("+ Creating .dmg disk image +")
- else:
- print("+ Preparing .dmg disk image +")
-
- if config.dmg != "":
- dmg_name = config.dmg
- else:
- spl = app_bundle_name.split(" ")
- dmg_name = spl[0] + "".join(p.capitalize() for p in spl[1:])
-
- if fancy is None:
- try:
- runHDIUtil("create", dmg_name, srcfolder="dist", format="UDBZ", volname=volname, ov=True)
- except subprocess.CalledProcessError as e:
- sys.exit(e.returncode)
- else:
- if verbose >= 3:
- print("Determining size of \"dist\"...")
- size = 0
- for path, dirs, files in os.walk("dist"):
- for file in files:
- size += os.path.getsize(os.path.join(path, file))
- size += int(size * 0.15)
-
- if verbose >= 3:
- print("Creating temp image for modification...")
- try:
- runHDIUtil("create", dmg_name + ".temp", srcfolder="dist", format="UDRW", size=size, volname=volname, ov=True)
- except subprocess.CalledProcessError as e:
- sys.exit(e.returncode)
-
- if verbose >= 3:
- print("Attaching temp image...")
- try:
- output = runHDIUtil("attach", dmg_name + ".temp", readwrite=True, noverify=True, noautoopen=True, capture_stdout=True)
- except subprocess.CalledProcessError as e:
- sys.exit(e.returncode)
-
- m = re.search(r"/Volumes/(.+$)", output)
- disk_root = m.group(0)
- disk_name = m.group(1)
-
- if verbose >= 2:
- print("+ Applying fancy settings +")
-
- if "background_picture" in fancy:
- bg_path = os.path.join(disk_root, ".background", os.path.basename(fancy["background_picture"]))
- os.mkdir(os.path.dirname(bg_path))
- if verbose >= 3:
- print(fancy["background_picture"], "->", bg_path)
- shutil.copy2(fancy["background_picture"], bg_path)
- else:
- bg_path = None
-
- if fancy.get("applications_symlink", False):
- os.symlink("/Applications", os.path.join(disk_root, "Applications"))
-
- # The Python appscript package broke with OSX 10.8 and isn't being fixed.
- # So we now build up an AppleScript string and use the osascript command
- # to make the .dmg file pretty:
- appscript = Template( """
- on run argv
- tell application "Finder"
- tell disk "$disk"
- open
- set current view of container window to icon view
- set toolbar visible of container window to false
- set statusbar visible of container window to false
- set the bounds of container window to {$window_bounds}
- set theViewOptions to the icon view options of container window
- set arrangement of theViewOptions to not arranged
- set icon size of theViewOptions to $icon_size
- $background_commands
- $items_positions
- close -- close/reopen works around a bug...
- open
- update without registering applications
- delay 5
- eject
- end tell
- end tell
- end run
- """)
-
- itemscript = Template('set position of item "${item}" of container window to {${position}}')
- items_positions = []
- if "items_position" in fancy:
- for name, position in fancy["items_position"].items():
- params = { "item" : name, "position" : ",".join([str(p) for p in position]) }
- items_positions.append(itemscript.substitute(params))
-
- params = {
- "disk" : volname,
- "window_bounds" : "300,300,800,620",
- "icon_size" : "96",
- "background_commands" : "",
- "items_positions" : "\n ".join(items_positions)
- }
- if "window_bounds" in fancy:
- params["window_bounds"] = ",".join([str(p) for p in fancy["window_bounds"]])
- if "icon_size" in fancy:
- params["icon_size"] = str(fancy["icon_size"])
- if bg_path is not None:
- # Set background file, then call SetFile to make it invisible.
- # (note: making it invisible first makes set background picture fail)
- bgscript = Template("""set background picture of theViewOptions to file ".background:$bgpic"
- do shell script "SetFile -a V /Volumes/$disk/.background/$bgpic" """)
- params["background_commands"] = bgscript.substitute({"bgpic" : os.path.basename(bg_path), "disk" : params["disk"]})
-
- s = appscript.substitute(params)
- if verbose >= 2:
- print("Running AppleScript:")
- print(s)
-
- p = subprocess.Popen(['osascript', '-'], stdin=subprocess.PIPE)
- p.communicate(input=s.encode('utf-8'))
- if p.returncode:
- print("Error running osascript.")
-
- if verbose >= 2:
- print("+ Finalizing .dmg disk image +")
- time.sleep(5)
-
- try:
- runHDIUtil("convert", dmg_name + ".temp", format="UDBZ", o=dmg_name + ".dmg", ov=True)
- except subprocess.CalledProcessError as e:
- sys.exit(e.returncode)
-
- os.unlink(dmg_name + ".temp.dmg")
+ if verbose:
+ print("Attaching temp image...")
+ output = run(["hdiutil", "attach", tempname, "-readwrite"], check=True, universal_newlines=True, stdout=PIPE).stdout
+
+ m = re.search(r"/Volumes/(.+$)", output)
+ disk_root = m.group(0)
+
+ print("+ Applying fancy settings +")
+
+ bg_path = os.path.join(disk_root, ".background", os.path.basename('background.tiff'))
+ os.mkdir(os.path.dirname(bg_path))
+ if verbose:
+ print('background.tiff', "->", bg_path)
+ shutil.copy2('background.tiff', bg_path)
+
+ os.symlink("/Applications", os.path.join(disk_root, "Applications"))
+
+ print("+ Finalizing .dmg disk image +")
+
+ run(["hdiutil", "detach", f"/Volumes/{appname}"], universal_newlines=True)
+
+ run(["hdiutil", "convert", tempname, "-format", "UDZO", "-o", appname, "-imagekey", "zlib-level=9"], check=True, universal_newlines=True)
+
+ os.unlink(tempname)
# ------------------------------------------------
-if verbose >= 2:
- print("+ Done +")
+print("+ Done +")
sys.exit(0)
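For context, the translation handling above replaces the Makefile's hard-coded OSX_QT_TRANSLATIONS list: every base Qt catalog found in -translations-dir is copied into the bundle's Resources. The following standalone sketch (illustration only, not part of the diff; the file names are made-up examples) shows which files the new regex selects:

#!/usr/bin/env python3
# Sketch: mimic the translation filter added to macdeployqtplus above.
import re

# Same pattern as in the script; note the unescaped '.' also matches any character.
regex = re.compile('qt_[a-z]*(.qm|_[A-Z]*.qm)')

candidates = [
    "qt_de.qm",       # base catalog, plain locale        -> copied
    "qt_zh_CN.qm",    # base catalog, locale with region  -> copied
    "qtbase_de.qm",   # module-specific catalog           -> skipped
    "qt_help_de.qm",  # module-specific catalog           -> skipped
]

for name in candidates:
    print(name, "->", "copied" if regex.match(name) else "skipped")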
diff --git a/depends/funcs.mk b/depends/funcs.mk
index 58d882eb05..5697bd6f15 100644
--- a/depends/funcs.mk
+++ b/depends/funcs.mk
@@ -163,7 +163,9 @@ $(1)_cmake=env CC="$$($(1)_cc)" \
CXXFLAGS="$$($(1)_cppflags) $$($(1)_cxxflags)" \
LDFLAGS="$$($(1)_ldflags)" \
cmake -DCMAKE_INSTALL_PREFIX:PATH="$$($($(1)_type)_prefix)"
-ifneq ($($(1)_type),build)
+ifeq ($($(1)_type),build)
+$(1)_cmake += -DCMAKE_INSTALL_RPATH:PATH="$$($($(1)_type)_prefix)/lib"
+else
ifneq ($(host),$(build))
$(1)_cmake += -DCMAKE_SYSTEM_NAME=$($(host_os)_cmake_system)
$(1)_cmake += -DCMAKE_C_COMPILER_TARGET=$(host)
diff --git a/depends/packages/native_mac_alias.mk b/depends/packages/native_mac_alias.mk
index e60b99dccc..5fe027fb8a 100644
--- a/depends/packages/native_mac_alias.mk
+++ b/depends/packages/native_mac_alias.mk
@@ -1,8 +1,8 @@
package=native_mac_alias
-$(package)_version=2.0.7
+$(package)_version=2.1.1
$(package)_download_path=https://github.com/al45tair/mac_alias/archive/
$(package)_file_name=v$($(package)_version).tar.gz
-$(package)_sha256_hash=6f606d3b6bccd2112aeabf1a063f5b5ece87005a5d7e97c8faca23b916e88838
+$(package)_sha256_hash=c0ffceee14f7d04a6eb323fb7b8217dc3f373b346198d2ca42300a8362db7efa
$(package)_install_libdir=$(build_prefix)/lib/python3/dist-packages
define $(package)_build_cmds
diff --git a/depends/packages/qt.mk b/depends/packages/qt.mk
index df0cf5893f..fdf03e73fc 100644
--- a/depends/packages/qt.mk
+++ b/depends/packages/qt.mk
@@ -14,7 +14,6 @@ $(package)_patches+= fix_android_qmake_conf.patch fix_android_jni_static.patch d
$(package)_patches+= freetype_back_compat.patch drop_lrelease_dependency.patch fix_powerpc_libpng.patch
$(package)_patches+= fix_mingw_cross_compile.patch fix_qpainter_non_determinism.patch
-# Update OSX_QT_TRANSLATIONS when this is updated
$(package)_qttranslations_file_name=qttranslations-$($(package)_suffix)
$(package)_qttranslations_sha256_hash=fb5a47799754af73d3bf501fe513342cfe2fc37f64e80df5533f6110e804220c
@@ -128,6 +127,9 @@ $(package)_config_opts_darwin += -device-option MAC_TARGET=$(host)
$(package)_config_opts_darwin += -device-option XCODE_VERSION=$(XCODE_VERSION)
endif
+# for macOS on Apple Silicon (ARM) see https://bugreports.qt.io/browse/QTBUG-85279
+$(package)_config_opts_arm_darwin += -device-option QMAKE_APPLE_DEVICE_ARCHS=arm64
+
$(package)_config_opts_linux = -qt-xkbcommon-x11
$(package)_config_opts_linux += -qt-xcb
$(package)_config_opts_linux += -no-xcb-xlib
diff --git a/doc/build-osx.md b/doc/build-osx.md
index 0a091f6afd..c1d101fde1 100644
--- a/doc/build-osx.md
+++ b/doc/build-osx.md
@@ -19,7 +19,7 @@ Then install [Homebrew](https://brew.sh).
## Dependencies
```shell
-brew install automake berkeley-db4 libtool boost miniupnpc pkg-config python qt libevent qrencode sqlite
+brew install automake libtool boost miniupnpc pkg-config python qt libevent qrencode
```
If you run into issues, check [Homebrew's troubleshooting page](https://docs.brew.sh/Troubleshooting).
@@ -30,7 +30,22 @@ If you want to build the disk image with `make deploy` (.dmg / optional), you ne
brew install librsvg
```
-## Berkeley DB
+Wallet support requires one or both of the dependencies ([*SQLite*](#sqlite) and [*Berkeley DB*](#berkeley-db)) described in the sections below.
+To build Bitcoin Core without wallet support, see [*Disable-wallet mode*](#disable-wallet-mode).
+
+#### SQLite
+
+Usually, a macOS installation already includes a suitable SQLite.
+Alternatively, the Homebrew package can be installed:
+
+```shell
+brew install sqlite
+```
+
+In that case, the Homebrew package takes precedence.
+
+#### Berkeley DB
+
It is recommended to use Berkeley DB 4.8. If you have to build it yourself,
you can use [this](/contrib/install_db4.sh) script to install it
like so:
@@ -41,7 +56,11 @@ like so:
from the root of the repository.
-**Note**: You only need Berkeley DB if the wallet is enabled (see [*Disable-wallet mode*](/doc/build-osx.md#disable-wallet-mode)).
+Alternatively, the Homebrew package can be installed:
+
+```shell
+brew install berkeley-db4
+```
## Build Bitcoin Core
@@ -72,14 +91,14 @@ from the root of the repository.
make deploy
```
-## `disable-wallet` mode
+## Disable-wallet mode
When the intention is to run only a P2P node without a wallet, Bitcoin Core may be
-compiled in `disable-wallet` mode with:
+compiled in disable-wallet mode with:
```shell
./configure --disable-wallet
```
-In this case there is no dependency on Berkeley DB 4.8 and SQLite.
+In this case there is no dependency on [*Berkeley DB*](#berkeley-db) or [*SQLite*](#sqlite).
Mining is also possible in disable-wallet mode using the `getblocktemplate` RPC call.
diff --git a/doc/release-notes-19776.md b/doc/release-notes-19776.md
new file mode 100644
index 0000000000..5553c5a7bd
--- /dev/null
+++ b/doc/release-notes-19776.md
@@ -0,0 +1,9 @@
+Updated RPCs
+------------
+
+- The `getpeerinfo` RPC returns two new boolean fields, `bip152_hb_to` and
+ `bip152_hb_from`, that respectively indicate whether we selected a peer to be
+ in compact blocks high-bandwidth mode or whether a peer selected us as a
+ compact blocks high-bandwidth peer. High-bandwidth peers send new block
+ announcements via a `cmpctblock` message rather than the usual inv/headers
+ announcements. See BIP 152 for more details. (#19776)
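For illustration, the direction of the two new fields can be modelled with a pair of booleans per peer: `bip152_hb_to` follows the `sendcmpct` announcements we send, `bip152_hb_from` follows the ones we receive. The sketch below is a deliberately simplified stand-in, not the actual CNode/PeerManager code:

```cpp
#include <cstdio>

// Hypothetical, minimal model of the two per-peer BIP152 flags reported by
// getpeerinfo; this is not the real CNode class.
struct PeerFlags {
    bool bip152_hb_to{false};    // we asked this peer for high-bandwidth mode
    bool bip152_hb_from{false};  // this peer asked us for high-bandwidth mode
};

// Called when *we* send sendcmpct(announce) to the peer.
void OnSendCmpctSent(PeerFlags& peer, bool announce) { peer.bip152_hb_to = announce; }

// Called when the peer sends sendcmpct(announce) to *us*.
void OnSendCmpctReceived(PeerFlags& peer, bool announce) { peer.bip152_hb_from = announce; }

int main()
{
    PeerFlags peer;
    OnSendCmpctSent(peer, /*announce=*/true);      // we select the peer as high-bandwidth
    OnSendCmpctReceived(peer, /*announce=*/false); // the peer keeps us low-bandwidth
    std::printf("bip152_hb_to=%d bip152_hb_from=%d\n", peer.bip152_hb_to, peer.bip152_hb_from);
}
```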
diff --git a/doc/tor.md b/doc/tor.md
index 12b5f70245..692041ccea 100644
--- a/doc/tor.md
+++ b/doc/tor.md
@@ -37,7 +37,7 @@ In a typical situation, this suffices to run behind a Tor proxy:
./bitcoind -proxy=127.0.0.1:9050
-## 2. Run a Bitcoin Core hidden server
+## 2. Manually create a Bitcoin Core onion service
If you configure your Tor system accordingly, it is possible to make your node also
reachable from the Tor network. Add these lines to your /etc/tor/torrc (or equivalent
@@ -46,7 +46,6 @@ versions of Tor see [Section 3](#3-automatically-listen-on-tor).*
HiddenServiceDir /var/lib/tor/bitcoin-service/
HiddenServicePort 8333 127.0.0.1:8334
- HiddenServicePort 18333 127.0.0.1:18334
The directory can be different of course, but virtual port numbers should be equal to
your bitcoind's P2P listen port (8333 by default), and target addresses and ports
@@ -92,7 +91,7 @@ for normal IPv4/IPv6 communication, use:
./bitcoind -onion=127.0.0.1:9050 -externalip=57qr3yd1nyntf5k.onion -discover
-## 3. Automatically listen on Tor
+## 3. Automatically create a Bitcoin Core onion service
Starting with Tor version 0.2.7.1 it is possible, through Tor's control socket
API, to create and destroy 'ephemeral' onion services programmatically.
@@ -130,6 +129,6 @@ in the tor configuration file. The hashed password can be obtained with the comm
- Do not add anything but Bitcoin Core ports to the onion service created in section 2.
If you run a web service too, create a new onion service for that.
- Otherwise it is trivial to link them, which may reduce privacy. Hidden
+ Otherwise it is trivial to link them, which may reduce privacy. Onion
services created automatically (as in section 3) always have only one port
open.
diff --git a/src/Makefile.crc32c.include b/src/Makefile.crc32c.include
index 802b3a2e4b..113272e65e 100644
--- a/src/Makefile.crc32c.include
+++ b/src/Makefile.crc32c.include
@@ -41,7 +41,7 @@ crc32c_libcrc32c_a_CXXFLAGS = $(AM_CXXFLAGS) $(PIE_FLAGS)
crc32c_libcrc32c_a_SOURCES =
crc32c_libcrc32c_a_SOURCES += crc32c/include/crc32c/crc32c.h
crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_arm64.h
-crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_arm64_linux_check.h
+crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_arm64_check.h
crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_internal.h
crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_prefetch.h
crc32c_libcrc32c_a_SOURCES += crc32c/src/crc32c_read_le.h
diff --git a/src/bench/wallet_balance.cpp b/src/bench/wallet_balance.cpp
index b3b73284d8..aa436ee3ea 100644
--- a/src/bench/wallet_balance.cpp
+++ b/src/bench/wallet_balance.cpp
@@ -24,15 +24,13 @@ static void WalletBalance(benchmark::Bench& bench, const bool set_dirty, const b
const auto& ADDRESS_WATCHONLY = ADDRESS_BCRT1_UNSPENDABLE;
- NodeContext node;
- std::unique_ptr<interfaces::Chain> chain = interfaces::MakeChain(node);
- CWallet wallet{chain.get(), "", CreateMockWalletDatabase()};
+ CWallet wallet{test_setup.m_node.chain.get(), "", CreateMockWalletDatabase()};
{
wallet.SetupLegacyScriptPubKeyMan();
bool first_run;
if (wallet.LoadWallet(first_run) != DBErrors::LOAD_OK) assert(false);
}
- auto handler = chain->handleNotifications({&wallet, [](CWallet*) {}});
+ auto handler = test_setup.m_node.chain->handleNotifications({&wallet, [](CWallet*) {}});
const Optional<std::string> address_mine{add_mine ? Optional<std::string>{getnewaddress(wallet)} : nullopt};
if (add_watchonly) importaddress(wallet, ADDRESS_WATCHONLY);
diff --git a/src/bitcoin-tx.cpp b/src/bitcoin-tx.cpp
index e22b3766cf..f87b9c1d16 100644
--- a/src/bitcoin-tx.cpp
+++ b/src/bitcoin-tx.cpp
@@ -40,6 +40,7 @@ static void SetupBitcoinTxArgs(ArgsManager &argsman)
{
SetupHelpOptions(argsman);
+ argsman.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-create", "Create new, empty TX.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-json", "Select JSON output", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-txid", "Output only the hex-encoded transaction id of the resultant transaction.", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
@@ -95,13 +96,16 @@ static int AppInitRawTx(int argc, char* argv[])
fCreateBlank = gArgs.GetBoolArg("-create", false);
- if (argc < 2 || HelpRequested(gArgs)) {
+ if (argc < 2 || HelpRequested(gArgs) || gArgs.IsArgSet("-version")) {
// First part of help message is specific to this utility
- std::string strUsage = PACKAGE_NAME " bitcoin-tx utility version " + FormatFullVersion() + "\n\n" +
- "Usage: bitcoin-tx [options] <hex-tx> [commands] Update hex-encoded bitcoin transaction\n" +
- "or: bitcoin-tx [options] -create [commands] Create hex-encoded bitcoin transaction\n" +
- "\n";
- strUsage += gArgs.GetHelpMessage();
+ std::string strUsage = PACKAGE_NAME " bitcoin-tx utility version " + FormatFullVersion() + "\n";
+ if (!gArgs.IsArgSet("-version")) {
+ strUsage += "\n"
+ "Usage: bitcoin-tx [options] <hex-tx> [commands] Update hex-encoded bitcoin transaction\n"
+ "or: bitcoin-tx [options] -create [commands] Create hex-encoded bitcoin transaction\n"
+ "\n";
+ strUsage += gArgs.GetHelpMessage();
+ }
tfm::format(std::cout, "%s", strUsage);
diff --git a/src/bitcoin-wallet.cpp b/src/bitcoin-wallet.cpp
index b9c2fe2d34..d258f9f933 100644
--- a/src/bitcoin-wallet.cpp
+++ b/src/bitcoin-wallet.cpp
@@ -24,6 +24,7 @@ static void SetupWalletToolArgs(ArgsManager& argsman)
SetupHelpOptions(argsman);
SetupChainParamsBaseOptions(argsman);
+ argsman.AddArg("-version", "Print version and exit", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-datadir=<dir>", "Specify data directory", ArgsManager::ALLOW_ANY, OptionsCategory::OPTIONS);
argsman.AddArg("-wallet=<wallet-name>", "Specify wallet name", ArgsManager::ALLOW_ANY | ArgsManager::NETWORK_ONLY, OptionsCategory::OPTIONS);
argsman.AddArg("-debug=<category>", "Output debugging information (default: 0).", ArgsManager::ALLOW_ANY, OptionsCategory::DEBUG_TEST);
@@ -42,16 +43,18 @@ static bool WalletAppInit(int argc, char* argv[])
tfm::format(std::cerr, "Error parsing command line arguments: %s\n", error_message);
return false;
}
- if (argc < 2 || HelpRequested(gArgs)) {
- std::string usage = strprintf("%s bitcoin-wallet version", PACKAGE_NAME) + " " + FormatFullVersion() + "\n\n" +
- "bitcoin-wallet is an offline tool for creating and interacting with " PACKAGE_NAME " wallet files.\n" +
- "By default bitcoin-wallet will act on wallets in the default mainnet wallet directory in the datadir.\n" +
- "To change the target wallet, use the -datadir, -wallet and -testnet/-regtest arguments.\n\n" +
- "Usage:\n" +
- " bitcoin-wallet [options] <command>\n\n" +
- gArgs.GetHelpMessage();
-
- tfm::format(std::cout, "%s", usage);
+ if (argc < 2 || HelpRequested(gArgs) || gArgs.IsArgSet("-version")) {
+ std::string strUsage = strprintf("%s bitcoin-wallet version", PACKAGE_NAME) + " " + FormatFullVersion() + "\n";
+ if (!gArgs.IsArgSet("-version")) {
+ strUsage += "\n"
+ "bitcoin-wallet is an offline tool for creating and interacting with " PACKAGE_NAME " wallet files.\n"
+ "By default bitcoin-wallet will act on wallets in the default mainnet wallet directory in the datadir.\n"
+ "To change the target wallet, use the -datadir, -wallet and -testnet/-regtest arguments.\n\n"
+ "Usage:\n"
+ " bitcoin-wallet [options] <command>\n";
+ strUsage += "\n" + gArgs.GetHelpMessage();
+ }
+ tfm::format(std::cout, "%s", strUsage);
return false;
}
diff --git a/src/bitcoind.cpp b/src/bitcoind.cpp
index 455a82e390..4c89db54cb 100644
--- a/src/bitcoind.cpp
+++ b/src/bitcoind.cpp
@@ -57,11 +57,11 @@ static bool AppInit(int argc, char* argv[])
if (HelpRequested(args) || args.IsArgSet("-version")) {
std::string strUsage = PACKAGE_NAME " version " + FormatFullVersion() + "\n";
- if (args.IsArgSet("-version")) {
- strUsage += FormatParagraph(LicenseInfo()) + "\n";
- } else {
- strUsage += "\nUsage: bitcoind [options] Start " PACKAGE_NAME "\n";
- strUsage += "\n" + args.GetHelpMessage();
+ if (!args.IsArgSet("-version")) {
+ strUsage += FormatParagraph(LicenseInfo()) + "\n"
+ "\nUsage: bitcoind [options] Start " PACKAGE_NAME "\n"
+ "\n";
+ strUsage += args.GetHelpMessage();
}
tfm::format(std::cout, "%s", strUsage);
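All three command-line tools changed above now share the same control flow: the version line is always printed, while the usage text and option help are appended only when `-version` was not requested. A minimal sketch of that shared structure, using hypothetical helper names rather than the real ArgsManager API:

```cpp
#include <iostream>
#include <string>

// Hypothetical stand-ins for the real ArgsManager/FormatFullVersion helpers.
static bool HelpRequested() { return false; }
static bool VersionRequested() { return true; }
static std::string HelpMessage() { return "  -create  Create new, empty TX.\n"; }

int main()
{
    if (HelpRequested() || VersionRequested()) {
        std::string usage = "bitcoin-tool version v0.0.0\n"; // version line is always printed
        if (!VersionRequested()) {
            // Usage and option help are only appended for -h/-help, not for -version.
            usage += "\nUsage: bitcoin-tool [options]\n\n" + HelpMessage();
        }
        std::cout << usage;
        return 0;
    }
}
```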
diff --git a/src/core_read.cpp b/src/core_read.cpp
index a2eebbd528..7687a86185 100644
--- a/src/core_read.cpp
+++ b/src/core_read.cpp
@@ -126,31 +126,72 @@ static bool CheckTxScriptsSanity(const CMutableTransaction& tx)
static bool DecodeTx(CMutableTransaction& tx, const std::vector<unsigned char>& tx_data, bool try_no_witness, bool try_witness)
{
+ // General strategy:
+ // - Decode both with extended serialization (which interprets the 0x0001 tag as a marker for
+ // the presense of witnesses) and with legacy serialization (which interprets the tag as a
+ // 0-input 1-output incomplete transaction).
+ // - Restricted by try_no_witness (which disables legacy if false) and try_witness (which
+ // disables extended if false).
+ // - Ignore serializations that do not fully consume the hex string.
+ // - If neither succeeds, fail.
+ // - If only one succeeds, return that one.
+ // - If both decode attempts succeed:
+ // - If only one passes the CheckTxScriptsSanity check, return that one.
+ // - If neither or both pass CheckTxScriptsSanity, return the extended one.
+
+ CMutableTransaction tx_extended, tx_legacy;
+ bool ok_extended = false, ok_legacy = false;
+
+ // Try decoding with extended serialization support, and remember if the result successfully
+ // consumes the entire input.
if (try_witness) {
CDataStream ssData(tx_data, SER_NETWORK, PROTOCOL_VERSION);
try {
- ssData >> tx;
- // If transaction looks sane, we don't try other mode even if requested
- if (ssData.empty() && (!try_no_witness || CheckTxScriptsSanity(tx))) {
- return true;
- }
+ ssData >> tx_extended;
+ if (ssData.empty()) ok_extended = true;
} catch (const std::exception&) {
// Fall through.
}
}
+ // Optimization: if extended decoding succeeded and the result passes CheckTxScriptsSanity,
+ // don't bother decoding the other way.
+ if (ok_extended && CheckTxScriptsSanity(tx_extended)) {
+ tx = std::move(tx_extended);
+ return true;
+ }
+
+ // Try decoding with legacy serialization, and remember if the result successfully consumes the entire input.
if (try_no_witness) {
CDataStream ssData(tx_data, SER_NETWORK, PROTOCOL_VERSION | SERIALIZE_TRANSACTION_NO_WITNESS);
try {
- ssData >> tx;
- if (ssData.empty()) {
- return true;
- }
+ ssData >> tx_legacy;
+ if (ssData.empty()) ok_legacy = true;
} catch (const std::exception&) {
// Fall through.
}
}
+ // If legacy decoding succeeded and passes CheckTxScriptsSanity, that's our answer, as we know
+ // at this point that extended decoding either failed or doesn't pass the sanity check.
+ if (ok_legacy && CheckTxScriptsSanity(tx_legacy)) {
+ tx = std::move(tx_legacy);
+ return true;
+ }
+
+ // If extended decoding succeeded, and neither decoding passes sanity, return the extended one.
+ if (ok_extended) {
+ tx = std::move(tx_extended);
+ return true;
+ }
+
+ // If legacy decoding succeeded and extended didn't, return the legacy one.
+ if (ok_legacy) {
+ tx = std::move(tx_legacy);
+ return true;
+ }
+
+ // If none succeeded, we failed.
return false;
}
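The precedence rules spelled out in the strategy comment of DecodeTx() reduce to a small decision function. The following is a self-contained sketch of just that selection step, assuming the "decoded fully" and "passes CheckTxScriptsSanity" results have already been computed; the real code of course operates on CDataStream and CMutableTransaction objects:

```cpp
#include <cassert>

enum class Pick { None, Extended, Legacy };

// Simplified model of the selection logic in DecodeTx(): given whether each
// deserialization consumed the whole input and whether it passed
// CheckTxScriptsSanity(), decide which result (if any) to return.
Pick ChooseDecoding(bool ok_extended, bool sane_extended, bool ok_legacy, bool sane_legacy)
{
    if (ok_extended && sane_extended) return Pick::Extended; // sane extended wins outright
    if (ok_legacy && sane_legacy) return Pick::Legacy;       // otherwise a sane legacy decode wins
    if (ok_extended) return Pick::Extended;                  // neither sane: prefer extended
    if (ok_legacy) return Pick::Legacy;                      // only legacy decoded at all
    return Pick::None;                                       // both failed
}

int main()
{
    assert(ChooseDecoding(true, true, true, true) == Pick::Extended);
    assert(ChooseDecoding(true, false, true, true) == Pick::Legacy);
    assert(ChooseDecoding(true, false, true, false) == Pick::Extended);
    assert(ChooseDecoding(false, false, true, false) == Pick::Legacy);
    assert(ChooseDecoding(false, false, false, false) == Pick::None);
}
```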
diff --git a/src/crc32c/.appveyor.yml b/src/crc32c/.appveyor.yml
index 7345746750..b23e02e88a 100644
--- a/src/crc32c/.appveyor.yml
+++ b/src/crc32c/.appveyor.yml
@@ -8,9 +8,9 @@ environment:
matrix:
# AppVeyor currently has no custom job name feature.
# http://help.appveyor.com/discussions/questions/1623-can-i-provide-a-friendly-name-for-jobs
- - JOB: Visual Studio 2017
- APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017
- CMAKE_GENERATOR: Visual Studio 15 2017
+ - JOB: Visual Studio 2019
+ APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2019
+ CMAKE_GENERATOR: Visual Studio 16 2019
platform:
- x86
@@ -24,10 +24,11 @@ build_script:
- git submodule update --init --recursive
- mkdir build
- cd build
- - if "%platform%"=="x64" set CMAKE_GENERATOR=%CMAKE_GENERATOR% Win64
+ - if "%platform%"=="x86" (set CMAKE_GENERATOR_PLATFORM="Win32")
+ else (set CMAKE_GENERATOR_PLATFORM="%platform%")
- cmake --version
- - cmake .. -G "%CMAKE_GENERATOR%" -DCRC32C_USE_GLOG=0
- -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%"
+ - cmake .. -G "%CMAKE_GENERATOR%" -A "%CMAKE_GENERATOR_PLATFORM%"
+ -DCMAKE_CONFIGURATION_TYPES="%CONFIGURATION%" -DCRC32C_USE_GLOG=0
- cmake --build . --config "%CONFIGURATION%"
- cd ..
diff --git a/src/crc32c/AUTHORS b/src/crc32c/AUTHORS
index 6f1f6871a6..ef9b4ea933 100644
--- a/src/crc32c/AUTHORS
+++ b/src/crc32c/AUTHORS
@@ -7,3 +7,5 @@ Google Inc.
Fangming Fang <Fangming.Fang@arm.com>
Vadim Skipin <vadim.skipin@gmail.com>
+Rodrigo Tobar <rtobar@icrar.org>
+Harry Mallon <hjmallon@gmail.com>
diff --git a/src/crc32c/CMakeLists.txt b/src/crc32c/CMakeLists.txt
index 111a3e3614..71692d5796 100644
--- a/src/crc32c/CMakeLists.txt
+++ b/src/crc32c/CMakeLists.txt
@@ -5,15 +5,21 @@
cmake_minimum_required(VERSION 3.1)
project(Crc32c VERSION 1.1.0 LANGUAGES C CXX)
-# This project can use C11, but will gracefully decay down to C89.
-set(CMAKE_C_STANDARD 11)
-set(CMAKE_C_STANDARD_REQUIRED OFF)
-set(CMAKE_C_EXTENSIONS OFF)
-
-# This project requires C++11.
-set(CMAKE_CXX_STANDARD 11)
-set(CMAKE_CXX_STANDARD_REQUIRED ON)
-set(CMAKE_CXX_EXTENSIONS OFF)
+# C standard can be overridden when this is used as a sub-project.
+if(NOT CMAKE_C_STANDARD)
+ # This project can use C11, but will gracefully decay down to C89.
+ set(CMAKE_C_STANDARD 11)
+ set(CMAKE_C_STANDARD_REQUIRED OFF)
+ set(CMAKE_C_EXTENSIONS OFF)
+endif(NOT CMAKE_C_STANDARD)
+
+# C++ standard can be overridden when this is used as a sub-project.
+if(NOT CMAKE_CXX_STANDARD)
+ # This project requires C++11.
+ set(CMAKE_CXX_STANDARD 11)
+ set(CMAKE_CXX_STANDARD_REQUIRED ON)
+ set(CMAKE_CXX_EXTENSIONS OFF)
+endif(NOT CMAKE_CXX_STANDARD)
# https://github.com/izenecloud/cmake/blob/master/SetCompilerWarningAll.cmake
if(CMAKE_CXX_COMPILER_ID STREQUAL "MSVC")
@@ -269,7 +275,7 @@ target_sources(crc32c
PRIVATE
"${PROJECT_BINARY_DIR}/include/crc32c/crc32c_config.h"
"src/crc32c_arm64.h"
- "src/crc32c_arm64_linux_check.h"
+ "src/crc32c_arm64_check.h"
"src/crc32c_internal.h"
"src/crc32c_portable.cc"
"src/crc32c_prefetch.h"
@@ -405,19 +411,24 @@ if(CRC32C_INSTALL)
)
include(CMakePackageConfigHelpers)
+ configure_package_config_file(
+ "${PROJECT_NAME}Config.cmake.in"
+ "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake"
+ INSTALL_DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
+ )
write_basic_package_version_file(
- "${PROJECT_BINARY_DIR}/Crc32cConfigVersion.cmake"
- COMPATIBILITY SameMajorVersion
+ "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake"
+ COMPATIBILITY SameMajorVersion
)
install(
EXPORT Crc32cTargets
NAMESPACE Crc32c::
- DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/Crc32c"
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
)
install(
FILES
- "Crc32cConfig.cmake"
- "${PROJECT_BINARY_DIR}/Crc32cConfigVersion.cmake"
- DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/Crc32c"
+ "${PROJECT_BINARY_DIR}/${PROJECT_NAME}Config.cmake"
+ "${PROJECT_BINARY_DIR}/${PROJECT_NAME}ConfigVersion.cmake"
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}"
)
endif(CRC32C_INSTALL)
diff --git a/src/crc32c/Crc32cConfig.cmake b/src/crc32c/Crc32cConfig.cmake.in
index 4d6057ec26..c6b8fc7913 100644
--- a/src/crc32c/Crc32cConfig.cmake
+++ b/src/crc32c/Crc32cConfig.cmake.in
@@ -2,4 +2,8 @@
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file. See the AUTHORS file for names of contributors.
+@PACKAGE_INIT@
+
include("${CMAKE_CURRENT_LIST_DIR}/Crc32cTargets.cmake")
+
+check_required_components(Crc32c)
diff --git a/src/crc32c/src/crc32c.cc b/src/crc32c/src/crc32c.cc
index 4d3018af47..804133bc17 100644
--- a/src/crc32c/src/crc32c.cc
+++ b/src/crc32c/src/crc32c.cc
@@ -8,7 +8,7 @@
#include <cstdint>
#include "./crc32c_arm64.h"
-#include "./crc32c_arm64_linux_check.h"
+#include "./crc32c_arm64_check.h"
#include "./crc32c_internal.h"
#include "./crc32c_sse42.h"
#include "./crc32c_sse42_check.h"
@@ -20,8 +20,8 @@ uint32_t Extend(uint32_t crc, const uint8_t* data, size_t count) {
static bool can_use_sse42 = CanUseSse42();
if (can_use_sse42) return ExtendSse42(crc, data, count);
#elif HAVE_ARM64_CRC32C
- static bool can_use_arm_linux = CanUseArm64Linux();
- if (can_use_arm_linux) return ExtendArm64(crc, data, count);
+ static bool can_use_arm64_crc32 = CanUseArm64Crc32();
+ if (can_use_arm64_crc32) return ExtendArm64(crc, data, count);
#endif // HAVE_SSE42 && (defined(_M_X64) || defined(__x86_64__))
return ExtendPortable(crc, data, count);
diff --git a/src/crc32c/src/crc32c_arm64.cc b/src/crc32c/src/crc32c_arm64.cc
index b872245f95..1da04ed34a 100644
--- a/src/crc32c/src/crc32c_arm64.cc
+++ b/src/crc32c/src/crc32c_arm64.cc
@@ -64,7 +64,7 @@
namespace crc32c {
-uint32_t ExtendArm64(uint32_t crc, const uint8_t *buf, size_t size) {
+uint32_t ExtendArm64(uint32_t crc, const uint8_t *data, size_t size) {
int64_t length = size;
uint32_t crc0, crc1, crc2, crc3;
uint64_t t0, t1, t2;
@@ -74,7 +74,6 @@ uint32_t ExtendArm64(uint32_t crc, const uint8_t *buf, size_t size) {
const poly64_t k0 = 0x8d96551c, k1 = 0xbd6f81f8, k2 = 0xdcb17aa4;
crc = crc ^ kCRC32Xor;
- const uint8_t *p = reinterpret_cast<const uint8_t *>(buf);
while (length >= KBYTES) {
crc0 = crc;
@@ -83,14 +82,14 @@ uint32_t ExtendArm64(uint32_t crc, const uint8_t *buf, size_t size) {
crc3 = 0;
// Process 1024 bytes in parallel.
- CRC32C1024BYTES(p);
+ CRC32C1024BYTES(data);
// Merge the 4 partial CRC32C values.
t2 = (uint64_t)vmull_p64(crc2, k2);
t1 = (uint64_t)vmull_p64(crc1, k1);
t0 = (uint64_t)vmull_p64(crc0, k0);
- crc = __crc32cd(crc3, *(uint64_t *)p);
- p += sizeof(uint64_t);
+ crc = __crc32cd(crc3, *(uint64_t *)data);
+ data += sizeof(uint64_t);
crc ^= __crc32cd(0, t2);
crc ^= __crc32cd(0, t1);
crc ^= __crc32cd(0, t0);
@@ -99,23 +98,23 @@ uint32_t ExtendArm64(uint32_t crc, const uint8_t *buf, size_t size) {
}
while (length >= 8) {
- crc = __crc32cd(crc, *(uint64_t *)p);
- p += 8;
+ crc = __crc32cd(crc, *(uint64_t *)data);
+ data += 8;
length -= 8;
}
if (length & 4) {
- crc = __crc32cw(crc, *(uint32_t *)p);
- p += 4;
+ crc = __crc32cw(crc, *(uint32_t *)data);
+ data += 4;
}
if (length & 2) {
- crc = __crc32ch(crc, *(uint16_t *)p);
- p += 2;
+ crc = __crc32ch(crc, *(uint16_t *)data);
+ data += 2;
}
if (length & 1) {
- crc = __crc32cb(crc, *p);
+ crc = __crc32cb(crc, *data);
}
return crc ^ kCRC32Xor;
diff --git a/src/crc32c/src/crc32c_arm64.h b/src/crc32c/src/crc32c_arm64.h
index 100cd56ec8..e093687ddc 100644
--- a/src/crc32c/src/crc32c_arm64.h
+++ b/src/crc32c/src/crc32c_arm64.h
@@ -2,10 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-// Linux-specific code checking the availability for ARM CRC32C instructions.
+// ARM-specific code
-#ifndef CRC32C_CRC32C_ARM_LINUX_H_
-#define CRC32C_CRC32C_ARM_LINUX_H_
+#ifndef CRC32C_CRC32C_ARM_H_
+#define CRC32C_CRC32C_ARM_H_
#include <cstddef>
#include <cstdint>
@@ -24,4 +24,4 @@ uint32_t ExtendArm64(uint32_t crc, const uint8_t* data, size_t count);
#endif // HAVE_ARM64_CRC32C
-#endif // CRC32C_CRC32C_ARM_LINUX_H_
+#endif // CRC32C_CRC32C_ARM_H_
diff --git a/src/crc32c/src/crc32c_arm64_linux_check.h b/src/crc32c/src/crc32c_arm64_check.h
index 1a20a757bb..62a07aba09 100644
--- a/src/crc32c/src/crc32c_arm64_linux_check.h
+++ b/src/crc32c/src/crc32c_arm64_check.h
@@ -2,12 +2,10 @@
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file. See the AUTHORS file for names of contributors.
-// ARM Linux-specific code checking for the availability of CRC32C instructions.
+// ARM-specific code checking for the availability of CRC32C instructions.
-#ifndef CRC32C_CRC32C_ARM_LINUX_CHECK_H_
-#define CRC32C_CRC32C_ARM_LINUX_CHECK_H_
-
-// X86-specific code checking for the availability of SSE4.2 instructions.
+#ifndef CRC32C_CRC32C_ARM_CHECK_H_
+#define CRC32C_CRC32C_ARM_CHECK_H_
#include <cstddef>
#include <cstdint>
@@ -18,6 +16,7 @@
#if HAVE_ARM64_CRC32C
+#ifdef __linux__
#if HAVE_STRONG_GETAUXVAL
#include <sys/auxv.h>
#elif HAVE_WEAK_GETAUXVAL
@@ -27,17 +26,28 @@ extern "C" unsigned long getauxval(unsigned long type) __attribute__((weak));
#define AT_HWCAP 16
#endif // HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL
+#endif // defined (__linux__)
+
+#ifdef __APPLE__
+#include <sys/types.h>
+#include <sys/sysctl.h>
+#endif // defined (__APPLE__)
namespace crc32c {
-inline bool CanUseArm64Linux() {
-#if HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL
+inline bool CanUseArm64Crc32() {
+#if defined (__linux__) && (HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL)
// From 'arch/arm64/include/uapi/asm/hwcap.h' in Linux kernel source code.
constexpr unsigned long kHWCAP_PMULL = 1 << 4;
constexpr unsigned long kHWCAP_CRC32 = 1 << 7;
unsigned long hwcap = (&getauxval != nullptr) ? getauxval(AT_HWCAP) : 0;
return (hwcap & (kHWCAP_PMULL | kHWCAP_CRC32)) ==
(kHWCAP_PMULL | kHWCAP_CRC32);
+#elif defined(__APPLE__)
+ int val = 0;
+ size_t len = sizeof(val);
+ return sysctlbyname("hw.optional.armv8_crc32", &val, &len, nullptr, 0) == 0
+ && val != 0;
#else
return false;
#endif // HAVE_STRONG_GETAUXVAL || HAVE_WEAK_GETAUXVAL
@@ -47,4 +57,4 @@ inline bool CanUseArm64Linux() {
#endif // HAVE_ARM64_CRC32C
-#endif // CRC32C_CRC32C_ARM_LINUX_CHECK_H_
+#endif // CRC32C_CRC32C_ARM_CHECK_H_
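On Apple platforms the new check asks the kernel whether the CPU advertises the ARMv8 CRC32 extension. Pulled out of the header, the probe can be exercised as a tiny stand-alone program (macOS only, mirroring the `sysctlbyname` call added above):

```cpp
// Minimal macOS-only probe for the ARMv8 CRC32 feature bit, mirroring the
// sysctlbyname() call added in crc32c_arm64_check.h.
#include <cstdio>
#include <sys/types.h>
#include <sys/sysctl.h>

static bool CanUseArm64Crc32()
{
    int val = 0;
    size_t len = sizeof(val);
    // "hw.optional.armv8_crc32" is 1 on Apple Silicon CPUs that support the
    // CRC32 instructions, 0 (or the key is absent) otherwise.
    return sysctlbyname("hw.optional.armv8_crc32", &val, &len, nullptr, 0) == 0 && val != 0;
}

int main()
{
    std::printf("ARMv8 CRC32 available: %s\n", CanUseArm64Crc32() ? "yes" : "no");
}
```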
diff --git a/src/crc32c/src/crc32c_benchmark.cc b/src/crc32c/src/crc32c_benchmark.cc
index c464304b3f..51194b370a 100644
--- a/src/crc32c/src/crc32c_benchmark.cc
+++ b/src/crc32c/src/crc32c_benchmark.cc
@@ -16,7 +16,7 @@
#endif // CRC32C_TESTS_BUILT_WITH_GLOG
#include "./crc32c_arm64.h"
-#include "./crc32c_arm64_linux_check.h"
+#include "./crc32c_arm64_check.h"
#include "./crc32c_internal.h"
#include "./crc32c_sse42.h"
#include "./crc32c_sse42_check.h"
@@ -58,8 +58,8 @@ BENCHMARK_REGISTER_F(CRC32CBenchmark, Portable)
#if HAVE_ARM64_CRC32C
-BENCHMARK_DEFINE_F(CRC32CBenchmark, ArmLinux)(benchmark::State& state) {
- if (!crc32c::CanUseArm64Linux()) {
+BENCHMARK_DEFINE_F(CRC32CBenchmark, ArmCRC32C)(benchmark::State& state) {
+ if (!crc32c::CanUseArm64Crc32()) {
state.SkipWithError("ARM CRC32C instructions not available or not enabled");
return;
}
@@ -69,7 +69,7 @@ BENCHMARK_DEFINE_F(CRC32CBenchmark, ArmLinux)(benchmark::State& state) {
crc = crc32c::ExtendArm64(crc, block_buffer_, block_size_);
state.SetBytesProcessed(state.iterations() * block_size_);
}
-BENCHMARK_REGISTER_F(CRC32CBenchmark, ArmLinux)
+BENCHMARK_REGISTER_F(CRC32CBenchmark, ArmCRC32C)
->RangeMultiplier(16)
->Range(256, 16777216); // Block size.
diff --git a/src/crc32c/src/crc32c_read_le.h b/src/crc32c/src/crc32c_read_le.h
index 3bd45fe3aa..673a2a0db7 100644
--- a/src/crc32c/src/crc32c_read_le.h
+++ b/src/crc32c/src/crc32c_read_le.h
@@ -32,14 +32,14 @@ inline uint32_t ReadUint32LE(const uint8_t* buffer) {
// Reads a little-endian 64-bit integer from a 64-bit-aligned buffer.
inline uint64_t ReadUint64LE(const uint8_t* buffer) {
#if BYTE_ORDER_BIG_ENDIAN
- return ((static_cast<uint32_t>(static_cast<uint8_t>(buffer[0]))) |
- (static_cast<uint32_t>(static_cast<uint8_t>(buffer[1])) << 8) |
- (static_cast<uint32_t>(static_cast<uint8_t>(buffer[2])) << 16) |
- (static_cast<uint32_t>(static_cast<uint8_t>(buffer[3])) << 24) |
- (static_cast<uint32_t>(static_cast<uint8_t>(buffer[4])) << 32) |
- (static_cast<uint32_t>(static_cast<uint8_t>(buffer[5])) << 40) |
- (static_cast<uint32_t>(static_cast<uint8_t>(buffer[6])) << 48) |
- (static_cast<uint32_t>(static_cast<uint8_t>(buffer[7])) << 56));
+ return ((static_cast<uint64_t>(static_cast<uint8_t>(buffer[0]))) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[1])) << 8) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[2])) << 16) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[3])) << 24) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[4])) << 32) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[5])) << 40) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[6])) << 48) |
+ (static_cast<uint64_t>(static_cast<uint8_t>(buffer[7])) << 56));
#else // !BYTE_ORDER_BIG_ENDIAN
uint64_t result;
// This should be optimized to a single instruction.
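The change above fixes a width bug: the old code cast each byte to `uint32_t` before shifting, so the shifts by 32, 40, 48 and 56 bits were applied to a 32-bit value, which is undefined behaviour and in practice discards the upper four bytes. Widening to `uint64_t` first makes every shift well defined. A short illustration of the corrected read (the broken variant is only described, since executing it would be UB):

```cpp
#include <cstdint>
#include <cstdio>

int main()
{
    const uint8_t buffer[8] = {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08};

    // Correct: widen each byte to 64 bits *before* shifting, as in the fixed
    // ReadUint64LE(); shifts up to 56 bits are well defined on a uint64_t.
    uint64_t value = 0;
    for (int i = 0; i < 8; ++i) {
        value |= static_cast<uint64_t>(buffer[i]) << (8 * i);
    }

    // The old variant effectively shifted uint32_t values by 32..56 bits, which
    // is undefined behaviour, so bytes 4..7 never reached the upper half of the
    // result. (Deliberately not executed here.)

    std::printf("little-endian read: 0x%016llx\n", static_cast<unsigned long long>(value));
}
```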
diff --git a/src/init.cpp b/src/init.cpp
index 9756a7dfb3..09be3d01fa 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -85,7 +85,6 @@
#include <zmq/zmqrpc.h>
#endif
-static bool fFeeEstimatesInitialized = false;
static const bool DEFAULT_PROXYRANDOMIZE = true;
static const bool DEFAULT_REST_ENABLE = false;
static const bool DEFAULT_STOPAFTERBLOCKIMPORT = false;
@@ -99,8 +98,6 @@ static const bool DEFAULT_STOPAFTERBLOCKIMPORT = false;
#define MIN_CORE_FILEDESCRIPTORS 150
#endif
-static const char* FEE_ESTIMATES_FILENAME="fee_estimates.dat";
-
static const char* DEFAULT_ASMAP_FILENAME="ip_asn.map";
/**
@@ -203,7 +200,7 @@ void Shutdown(NodeContext& node)
// using the other before destroying them.
if (node.peerman) UnregisterValidationInterface(node.peerman.get());
// Follow the lock order requirements:
- // * CheckForStaleTipAndEvictPeers locks cs_main before indirectly calling GetExtraOutboundCount
+ // * CheckForStaleTipAndEvictPeers locks cs_main before indirectly calling GetExtraFullOutboundCount
// which locks cs_vNodes.
// * ProcessMessage locks cs_main and g_cs_orphans before indirectly calling ForEachNode which
// locks cs_vNodes.
@@ -236,17 +233,8 @@ void Shutdown(NodeContext& node)
DumpMempool(*node.mempool);
}
- if (fFeeEstimatesInitialized)
- {
- ::feeEstimator.FlushUnconfirmed();
- fs::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME;
- CAutoFile est_fileout(fsbridge::fopen(est_path, "wb"), SER_DISK, CLIENT_VERSION);
- if (!est_fileout.IsNull())
- ::feeEstimator.Write(est_fileout);
- else
- LogPrintf("%s: Failed to write fee estimates to %s\n", __func__, est_path.string());
- fFeeEstimatesInitialized = false;
- }
+ // Drop transactions we were still watching, and record fee estimations.
+ if (node.fee_estimator) node.fee_estimator->Flush();
// FlushStateToDisk generates a ChainStateFlushed callback, which we should avoid missing
if (node.chainman) {
@@ -304,6 +292,7 @@ void Shutdown(NodeContext& node)
globalVerifyHandle.reset();
ECC_Stop();
node.mempool.reset();
+ node.fee_estimator.reset();
node.chainman = nullptr;
node.scheduler.reset();
@@ -1384,20 +1373,31 @@ bool AppInitMain(const util::Ref& context, NodeContext& node, interfaces::BlockA
// is not yet setup and may end up being set up twice if we
// need to reindex later.
+ fListen = args.GetBoolArg("-listen", DEFAULT_LISTEN);
+ fDiscover = args.GetBoolArg("-discover", true);
+ const bool ignores_incoming_txs{args.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY)};
+
assert(!node.banman);
node.banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", &uiInterface, args.GetArg("-bantime", DEFAULT_MISBEHAVING_BANTIME));
assert(!node.connman);
node.connman = MakeUnique<CConnman>(GetRand(std::numeric_limits<uint64_t>::max()), GetRand(std::numeric_limits<uint64_t>::max()), args.GetBoolArg("-networkactive", true));
+ assert(!node.fee_estimator);
+ // Don't initialize fee estimation with old data if we don't relay transactions,
+ // as they would never get updated.
+ if (!ignores_incoming_txs) node.fee_estimator = std::make_unique<CBlockPolicyEstimator>();
+
assert(!node.mempool);
int check_ratio = std::min<int>(std::max<int>(args.GetArg("-checkmempool", chainparams.DefaultConsistencyChecks() ? 1 : 0), 0), 1000000);
- node.mempool = MakeUnique<CTxMemPool>(&::feeEstimator, check_ratio);
+ node.mempool = std::make_unique<CTxMemPool>(node.fee_estimator.get(), check_ratio);
assert(!node.chainman);
node.chainman = &g_chainman;
ChainstateManager& chainman = *Assert(node.chainman);
- node.peerman.reset(new PeerManager(chainparams, *node.connman, node.banman.get(), *node.scheduler, chainman, *node.mempool));
+ assert(!node.peerman);
+ node.peerman = std::make_unique<PeerManager>(chainparams, *node.connman, node.banman.get(),
+ *node.scheduler, chainman, *node.mempool, ignores_incoming_txs);
RegisterValidationInterface(node.peerman.get());
// sanitize comments per BIP-0014, format user agent and check total size
@@ -1473,11 +1473,6 @@ bool AppInitMain(const util::Ref& context, NodeContext& node, interfaces::BlockA
}
}
- // see Step 2: parameter interactions for more information about these
- fListen = args.GetBoolArg("-listen", DEFAULT_LISTEN);
- fDiscover = args.GetBoolArg("-discover", true);
- g_relay_txes = !args.GetBoolArg("-blocksonly", DEFAULT_BLOCKSONLY);
-
for (const std::string& strAddr : args.GetArgs("-externalip")) {
CService addrLocal;
if (Lookup(strAddr, addrLocal, GetListenPort(), fNameLookup) && addrLocal.IsValid())
@@ -1785,13 +1780,6 @@ bool AppInitMain(const util::Ref& context, NodeContext& node, interfaces::BlockA
return false;
}
- fs::path est_path = GetDataDir() / FEE_ESTIMATES_FILENAME;
- CAutoFile est_filein(fsbridge::fopen(est_path, "rb"), SER_DISK, CLIENT_VERSION);
- // Allowed to fail as this file IS missing on first startup.
- if (!est_filein.IsNull())
- ::feeEstimator.Read(est_filein);
- fFeeEstimatesInitialized = true;
-
// ********************************************************* Step 8: start indexers
if (args.GetBoolArg("-txindex", DEFAULT_TXINDEX)) {
g_txindex = MakeUnique<TxIndex>(nTxIndexCache, false, fReindex);
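The net effect in init.cpp is that the fee estimator becomes an ordinary, optionally-present member of the node context: it is only constructed when the node actually relays transactions, and its state is persisted by a single `Flush()` call on shutdown. A toy sketch of that ownership pattern, using stand-in types rather than the real CBlockPolicyEstimator/NodeContext:

```cpp
#include <cstdio>
#include <memory>

// Stand-in for CBlockPolicyEstimator, to illustrate the new ownership only.
struct FeeEstimatorLike {
    void Flush() { std::printf("flushing unconfirmed txs and writing estimates\n"); }
};

struct NodeContextLike {
    std::unique_ptr<FeeEstimatorLike> fee_estimator;
};

int main()
{
    const bool ignores_incoming_txs = false; // -blocksonly would make this true
    NodeContextLike node;
    // Don't build an estimator whose data could never be updated.
    if (!ignores_incoming_txs) node.fee_estimator = std::make_unique<FeeEstimatorLike>();

    // ... node runs ...

    // Shutdown: record fee estimates only if an estimator exists at all.
    if (node.fee_estimator) node.fee_estimator->Flush();
    node.fee_estimator.reset();
}
```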
diff --git a/src/interfaces/chain.h b/src/interfaces/chain.h
index 85d09be0f3..1a49518d69 100644
--- a/src/interfaces/chain.h
+++ b/src/interfaces/chain.h
@@ -44,6 +44,10 @@ public:
FoundBlock& time(int64_t& time) { m_time = &time; return *this; }
FoundBlock& maxTime(int64_t& max_time) { m_max_time = &max_time; return *this; }
FoundBlock& mtpTime(int64_t& mtp_time) { m_mtp_time = &mtp_time; return *this; }
+ //! Return whether block is in the active (most-work) chain.
+ FoundBlock& inActiveChain(bool& in_active_chain) { m_in_active_chain = &in_active_chain; return *this; }
+ //! Return next block in the active chain if current block is in the active chain.
+ FoundBlock& nextBlock(const FoundBlock& next_block) { m_next_block = &next_block; return *this; }
//! Read block data from disk. If the block exists but doesn't have data
//! (for example due to pruning), the CBlock variable will be set to null.
FoundBlock& data(CBlock& data) { m_data = &data; return *this; }
@@ -53,6 +57,8 @@ public:
int64_t* m_time = nullptr;
int64_t* m_max_time = nullptr;
int64_t* m_mtp_time = nullptr;
+ bool* m_in_active_chain = nullptr;
+ const FoundBlock* m_next_block = nullptr;
CBlock* m_data = nullptr;
};
@@ -77,9 +83,9 @@ public:
//! wallet cache it, fee estimation being driven by node mempool, wallet
//! should be the consumer.
//!
-//! * The `guessVerificationProgress`, `getBlockHeight`, `getBlockHash`, etc
-//! methods can go away if rescan logic is moved on the node side, and wallet
-//! only register rescan request.
+//! * `guessVerificationProgress` and similar methods can go away if rescan
+//! logic moves out of the wallet, and the wallet just requests scans from the
+//! node (https://github.com/bitcoin/bitcoin/issues/11756)
class Chain
{
public:
@@ -90,11 +96,6 @@ public:
//! any blocks)
virtual Optional<int> getHeight() = 0;
- //! Get block height above genesis block. Returns 0 for genesis block,
- //! 1 for following block, and so on. Returns nullopt for a block not
- //! included in the current chain.
- virtual Optional<int> getBlockHeight(const uint256& hash) = 0;
-
//! Get block hash. Height must be valid or this function will abort.
virtual uint256 getBlockHash(int height) = 0;
@@ -102,13 +103,6 @@ public:
//! pruned), and contains transactions.
virtual bool haveBlockOnDisk(int height) = 0;
- //! Return height of the first block in the chain with timestamp equal
- //! or greater than the given time and height equal or greater than the
- //! given height, or nullopt if there is no block with a high enough
- //! timestamp and height. Also return the block hash as an optional output parameter
- //! (to avoid the cost of a second lookup in case this information is needed.)
- virtual Optional<int> findFirstBlockWithTimeAndHeight(int64_t time, int height, uint256* hash) = 0;
-
//! Get locator for the current chain tip.
virtual CBlockLocator getTipLocator() = 0;
@@ -130,11 +124,6 @@ public:
//! information.
virtual bool findFirstBlockWithTimeAndHeight(int64_t min_time, int min_height, const FoundBlock& block={}) = 0;
- //! Find next block if block is part of current chain. Also flag if
- //! there was a reorg and the specified block hash is no longer in the
- //! current chain, and optionally return block information.
- virtual bool findNextBlock(const uint256& block_hash, int block_height, const FoundBlock& next={}, bool* reorg=nullptr) = 0;
-
//! Find ancestor of block at specified height and optionally return
//! ancestor information.
virtual bool findAncestorByHeight(const uint256& block_hash, int ancestor_height, const FoundBlock& ancestor_out={}) = 0;
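The new `inActiveChain`/`nextBlock` setters follow FoundBlock's existing chained output-parameter style: the caller registers which outputs it wants, and the chain implementation fills them in once a matching block is found. Below is a self-contained imitation of that pattern; it is an illustrative mock, not the real interfaces::FoundBlock:

```cpp
#include <cstdint>
#include <cstdio>

// Minimal stand-alone imitation of the chained output-parameter pattern used
// by interfaces::FoundBlock.
class FoundBlockLike
{
public:
    FoundBlockLike& time(int64_t& t) { m_time = &t; return *this; }
    FoundBlockLike& inActiveChain(bool& b) { m_in_active_chain = &b; return *this; }

    // What a Chain implementation would do once it has located the block.
    void fill(int64_t block_time, bool active) const
    {
        if (m_time) *m_time = block_time;
        if (m_in_active_chain) *m_in_active_chain = active;
    }

private:
    int64_t* m_time{nullptr};
    bool* m_in_active_chain{nullptr};
};

int main()
{
    int64_t block_time = 0;
    bool in_active_chain = false;
    // Caller chains setters to say which outputs it wants filled in.
    FoundBlockLike query = FoundBlockLike().time(block_time).inActiveChain(in_active_chain);
    query.fill(/*block_time=*/1600000000, /*active=*/true); // provider side
    std::printf("time=%lld active=%d\n", static_cast<long long>(block_time), in_active_chain);
}
```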
diff --git a/src/interfaces/node.h b/src/interfaces/node.h
index 5079be038e..36f76aeb4f 100644
--- a/src/interfaces/node.h
+++ b/src/interfaces/node.h
@@ -151,9 +151,6 @@ public:
//! Get network active.
virtual bool getNetworkActive() = 0;
- //! Estimate smart fee.
- virtual CFeeRate estimateSmartFee(int num_blocks, bool conservative, int* returned_target = nullptr) = 0;
-
//! Get dust relay fee.
virtual CFeeRate getDustRelayFee() = 0;
diff --git a/src/net.cpp b/src/net.cpp
index 9c6d7b6375..7cb91f1388 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -111,7 +111,6 @@ static const uint64_t RANDOMIZER_ID_ADDRCACHE = 0x1cf2e4ddd306dda9ULL; // SHA256
//
bool fDiscover = true;
bool fListen = true;
-bool g_relay_txes = !DEFAULT_BLOCKSONLY;
RecursiveMutex cs_mapLocalHost;
std::map<CNetAddr, LocalServiceInfo> mapLocalHost GUARDED_BY(cs_mapLocalHost);
static bool vfLimited[NET_MAX] GUARDED_BY(cs_mapLocalHost) = {};
@@ -585,6 +584,8 @@ void CNode::copyStats(CNodeStats &stats, const std::vector<bool> &m_asmap)
}
stats.fInbound = IsInboundConn();
stats.m_manual_connection = IsManualConn();
+ X(m_bip152_highbandwidth_to);
+ X(m_bip152_highbandwidth_from);
X(nStartingHeight);
{
LOCK(cs_vSend);
@@ -1826,18 +1827,32 @@ void CConnman::SetTryNewOutboundPeer(bool flag)
// Also exclude peers that haven't finished initial connection handshake yet
// (so that we don't decide we're over our desired connection limit, and then
// evict some peer that has finished the handshake)
-int CConnman::GetExtraOutboundCount()
+int CConnman::GetExtraFullOutboundCount()
{
- int nOutbound = 0;
+ int full_outbound_peers = 0;
{
LOCK(cs_vNodes);
for (const CNode* pnode : vNodes) {
- if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsOutboundOrBlockRelayConn()) {
- ++nOutbound;
+ if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsFullOutboundConn()) {
+ ++full_outbound_peers;
}
}
}
- return std::max(nOutbound - m_max_outbound_full_relay - m_max_outbound_block_relay, 0);
+ return std::max(full_outbound_peers - m_max_outbound_full_relay, 0);
+}
+
+int CConnman::GetExtraBlockRelayCount()
+{
+ int block_relay_peers = 0;
+ {
+ LOCK(cs_vNodes);
+ for (const CNode* pnode : vNodes) {
+ if (pnode->fSuccessfullyConnected && !pnode->fDisconnect && pnode->IsBlockOnlyConn()) {
+ ++block_relay_peers;
+ }
+ }
+ }
+ return std::max(block_relay_peers - m_max_outbound_block_relay, 0);
}
void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
@@ -1868,6 +1883,7 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// Minimum time before next feeler connection (in microseconds).
int64_t nNextFeeler = PoissonNextSend(nStart*1000*1000, FEELER_INTERVAL);
+ int64_t nNextExtraBlockRelay = PoissonNextSend(nStart*1000*1000, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
while (!interruptNet)
{
ProcessAddrFetch();
@@ -1940,8 +1956,9 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
// until we hit our block-relay-only peer limit.
// GetTryNewOutboundPeer() gets set when a stale tip is detected, so we
// try opening an additional OUTBOUND_FULL_RELAY connection. If none of
- // these conditions are met, check the nNextFeeler timer to decide if
- // we should open a FEELER.
+ // these conditions are met, check to see if it's time to try an extra
+ // block-relay-only peer (to confirm our tip is current, see below) or the nNextFeeler
+ // timer to decide if we should open a FEELER.
if (!m_anchors.empty() && (nOutboundBlockRelay < m_max_outbound_block_relay)) {
conn_type = ConnectionType::BLOCK_RELAY;
@@ -1952,6 +1969,30 @@ void CConnman::ThreadOpenConnections(const std::vector<std::string> connect)
conn_type = ConnectionType::BLOCK_RELAY;
} else if (GetTryNewOutboundPeer()) {
// OUTBOUND_FULL_RELAY
+ } else if (nTime > nNextExtraBlockRelay && m_start_extra_block_relay_peers) {
+ // Periodically connect to a peer (using regular outbound selection
+ // methodology from addrman) and stay connected long enough to sync
+ // headers, but not much else.
+ //
+ // Then disconnect the peer, if we haven't learned anything new.
+ //
+ // The idea is to make eclipse attacks very difficult to pull off,
+ // because every few minutes we're finding a new peer to learn headers
+ // from.
+ //
+ // This is similar to the logic for trying extra outbound (full-relay)
+ // peers, except:
+ // - we do this all the time on a poisson timer, rather than just when
+ // our tip is stale
+ // - we potentially disconnect our next-youngest block-relay-only peer, if our
+ // newest block-relay-only peer delivers a block more recently.
+ // See the eviction logic in net_processing.cpp.
+ //
+ // Because we can promote these connections to block-relay-only
+ // connections, they do not get their own ConnectionType enum
+ // (similar to how we deal with extra outbound peers).
+ nNextExtraBlockRelay = PoissonNextSend(nTime, EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL);
+ conn_type = ConnectionType::BLOCK_RELAY;
} else if (nTime > nNextFeeler) {
nNextFeeler = PoissonNextSend(nTime, FEELER_INTERVAL);
conn_type = ConnectionType::FEELER;
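The scheduling described in the long comment above is a Poisson-spaced timer that occasionally requests one short-lived BLOCK_RELAY connection. A condensed, stand-alone sketch of that decision, with a simplified stand-in for PoissonNextSend and none of the other connection-type checks:

```cpp
#include <cstdint>
#include <cstdio>
#include <random>

// Simplified stand-in for PoissonNextSend(): draws the next event time for a
// Poisson process with the given average interval, in seconds. Illustrative
// only; the real helper in net.cpp is implemented differently.
int64_t NextPoissonEvent(int64_t now, int average_interval_seconds, std::mt19937_64& rng)
{
    std::exponential_distribution<double> interval(1.0 / average_interval_seconds);
    return now + static_cast<int64_t>(interval(rng)) + 1;
}

int main()
{
    std::mt19937_64 rng{2020};
    const int kExtraBlockRelayInterval = 300; // seconds, as EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL
    int64_t next_extra_block_relay = NextPoissonEvent(0, kExtraBlockRelayInterval, rng);

    // Simulated connection loop: on average once every five minutes we would
    // open one extra, short-lived block-relay-only connection to double-check
    // that our tip is current, then let the eviction logic drop it again.
    for (int64_t now = 0; now < 3600; now += 30) {
        if (now > next_extra_block_relay) {
            std::printf("t=%llds: open one extra block-relay-only connection\n",
                        static_cast<long long>(now));
            next_extra_block_relay = NextPoissonEvent(now, kExtraBlockRelayInterval, rng);
        }
    }
}
```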
diff --git a/src/net.h b/src/net.h
index 21ee5e7808..41f7fa93ff 100644
--- a/src/net.h
+++ b/src/net.h
@@ -24,14 +24,15 @@
#include <sync.h>
#include <threadinterrupt.h>
#include <uint256.h>
+#include <util/check.h>
#include <atomic>
+#include <condition_variable>
#include <cstdint>
#include <deque>
#include <map>
-#include <thread>
#include <memory>
-#include <condition_variable>
+#include <thread>
class CScheduler;
class CNode;
@@ -47,6 +48,8 @@ static const bool DEFAULT_WHITELISTFORCERELAY = false;
static const int TIMEOUT_INTERVAL = 20 * 60;
/** Run the feeler connection loop once every 2 minutes or 120 seconds. **/
static const int FEELER_INTERVAL = 120;
+/** Run the extra block-relay-only connection loop once every 5 minutes. **/
+static const int EXTRA_BLOCK_RELAY_ONLY_PEER_INTERVAL = 300;
/** The maximum number of addresses from our addrman to return in response to a getaddr message. */
static constexpr size_t MAX_ADDR_TO_SEND = 1000;
/** Maximum length of incoming protocol messages (no message over 4 MB is currently acceptable). */
@@ -329,13 +332,20 @@ public:
void SetTryNewOutboundPeer(bool flag);
bool GetTryNewOutboundPeer();
+ void StartExtraBlockRelayPeers() {
+ LogPrint(BCLog::NET, "net: enabling extra block-relay-only peers\n");
+ m_start_extra_block_relay_peers = true;
+ }
+
// Return the number of outbound peers we have in excess of our target (eg,
// if we previously called SetTryNewOutboundPeer(true), and have since set
// to false, we may have extra peers that we wish to disconnect). This may
// return a value less than (num_outbound_connections - num_outbound_slots)
// in cases where some outbound connections are not yet fully connected, or
// not yet fully disconnected.
- int GetExtraOutboundCount();
+ int GetExtraFullOutboundCount();
+ // Count the number of block-relay-only peers we have over our limit.
+ int GetExtraBlockRelayCount();
bool AddNode(const std::string& node);
bool RemoveAddedNode(const std::string& node);
@@ -593,6 +603,12 @@ private:
* This takes the place of a feeler connection */
std::atomic_bool m_try_another_outbound_peer;
+ /** flag for initiating extra block-relay-only peer connections.
+ * this should only be enabled after initial chain sync has occurred,
+ * as these connections are intended to be short-lived and low-bandwidth.
+ */
+ std::atomic_bool m_start_extra_block_relay_peers{false};
+
std::atomic<int64_t> m_next_send_inv_to_incoming{0};
/**
@@ -664,7 +680,6 @@ CAddress GetLocalAddress(const CNetAddr *paddrPeer, ServiceFlags nLocalServices)
extern bool fDiscover;
extern bool fListen;
-extern bool g_relay_txes;
/** Subversion as sent to the P2P network in `version` messages */
extern std::string strSubVersion;
@@ -697,6 +712,8 @@ public:
std::string cleanSubVer;
bool fInbound;
bool m_manual_connection;
+ bool m_bip152_highbandwidth_to;
+ bool m_bip152_highbandwidth_from;
int nStartingHeight;
uint64_t nSendBytes;
mapMsgCmdSize mapSendBytesPerMsgCmd;
@@ -984,6 +1001,10 @@ protected:
public:
uint256 hashContinue;
std::atomic<int> nStartingHeight{-1};
+ // We selected peer as (compact blocks) high-bandwidth peer (BIP152)
+ std::atomic<bool> m_bip152_highbandwidth_to{false};
+ // Peer selected us as (compact blocks) high-bandwidth peer (BIP152)
+ std::atomic<bool> m_bip152_highbandwidth_from{false};
// flood relay
std::vector<CAddress> vAddrToSend;
@@ -1015,7 +1036,7 @@ public:
// Used for BIP35 mempool sending
bool fSendMempool GUARDED_BY(cs_tx_inventory){false};
// Last time a "MEMPOOL" request was serviced.
- std::atomic<std::chrono::seconds> m_last_mempool_req{std::chrono::seconds{0}};
+ std::atomic<std::chrono::seconds> m_last_mempool_req{0s};
std::chrono::microseconds nNextInvSend{0};
RecursiveMutex cs_feeFilter;
@@ -1048,7 +1069,7 @@ public:
// The pong reply we're expecting, or 0 if no pong expected.
std::atomic<uint64_t> nPingNonceSent{0};
/** When the last ping was sent, or 0 if no ping was ever sent */
- std::atomic<std::chrono::microseconds> m_ping_start{std::chrono::microseconds{0}};
+ std::atomic<std::chrono::microseconds> m_ping_start{0us};
// Last measured round-trip time.
std::atomic<int64_t> nPingUsecTime{0};
// Best measured round-trip time.
@@ -1131,6 +1152,7 @@ public:
void SetCommonVersion(int greatest_common_version)
{
+ Assume(m_greatest_common_version == INIT_PROTO_VERSION);
m_greatest_common_version = greatest_common_version;
}
int GetCommonVersion() const
diff --git a/src/net_processing.cpp b/src/net_processing.cpp
index 1b4a05f0b6..dacedef472 100644
--- a/src/net_processing.cpp
+++ b/src/net_processing.cpp
@@ -422,58 +422,6 @@ static CNodeState *State(NodeId pnode) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
return &it->second;
}
-/**
- * Data structure for an individual peer. This struct is not protected by
- * cs_main since it does not contain validation-critical data.
- *
- * Memory is owned by shared pointers and this object is destructed when
- * the refcount drops to zero.
- *
- * TODO: move most members from CNodeState to this structure.
- * TODO: move remaining application-layer data members from CNode to this structure.
- */
-struct Peer {
- /** Same id as the CNode object for this peer */
- const NodeId m_id{0};
-
- /** Protects misbehavior data members */
- Mutex m_misbehavior_mutex;
- /** Accumulated misbehavior score for this peer */
- int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
- /** Whether this peer should be disconnected and marked as discouraged (unless it has the noban permission). */
- bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};
-
- /** Set of txids to reconsider once their parent transactions have been accepted **/
- std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans);
-
- /** Protects m_getdata_requests **/
- Mutex m_getdata_requests_mutex;
- /** Work queue of items requested by this peer **/
- std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
-
- explicit Peer(NodeId id) : m_id(id) {}
-};
-
-using PeerRef = std::shared_ptr<Peer>;
-
-/**
- * Map of all Peer objects, keyed by peer id. This map is protected
- * by the global g_peer_mutex. Once a shared pointer reference is
- * taken, the lock may be released. Individual fields are protected by
- * their own locks.
- */
-Mutex g_peer_mutex;
-static std::map<NodeId, PeerRef> g_peer_map GUARDED_BY(g_peer_mutex);
-
-/** Get a shared pointer to the Peer object.
- * May return nullptr if the Peer object can't be found. */
-static PeerRef GetPeerRef(NodeId id)
-{
- LOCK(g_peer_mutex);
- auto it = g_peer_map.find(id);
- return it != g_peer_map.end() ? it->second : nullptr;
-}
-
static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUSIVE_LOCKS_REQUIRED(cs_main)
{
nPreferredDownload -= state->fPreferredDownload;
@@ -484,32 +432,6 @@ static void UpdatePreferredDownload(const CNode& node, CNodeState* state) EXCLUS
nPreferredDownload += state->fPreferredDownload;
}
-static void PushNodeVersion(CNode& pnode, CConnman& connman, int64_t nTime)
-{
- // Note that pnode->GetLocalServices() is a reflection of the local
- // services we were offering when the CNode object was created for this
- // peer.
- ServiceFlags nLocalNodeServices = pnode.GetLocalServices();
- uint64_t nonce = pnode.GetLocalNonce();
- int nNodeStartingHeight = pnode.GetMyStartingHeight();
- NodeId nodeid = pnode.GetId();
- CAddress addr = pnode.addr;
-
- CAddress addrYou = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ?
- addr :
- CAddress(CService(), addr.nServices);
- CAddress addrMe = CAddress(CService(), nLocalNodeServices);
-
- connman.PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
- nonce, strSubVersion, nNodeStartingHeight, ::g_relay_txes && pnode.m_tx_relay != nullptr));
-
- if (fLogIPs) {
- LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
- } else {
- LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
- }
-}
-
// Returns a bool indicating whether we requested this block.
// Also used if a block was /not/ received and timed out or started with another peer
static bool MarkBlockAsReceived(const uint256& hash) EXCLUSIVE_LOCKS_REQUIRED(cs_main) {
@@ -635,11 +557,15 @@ static void MaybeSetPeerAsAnnouncingHeaderAndIDs(NodeId nodeid, CConnman& connma
// blocks using compact encodings.
connman.ForNode(lNodesAnnouncingHeaderAndIDs.front(), [&connman, nCMPCTBLOCKVersion](CNode* pnodeStop){
connman.PushMessage(pnodeStop, CNetMsgMaker(pnodeStop->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/false, nCMPCTBLOCKVersion));
+ // save BIP152 bandwidth state: we select peer to be low-bandwidth
+ pnodeStop->m_bip152_highbandwidth_to = false;
return true;
});
lNodesAnnouncingHeaderAndIDs.pop_front();
}
connman.PushMessage(pfrom, CNetMsgMaker(pfrom->GetCommonVersion()).Make(NetMsgType::SENDCMPCT, /*fAnnounceUsingCMPCTBLOCK=*/true, nCMPCTBLOCKVersion));
+ // save BIP152 bandwidth state: we select peer to be high-bandwidth
+ pfrom->m_bip152_highbandwidth_to = true;
lNodesAnnouncingHeaderAndIDs.push_back(pfrom->GetId());
return true;
});
@@ -760,6 +686,32 @@ static void FindNextBlocksToDownload(NodeId nodeid, unsigned int count, std::vec
} // namespace
+void PeerManager::PushNodeVersion(CNode& pnode, int64_t nTime)
+{
+ // Note that pnode->GetLocalServices() is a reflection of the local
+ // services we were offering when the CNode object was created for this
+ // peer.
+ ServiceFlags nLocalNodeServices = pnode.GetLocalServices();
+ uint64_t nonce = pnode.GetLocalNonce();
+ int nNodeStartingHeight = pnode.GetMyStartingHeight();
+ NodeId nodeid = pnode.GetId();
+ CAddress addr = pnode.addr;
+
+ CAddress addrYou = addr.IsRoutable() && !IsProxy(addr) && addr.IsAddrV1Compatible() ?
+ addr :
+ CAddress(CService(), addr.nServices);
+ CAddress addrMe = CAddress(CService(), nLocalNodeServices);
+
+ m_connman.PushMessage(&pnode, CNetMsgMaker(INIT_PROTO_VERSION).Make(NetMsgType::VERSION, PROTOCOL_VERSION, (uint64_t)nLocalNodeServices, nTime, addrYou, addrMe,
+ nonce, strSubVersion, nNodeStartingHeight, !m_ignore_incoming_txs && pnode.m_tx_relay != nullptr));
+
+ if (fLogIPs) {
+ LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, them=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), addrYou.ToString(), nodeid);
+ } else {
+ LogPrint(BCLog::NET, "send version message: version %d, blocks=%d, us=%s, peer=%d\n", PROTOCOL_VERSION, nNodeStartingHeight, addrMe.ToString(), nodeid);
+ }
+}
+
void PeerManager::AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
{
AssertLockHeld(::cs_main); // For m_txrequest
@@ -807,11 +759,11 @@ void PeerManager::InitializeNode(CNode *pnode) {
}
{
PeerRef peer = std::make_shared<Peer>(nodeid);
- LOCK(g_peer_mutex);
- g_peer_map.emplace_hint(g_peer_map.end(), nodeid, std::move(peer));
+ LOCK(m_peer_mutex);
+ m_peer_map.emplace_hint(m_peer_map.end(), nodeid, std::move(peer));
}
if (!pnode->IsInboundConn()) {
- PushNodeVersion(*pnode, m_connman, GetTime());
+ PushNodeVersion(*pnode, GetTime());
}
}
@@ -842,11 +794,9 @@ void PeerManager::FinalizeNode(const CNode& node, bool& fUpdateConnectionTime) {
LOCK(cs_main);
int misbehavior{0};
{
- PeerRef peer = GetPeerRef(nodeid);
+ PeerRef peer = RemovePeer(nodeid);
assert(peer != nullptr);
misbehavior = WITH_LOCK(peer->m_misbehavior_mutex, return peer->m_misbehavior_score);
- LOCK(g_peer_mutex);
- g_peer_map.erase(nodeid);
}
CNodeState *state = State(nodeid);
assert(state != nullptr);
@@ -887,7 +837,26 @@ void PeerManager::FinalizeNode(const CNode& node, bool& fUpdateConnectionTime) {
LogPrint(BCLog::NET, "Cleared nodestate for peer=%d\n", nodeid);
}
-bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
+PeerRef PeerManager::GetPeerRef(NodeId id) const
+{
+ LOCK(m_peer_mutex);
+ auto it = m_peer_map.find(id);
+ return it != m_peer_map.end() ? it->second : nullptr;
+}
+
+PeerRef PeerManager::RemovePeer(NodeId id)
+{
+ PeerRef ret;
+ LOCK(m_peer_mutex);
+ auto it = m_peer_map.find(id);
+ if (it != m_peer_map.end()) {
+ ret = std::move(it->second);
+ m_peer_map.erase(it);
+ }
+ return ret;
+}
+
+bool PeerManager::GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats) {
{
LOCK(cs_main);
CNodeState* state = State(nodeid);
@@ -1159,13 +1128,15 @@ static bool BlockRequestAllowed(const CBlockIndex* pindex, const Consensus::Para
}
PeerManager::PeerManager(const CChainParams& chainparams, CConnman& connman, BanMan* banman,
- CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool)
+ CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool,
+ bool ignore_incoming_txs)
: m_chainparams(chainparams),
m_connman(connman),
m_banman(banman),
m_chainman(chainman),
m_mempool(pool),
- m_stale_tip_check_time(0)
+ m_stale_tip_check_time(0),
+ m_ignore_incoming_txs(ignore_incoming_txs)
{
// Initialize global variables that cannot be constructed at startup.
recentRejects.reset(new CRollingBloomFilter(120000, 0.000001));
@@ -2284,10 +2255,8 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
if (peer == nullptr) return;
if (msg_type == NetMsgType::VERSION) {
- // Each connection can only send one version message
- if (pfrom.nVersion != 0)
- {
- Misbehaving(pfrom.GetId(), 1, "redundant version message");
+ if (pfrom.nVersion != 0) {
+ LogPrint(BCLog::NET, "redundant version message from peer=%d\n", pfrom.GetId());
return;
}
@@ -2347,9 +2316,9 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
SeenLocal(addrMe);
}
- // Be shy and don't send version until we hear
- if (pfrom.IsInboundConn())
- PushNodeVersion(pfrom, m_connman, GetAdjustedTime());
+ // Inbound peers send us their version message when they connect.
+ // We send our version message in response.
+ if (pfrom.IsInboundConn()) PushNodeVersion(pfrom, GetAdjustedTime());
// Change version
const int greatest_common_version = std::min(nVersion, PROTOCOL_VERSION);
@@ -2362,10 +2331,16 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::WTXIDRELAY));
}
- m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
-
// Signal ADDRv2 support (BIP155).
- m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
+ if (greatest_common_version >= 70016) {
+ // BIP155 defines addrv2 and sendaddrv2 for all protocol versions, but some
+ // implementations reject messages they don't know. As a courtesy, don't send
+ // it to nodes with a version before 70016, as no software is known to support
+ // BIP155 that doesn't announce at least that protocol version number.
+ m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::SENDADDRV2));
+ }
+
+ m_connman.PushMessage(&pfrom, msg_maker.Make(NetMsgType::VERACK));
pfrom.nServices = nServices;
pfrom.SetAddrLocal(addrMe);
@@ -2475,7 +2450,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
if (pfrom.nVersion == 0) {
// Must have a version message before anything else
- Misbehaving(pfrom.GetId(), 1, "non-version message before version handshake");
+ LogPrint(BCLog::NET, "non-version message before version handshake. Message \"%s\" from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
return;
}
@@ -2489,7 +2464,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
LogPrintf("New outbound peer connected: version: %d, blocks=%d, peer=%d%s (%s)\n",
pfrom.nVersion.load(), pfrom.nStartingHeight,
pfrom.GetId(), (fLogIPs ? strprintf(", peeraddr=%s", pfrom.addr.ToString()) : ""),
- pfrom.m_tx_relay == nullptr ? "block-relay" : "full-relay");
+ pfrom.IsBlockOnlyConn() ? "block-relay" : "full-relay");
}
if (pfrom.GetCommonVersion() >= SENDHEADERS_VERSION) {
@@ -2535,6 +2510,17 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
return;
}
+ if (msg_type == NetMsgType::SENDADDRV2) {
+ if (pfrom.fSuccessfullyConnected) {
+ // Disconnect peers that send SENDADDRV2 message after VERACK; this
+ // must be negotiated between VERSION and VERACK.
+ pfrom.fDisconnect = true;
+ return;
+ }
+ pfrom.m_wants_addrv2 = true;
+ return;
+ }
+
if (!pfrom.fSuccessfullyConnected) {
LogPrint(BCLog::NET, "Unsupported message \"%s\" prior to verack from peer=%d\n", SanitizeString(msg_type), pfrom.GetId());
return;
@@ -2602,11 +2588,6 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
return;
}
- if (msg_type == NetMsgType::SENDADDRV2) {
- pfrom.m_wants_addrv2 = true;
- return;
- }
-
if (msg_type == NetMsgType::SENDHEADERS) {
LOCK(cs_main);
State(pfrom.GetId())->fPreferHeaders = true;
@@ -2624,8 +2605,12 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
State(pfrom.GetId())->fProvidesHeaderAndIDs = true;
State(pfrom.GetId())->fWantsCmpctWitness = nCMPCTBLOCKVersion == 2;
}
- if (State(pfrom.GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) // ignore later version announces
+ if (State(pfrom.GetId())->fWantsCmpctWitness == (nCMPCTBLOCKVersion == 2)) { // ignore later version announces
State(pfrom.GetId())->fPreferHeaderAndIDs = fAnnounceUsingCMPCTBLOCK;
+ // save whether peer selects us as BIP152 high-bandwidth peer
+ // (receiving sendcmpct(1) signals high-bandwidth, sendcmpct(0) low-bandwidth)
+ pfrom.m_bip152_highbandwidth_from = fAnnounceUsingCMPCTBLOCK;
+ }
if (!State(pfrom.GetId())->fSupportsDesiredCmpctVersion) {
if (pfrom.GetLocalServices() & NODE_WITNESS)
State(pfrom.GetId())->fSupportsDesiredCmpctVersion = (nCMPCTBLOCKVersion == 2);
@@ -2647,7 +2632,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
// We won't accept tx inv's if we're in blocks-only mode, or this is a
// block-relay-only peer
- bool fBlocksOnly = !g_relay_txes || (pfrom.m_tx_relay == nullptr);
+ bool fBlocksOnly = m_ignore_incoming_txs || (pfrom.m_tx_relay == nullptr);
// Allow peers with relay permission to send data other than blocks in blocks only mode
if (pfrom.HasPermission(PF_RELAY)) {
@@ -2924,7 +2909,7 @@ void PeerManager::ProcessMessage(CNode& pfrom, const std::string& msg_type, CDat
// Stop processing the transaction early if
// 1) We are in blocks only mode and peer has no relay permission
// 2) This peer is a block-relay-only peer
- if ((!g_relay_txes && !pfrom.HasPermission(PF_RELAY)) || (pfrom.m_tx_relay == nullptr))
+ if ((m_ignore_incoming_txs && !pfrom.HasPermission(PF_RELAY)) || (pfrom.m_tx_relay == nullptr))
{
LogPrint(BCLog::NET, "transaction sent in violation of protocol peer=%d\n", pfrom.GetId());
pfrom.fDisconnect = true;
@@ -3922,11 +3907,54 @@ void PeerManager::ConsiderEviction(CNode& pto, int64_t time_in_seconds)
void PeerManager::EvictExtraOutboundPeers(int64_t time_in_seconds)
{
- // Check whether we have too many outbound peers
- int extra_peers = m_connman.GetExtraOutboundCount();
- if (extra_peers > 0) {
- // If we have more outbound peers than we target, disconnect one.
- // Pick the outbound peer that least recently announced
+ // If we have any extra block-relay-only peers, disconnect the youngest unless
+ // it's given us a block -- in which case, compare with the second-youngest, and
+ // out of those two, disconnect the peer who least recently gave us a block.
+ // The youngest block-relay-only peer would be the extra peer we connected
+ // to temporarily in order to sync our tip; see net.cpp.
+ // Note that we use higher nodeid as a measure for most recent connection.
+ if (m_connman.GetExtraBlockRelayCount() > 0) {
+ std::pair<NodeId, int64_t> youngest_peer{-1, 0}, next_youngest_peer{-1, 0};
+
+ m_connman.ForEachNode([&](CNode* pnode) {
+ if (!pnode->IsBlockOnlyConn() || pnode->fDisconnect) return;
+ if (pnode->GetId() > youngest_peer.first) {
+ next_youngest_peer = youngest_peer;
+ youngest_peer.first = pnode->GetId();
+ youngest_peer.second = pnode->nLastBlockTime;
+ }
+ });
+ NodeId to_disconnect = youngest_peer.first;
+ if (youngest_peer.second > next_youngest_peer.second) {
+ // Our newest block-relay-only peer gave us a block more recently;
+ // disconnect our second youngest.
+ to_disconnect = next_youngest_peer.first;
+ }
+ m_connman.ForNode(to_disconnect, [&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
+ AssertLockHeld(::cs_main);
+ // Make sure we're not getting a block right now, and that
+ // we've been connected long enough for this eviction to happen
+ // at all.
+ // Note that we only request blocks from a peer if we learn of a
+ // valid headers chain with at least as much work as our tip.
+ CNodeState *node_state = State(pnode->GetId());
+ if (node_state == nullptr ||
+ (time_in_seconds - pnode->nTimeConnected >= MINIMUM_CONNECT_TIME && node_state->nBlocksInFlight == 0)) {
+ pnode->fDisconnect = true;
+ LogPrint(BCLog::NET, "disconnecting extra block-relay-only peer=%d (last block received at time %d)\n", pnode->GetId(), pnode->nLastBlockTime);
+ return true;
+ } else {
+ LogPrint(BCLog::NET, "keeping block-relay-only peer=%d chosen for eviction (connect time: %d, blocks_in_flight: %d)\n",
+ pnode->GetId(), pnode->nTimeConnected, node_state->nBlocksInFlight);
+ }
+ return false;
+ });
+ }
+
+ // Check whether we have too many OUTBOUND_FULL_RELAY peers
+ if (m_connman.GetExtraFullOutboundCount() > 0) {
+ // If we have more OUTBOUND_FULL_RELAY peers than we target, disconnect one.
+ // Pick the OUTBOUND_FULL_RELAY peer that least recently announced
// us a new block, with ties broken by choosing the more recent
// connection (higher node id)
NodeId worst_peer = -1;
@@ -3935,14 +3963,13 @@ void PeerManager::EvictExtraOutboundPeers(int64_t time_in_seconds)
m_connman.ForEachNode([&](CNode* pnode) EXCLUSIVE_LOCKS_REQUIRED(::cs_main) {
AssertLockHeld(::cs_main);
- // Ignore non-outbound peers, or nodes marked for disconnect already
- if (!pnode->IsOutboundOrBlockRelayConn() || pnode->fDisconnect) return;
+ // Only consider OUTBOUND_FULL_RELAY peers that are not already
+ // marked for disconnection
+ if (!pnode->IsFullOutboundConn() || pnode->fDisconnect) return;
CNodeState *state = State(pnode->GetId());
if (state == nullptr) return; // shouldn't be possible, but just in case
// Don't evict our protected peers
if (state->m_chain_sync.m_protect) return;
- // Don't evict our block-relay-only peers.
- if (pnode->m_tx_relay == nullptr) return;
if (state->m_last_block_announcement < oldest_block_announcement || (state->m_last_block_announcement == oldest_block_announcement && pnode->GetId() > worst_peer)) {
worst_peer = pnode->GetId();
oldest_block_announcement = state->m_last_block_announcement;
@@ -3998,6 +4025,11 @@ void PeerManager::CheckForStaleTipAndEvictPeers()
}
m_stale_tip_check_time = time_in_seconds + STALE_CHECK_INTERVAL;
}
+
+ if (!m_initial_sync_finished && CanDirectFetch(m_chainparams.GetConsensus())) {
+ m_connman.StartExtraBlockRelayPeers();
+ m_initial_sync_finished = true;
+ }
}
namespace {
@@ -4074,6 +4106,15 @@ bool PeerManager::SendMessages(CNode* pto)
auto current_time = GetTime<std::chrono::microseconds>();
if (pto->RelayAddrsWithConn() && !::ChainstateActive().IsInitialBlockDownload() && pto->m_next_local_addr_send < current_time) {
+ // If we've sent before, clear the bloom filter for the peer, so that our
+ // self-announcement will actually go out.
+ // This might be unnecessary if the bloom filter has already rolled
+ // over since our last self-announcement, but there is only a small
+ // bandwidth cost that we can incur by doing this (which happens
+ // once a day on average).
+ if (pto->m_next_local_addr_send != 0us) {
+ pto->m_addr_known->reset();
+ }
AdvertiseLocal(pto);
pto->m_next_local_addr_send = PoissonNextSend(current_time, AVG_LOCAL_ADDRESS_BROADCAST_INTERVAL);
}
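The new EvictExtraOutboundPeers() logic above chooses between the two most recently connected block-relay-only peers. A minimal standalone sketch of that selection rule, assuming a hypothetical helper fed (node id, last-block-time) pairs, with the higher node id standing in for the more recent connection (it mirrors the hunk's logic exactly, including only updating the runner-up when a new maximum id is seen):

#include <cstdint>
#include <utility>
#include <vector>

// Returns the id of the peer to evict: normally the youngest (highest id),
// unless the youngest has given us a block more recently than the runner-up,
// in which case the runner-up is chosen instead.
int64_t SelectExtraBlockRelayPeerToEvict(const std::vector<std::pair<int64_t, int64_t>>& peers)
{
    std::pair<int64_t, int64_t> youngest{-1, 0}, next_youngest{-1, 0};
    for (const auto& [id, last_block_time] : peers) {
        if (id > youngest.first) {
            next_youngest = youngest;
            youngest = {id, last_block_time};
        }
    }
    return youngest.second > next_youngest.second ? next_youngest.first : youngest.first;
}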
diff --git a/src/net_processing.h b/src/net_processing.h
index 87eee566de..12a4e9c38f 100644
--- a/src/net_processing.h
+++ b/src/net_processing.h
@@ -32,10 +32,52 @@ static const bool DEFAULT_PEERBLOCKFILTERS = false;
/** Threshold for marking a node to be discouraged, e.g. disconnected and added to the discouragement filter. */
static const int DISCOURAGEMENT_THRESHOLD{100};
+struct CNodeStateStats {
+ int m_misbehavior_score = 0;
+ int nSyncHeight = -1;
+ int nCommonHeight = -1;
+ std::vector<int> vHeightInFlight;
+};
+
+/**
+ * Data structure for an individual peer. This struct is not protected by
+ * cs_main since it does not contain validation-critical data.
+ *
+ * Memory is owned by shared pointers and this object is destructed when
+ * the refcount drops to zero.
+ *
+ * TODO: move most members from CNodeState to this structure.
+ * TODO: move remaining application-layer data members from CNode to this structure.
+ */
+struct Peer {
+ /** Same id as the CNode object for this peer */
+ const NodeId m_id{0};
+
+ /** Protects misbehavior data members */
+ Mutex m_misbehavior_mutex;
+ /** Accumulated misbehavior score for this peer */
+ int m_misbehavior_score GUARDED_BY(m_misbehavior_mutex){0};
+ /** Whether this peer should be disconnected and marked as discouraged (unless it has the noban permission). */
+ bool m_should_discourage GUARDED_BY(m_misbehavior_mutex){false};
+
+ /** Set of txids to reconsider once their parent transactions have been accepted **/
+ std::set<uint256> m_orphan_work_set GUARDED_BY(g_cs_orphans);
+
+ /** Protects m_getdata_requests **/
+ Mutex m_getdata_requests_mutex;
+ /** Work queue of items requested by this peer **/
+ std::deque<CInv> m_getdata_requests GUARDED_BY(m_getdata_requests_mutex);
+
+ explicit Peer(NodeId id) : m_id(id) {}
+};
+
+using PeerRef = std::shared_ptr<Peer>;
+
class PeerManager final : public CValidationInterface, public NetEventsInterface {
public:
PeerManager(const CChainParams& chainparams, CConnman& connman, BanMan* banman,
- CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool);
+ CScheduler& scheduler, ChainstateManager& chainman, CTxMemPool& pool,
+ bool ignore_incoming_txs);
/**
* Overridden from CValidationInterface.
@@ -94,7 +136,21 @@ public:
*/
void Misbehaving(const NodeId pnode, const int howmuch, const std::string& message);
+ /** Get statistics from node state */
+ bool GetNodeStateStats(NodeId nodeid, CNodeStateStats& stats);
+
+ /** Whether this node ignores txs received over p2p. */
+ bool IgnoresIncomingTxs() { return m_ignore_incoming_txs; }
+
private:
+ /** Get a shared pointer to the Peer object.
+ * May return an empty shared_ptr if the Peer object can't be found. */
+ PeerRef GetPeerRef(NodeId id) const;
+
+ /** Get a shared pointer to the Peer object and remove it from m_peer_map.
+ * May return an empty shared_ptr if the Peer object can't be found. */
+ PeerRef RemovePeer(NodeId id);
+
/**
* Potentially mark a node discouraged based on the contents of a BlockValidationState object
*
@@ -134,6 +190,9 @@ private:
void AddTxAnnouncement(const CNode& node, const GenTxid& gtxid, std::chrono::microseconds current_time)
EXCLUSIVE_LOCKS_REQUIRED(::cs_main);
+ /** Send a version message to a peer */
+ void PushNodeVersion(CNode& pnode, int64_t nTime);
+
const CChainParams& m_chainparams;
CConnman& m_connman;
/** Pointer to this node's banman. May be nullptr - check existence before dereferencing. */
@@ -143,17 +202,24 @@ private:
TxRequestTracker m_txrequest GUARDED_BY(::cs_main);
int64_t m_stale_tip_check_time; //!< Next time to check for stale tip
-};
-struct CNodeStateStats {
- int m_misbehavior_score = 0;
- int nSyncHeight = -1;
- int nCommonHeight = -1;
- std::vector<int> vHeightInFlight;
-};
+ /** Whether this node is running in blocks only mode */
+ const bool m_ignore_incoming_txs;
+
+ /** Whether we've completed initial sync yet, for determining when to turn
+ * on extra block-relay-only peers. */
+ bool m_initial_sync_finished{false};
-/** Get statistics from node state */
-bool GetNodeStateStats(NodeId nodeid, CNodeStateStats &stats);
+ /** Protects m_peer_map */
+ mutable Mutex m_peer_mutex;
+ /**
+ * Map of all Peer objects, keyed by peer id. This map is protected
+ * by the m_peer_mutex. Once a shared pointer reference is
+ * taken, the lock may be released. Individual fields are protected by
+ * their own locks.
+ */
+ std::map<NodeId, PeerRef> m_peer_map GUARDED_BY(m_peer_mutex);
+};
/** Relay transaction to every node */
void RelayTransaction(const uint256& txid, const uint256& wtxid, const CConnman& connman) EXCLUSIVE_LOCKS_REQUIRED(cs_main);
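The Peer comment above prescribes holding m_peer_mutex only long enough to copy the shared_ptr, after which individual fields are protected by their own locks. A rough sketch of what GetPeerRef() could look like under that contract (the actual definition lives in net_processing.cpp and is not shown in this excerpt):

PeerRef PeerManager::GetPeerRef(NodeId id) const
{
    // Take m_peer_mutex only to copy the shared pointer out of the map.
    LOCK(m_peer_mutex);
    auto it = m_peer_map.find(id);
    return it != m_peer_map.end() ? it->second : nullptr;
}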
diff --git a/src/node/context.cpp b/src/node/context.cpp
index 49d0c37235..958221a913 100644
--- a/src/node/context.cpp
+++ b/src/node/context.cpp
@@ -8,6 +8,7 @@
#include <interfaces/chain.h>
#include <net.h>
#include <net_processing.h>
+#include <policy/fees.h>
#include <scheduler.h>
#include <txmempool.h>
diff --git a/src/node/context.h b/src/node/context.h
index 3228831ed1..9b611bf8f5 100644
--- a/src/node/context.h
+++ b/src/node/context.h
@@ -12,6 +12,7 @@
class ArgsManager;
class BanMan;
+class CBlockPolicyEstimator;
class CConnman;
class CScheduler;
class CTxMemPool;
@@ -36,6 +37,7 @@ class WalletClient;
struct NodeContext {
std::unique_ptr<CConnman> connman;
std::unique_ptr<CTxMemPool> mempool;
+ std::unique_ptr<CBlockPolicyEstimator> fee_estimator;
std::unique_ptr<PeerManager> peerman;
ChainstateManager* chainman{nullptr}; // Currently a raw pointer because the memory is not managed by this struct
std::unique_ptr<BanMan> banman;
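With the estimator owned by NodeContext instead of a global, startup only needs to construct it before the mempool that references it. A hedged sketch of that wiring, following the pattern used by the setup_common.cpp changes later in this diff (the check ratio value here is illustrative):

// Sketch only; the real call sites are init.cpp and test/util/setup_common.cpp.
node.fee_estimator = std::make_unique<CBlockPolicyEstimator>();
node.mempool = std::make_unique<CTxMemPool>(node.fee_estimator.get(), /* check_ratio */ 1);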
diff --git a/src/node/interfaces.cpp b/src/node/interfaces.cpp
index a8c8be05fb..317a5c7cbe 100644
--- a/src/node/interfaces.cpp
+++ b/src/node/interfaces.cpp
@@ -121,11 +121,13 @@ public:
}
// Try to retrieve the CNodeStateStats for each node.
- TRY_LOCK(::cs_main, lockMain);
- if (lockMain) {
- for (auto& node_stats : stats) {
- std::get<1>(node_stats) =
- GetNodeStateStats(std::get<0>(node_stats).nodeid, std::get<2>(node_stats));
+ if (m_context->peerman) {
+ TRY_LOCK(::cs_main, lockMain);
+ if (lockMain) {
+ for (auto& node_stats : stats) {
+ std::get<1>(node_stats) =
+ m_context->peerman->GetNodeStateStats(std::get<0>(node_stats).nodeid, std::get<2>(node_stats));
+ }
}
}
return true;
@@ -221,15 +223,6 @@ public:
}
}
bool getNetworkActive() override { return m_context->connman && m_context->connman->GetNetworkActive(); }
- CFeeRate estimateSmartFee(int num_blocks, bool conservative, int* returned_target = nullptr) override
- {
- FeeCalculation fee_calc;
- CFeeRate result = ::feeEstimator.estimateSmartFee(num_blocks, &fee_calc, conservative);
- if (returned_target) {
- *returned_target = fee_calc.returnedTarget;
- }
- return result;
- }
CFeeRate getDustRelayFee() override { return ::dustRelayFee; }
UniValue executeRpc(const std::string& command, const UniValue& params, const std::string& uri) override
{
@@ -312,7 +305,7 @@ public:
util::Ref m_context_ref;
};
-bool FillBlock(const CBlockIndex* index, const FoundBlock& block, UniqueLock<RecursiveMutex>& lock)
+bool FillBlock(const CBlockIndex* index, const FoundBlock& block, UniqueLock<RecursiveMutex>& lock, const CChain& active)
{
if (!index) return false;
if (block.m_hash) *block.m_hash = index->GetBlockHash();
@@ -320,6 +313,8 @@ bool FillBlock(const CBlockIndex* index, const FoundBlock& block, UniqueLock<Rec
if (block.m_time) *block.m_time = index->GetBlockTime();
if (block.m_max_time) *block.m_max_time = index->GetBlockTimeMax();
if (block.m_mtp_time) *block.m_mtp_time = index->GetMedianTimePast();
+ if (block.m_in_active_chain) *block.m_in_active_chain = active[index->nHeight] == index;
+ if (block.m_next_block) FillBlock(active[index->nHeight] == index ? active[index->nHeight + 1] : nullptr, *block.m_next_block, lock, active);
if (block.m_data) {
REVERSE_LOCK(lock);
if (!ReadBlockFromDisk(*block.m_data, index, Params().GetConsensus())) block.m_data->SetNull();
@@ -422,48 +417,33 @@ public:
Optional<int> getHeight() override
{
LOCK(::cs_main);
- int height = ::ChainActive().Height();
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
+ int height = active.Height();
if (height >= 0) {
return height;
}
return nullopt;
}
- Optional<int> getBlockHeight(const uint256& hash) override
- {
- LOCK(::cs_main);
- CBlockIndex* block = LookupBlockIndex(hash);
- if (block && ::ChainActive().Contains(block)) {
- return block->nHeight;
- }
- return nullopt;
- }
uint256 getBlockHash(int height) override
{
LOCK(::cs_main);
- CBlockIndex* block = ::ChainActive()[height];
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
+ CBlockIndex* block = active[height];
assert(block);
return block->GetBlockHash();
}
bool haveBlockOnDisk(int height) override
{
LOCK(cs_main);
- CBlockIndex* block = ::ChainActive()[height];
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
+ CBlockIndex* block = active[height];
return block && ((block->nStatus & BLOCK_HAVE_DATA) != 0) && block->nTx > 0;
}
- Optional<int> findFirstBlockWithTimeAndHeight(int64_t time, int height, uint256* hash) override
- {
- LOCK(cs_main);
- CBlockIndex* block = ::ChainActive().FindEarliestAtLeast(time, height);
- if (block) {
- if (hash) *hash = block->GetBlockHash();
- return block->nHeight;
- }
- return nullopt;
- }
CBlockLocator getTipLocator() override
{
LOCK(cs_main);
- return ::ChainActive().GetLocator();
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
+ return active.GetLocator();
}
bool checkFinalTx(const CTransaction& tx) override
{
@@ -473,7 +453,8 @@ public:
Optional<int> findLocatorFork(const CBlockLocator& locator) override
{
LOCK(cs_main);
- if (CBlockIndex* fork = FindForkInGlobalIndex(::ChainActive(), locator)) {
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
+ if (CBlockIndex* fork = FindForkInGlobalIndex(active, locator)) {
return fork->nHeight;
}
return nullopt;
@@ -481,47 +462,45 @@ public:
bool findBlock(const uint256& hash, const FoundBlock& block) override
{
WAIT_LOCK(cs_main, lock);
- return FillBlock(LookupBlockIndex(hash), block, lock);
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
+ return FillBlock(LookupBlockIndex(hash), block, lock, active);
}
bool findFirstBlockWithTimeAndHeight(int64_t min_time, int min_height, const FoundBlock& block) override
{
WAIT_LOCK(cs_main, lock);
- return FillBlock(ChainActive().FindEarliestAtLeast(min_time, min_height), block, lock);
- }
- bool findNextBlock(const uint256& block_hash, int block_height, const FoundBlock& next, bool* reorg) override {
- WAIT_LOCK(cs_main, lock);
- CBlockIndex* block = ChainActive()[block_height];
- if (block && block->GetBlockHash() != block_hash) block = nullptr;
- if (reorg) *reorg = !block;
- return FillBlock(block ? ChainActive()[block_height + 1] : nullptr, next, lock);
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
+ return FillBlock(active.FindEarliestAtLeast(min_time, min_height), block, lock, active);
}
bool findAncestorByHeight(const uint256& block_hash, int ancestor_height, const FoundBlock& ancestor_out) override
{
WAIT_LOCK(cs_main, lock);
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
if (const CBlockIndex* block = LookupBlockIndex(block_hash)) {
if (const CBlockIndex* ancestor = block->GetAncestor(ancestor_height)) {
- return FillBlock(ancestor, ancestor_out, lock);
+ return FillBlock(ancestor, ancestor_out, lock, active);
}
}
- return FillBlock(nullptr, ancestor_out, lock);
+ return FillBlock(nullptr, ancestor_out, lock, active);
}
bool findAncestorByHash(const uint256& block_hash, const uint256& ancestor_hash, const FoundBlock& ancestor_out) override
{
WAIT_LOCK(cs_main, lock);
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
const CBlockIndex* block = LookupBlockIndex(block_hash);
const CBlockIndex* ancestor = LookupBlockIndex(ancestor_hash);
if (block && ancestor && block->GetAncestor(ancestor->nHeight) != ancestor) ancestor = nullptr;
- return FillBlock(ancestor, ancestor_out, lock);
+ return FillBlock(ancestor, ancestor_out, lock, active);
}
bool findCommonAncestor(const uint256& block_hash1, const uint256& block_hash2, const FoundBlock& ancestor_out, const FoundBlock& block1_out, const FoundBlock& block2_out) override
{
WAIT_LOCK(cs_main, lock);
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
const CBlockIndex* block1 = LookupBlockIndex(block_hash1);
const CBlockIndex* block2 = LookupBlockIndex(block_hash2);
const CBlockIndex* ancestor = block1 && block2 ? LastCommonAncestor(block1, block2) : nullptr;
// Using & instead of && below to avoid short circuiting and leaving
// output uninitialized.
- return FillBlock(ancestor, ancestor_out, lock) & FillBlock(block1, block1_out, lock) & FillBlock(block2, block2_out, lock);
+ return FillBlock(ancestor, ancestor_out, lock, active) & FillBlock(block1, block1_out, lock, active) & FillBlock(block2, block2_out, lock, active);
}
void findCoins(std::map<COutPoint, Coin>& coins) override { return FindCoins(m_node, coins); }
double guessVerificationProgress(const uint256& block_hash) override
@@ -601,11 +580,13 @@ public:
}
CFeeRate estimateSmartFee(int num_blocks, bool conservative, FeeCalculation* calc) override
{
- return ::feeEstimator.estimateSmartFee(num_blocks, calc, conservative);
+ if (!m_node.fee_estimator) return {};
+ return m_node.fee_estimator->estimateSmartFee(num_blocks, calc, conservative);
}
unsigned int estimateMaxBlocks() override
{
- return ::feeEstimator.HighestTargetTracked(FeeEstimateHorizon::LONG_HALFLIFE);
+ if (!m_node.fee_estimator) return 0;
+ return m_node.fee_estimator->HighestTargetTracked(FeeEstimateHorizon::LONG_HALFLIFE);
}
CFeeRate mempoolMinFee() override
{
@@ -639,7 +620,8 @@ public:
{
if (!old_tip.IsNull()) {
LOCK(::cs_main);
- if (old_tip == ::ChainActive().Tip()->GetBlockHash()) return;
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
+ if (old_tip == active.Tip()->GetBlockHash()) return;
}
SyncWithValidationInterfaceQueue();
}
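A short usage sketch of the FoundBlock fields now filled in by FillBlock() above; `chain` and `block_hash` are placeholders, and the same pattern is exercised by the interfaces_tests changes further down:

bool in_active{false};
uint256 next_hash;
// One cs_main acquisition answers both questions: is the block on the active
// chain, and if so, what is the hash of the next active-chain block?
chain.findBlock(block_hash, FoundBlock()
                                .inActiveChain(in_active)
                                .nextBlock(FoundBlock().hash(next_hash)));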
diff --git a/src/policy/fees.cpp b/src/policy/fees.cpp
index 0f31093dbb..cfa4cf8421 100644
--- a/src/policy/fees.cpp
+++ b/src/policy/fees.cpp
@@ -6,10 +6,14 @@
#include <policy/fees.h>
#include <clientversion.h>
+#include <fs.h>
+#include <logging.h>
#include <streams.h>
#include <txmempool.h>
#include <util/system.h>
+static const char* FEE_ESTIMATES_FILENAME="fee_estimates.dat";
+
static constexpr double INF_FEERATE = 1e99;
std::string StringForFeeEstimateHorizon(FeeEstimateHorizon horizon) {
@@ -489,6 +493,7 @@ CBlockPolicyEstimator::CBlockPolicyEstimator()
{
static_assert(MIN_BUCKET_FEERATE > 0, "Min feerate must be nonzero");
size_t bucketIndex = 0;
+
for (double bucketBoundary = MIN_BUCKET_FEERATE; bucketBoundary <= MAX_BUCKET_FEERATE; bucketBoundary *= FEE_SPACING, bucketIndex++) {
buckets.push_back(bucketBoundary);
bucketMap[bucketBoundary] = bucketIndex;
@@ -500,6 +505,13 @@ CBlockPolicyEstimator::CBlockPolicyEstimator()
feeStats = std::unique_ptr<TxConfirmStats>(new TxConfirmStats(buckets, bucketMap, MED_BLOCK_PERIODS, MED_DECAY, MED_SCALE));
shortStats = std::unique_ptr<TxConfirmStats>(new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_PERIODS, SHORT_DECAY, SHORT_SCALE));
longStats = std::unique_ptr<TxConfirmStats>(new TxConfirmStats(buckets, bucketMap, LONG_BLOCK_PERIODS, LONG_DECAY, LONG_SCALE));
+
+ // If the fee estimation file is present, read recorded estimations
+ fs::path est_filepath = GetDataDir() / FEE_ESTIMATES_FILENAME;
+ CAutoFile est_file(fsbridge::fopen(est_filepath, "rb"), SER_DISK, CLIENT_VERSION);
+ if (est_file.IsNull() || !Read(est_file)) {
+ LogPrintf("Failed to read fee estimates from %s. Continue anyway.\n", est_filepath.string());
+ }
}
CBlockPolicyEstimator::~CBlockPolicyEstimator()
@@ -856,6 +868,15 @@ CFeeRate CBlockPolicyEstimator::estimateSmartFee(int confTarget, FeeCalculation
return CFeeRate(llround(median));
}
+void CBlockPolicyEstimator::Flush() {
+ FlushUnconfirmed();
+
+ fs::path est_filepath = GetDataDir() / FEE_ESTIMATES_FILENAME;
+ CAutoFile est_file(fsbridge::fopen(est_filepath, "wb"), SER_DISK, CLIENT_VERSION);
+ if (est_file.IsNull() || !Write(est_file)) {
+ LogPrintf("Failed to write fee estimates to %s. Continue anyway.\n", est_filepath.string());
+ }
+}
bool CBlockPolicyEstimator::Write(CAutoFile& fileout) const
{
@@ -888,8 +909,9 @@ bool CBlockPolicyEstimator::Read(CAutoFile& filein)
LOCK(m_cs_fee_estimator);
int nVersionRequired, nVersionThatWrote;
filein >> nVersionRequired >> nVersionThatWrote;
- if (nVersionRequired > CLIENT_VERSION)
- return error("CBlockPolicyEstimator::Read(): up-version (%d) fee estimate file", nVersionRequired);
+ if (nVersionRequired > CLIENT_VERSION) {
+ throw std::runtime_error(strprintf("up-version (%d) fee estimate file", nVersionRequired));
+ }
// Read fee estimates file into temporary variables so existing data
// structures aren't corrupted if there is an exception.
@@ -907,8 +929,9 @@ bool CBlockPolicyEstimator::Read(CAutoFile& filein)
std::vector<double> fileBuckets;
filein >> fileBuckets;
size_t numBuckets = fileBuckets.size();
- if (numBuckets <= 1 || numBuckets > 1000)
+ if (numBuckets <= 1 || numBuckets > 1000) {
throw std::runtime_error("Corrupt estimates file. Must have between 2 and 1000 feerate buckets");
+ }
std::unique_ptr<TxConfirmStats> fileFeeStats(new TxConfirmStats(buckets, bucketMap, MED_BLOCK_PERIODS, MED_DECAY, MED_SCALE));
std::unique_ptr<TxConfirmStats> fileShortStats(new TxConfirmStats(buckets, bucketMap, SHORT_BLOCK_PERIODS, SHORT_DECAY, SHORT_SCALE));
diff --git a/src/policy/fees.h b/src/policy/fees.h
index 8ea8816dc3..dd9f530c99 100644
--- a/src/policy/fees.h
+++ b/src/policy/fees.h
@@ -215,6 +215,9 @@ public:
/** Calculation of highest target that estimates are tracked for */
unsigned int HighestTargetTracked(FeeEstimateHorizon horizon) const;
+ /** Drop still unconfirmed transactions and record current estimations, if the fee estimation file is present. */
+ void Flush();
+
private:
mutable RecursiveMutex m_cs_fee_estimator;
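Flush() is the write-side counterpart of the read now done in the CBlockPolicyEstimator constructor. A hedged sketch of the shutdown side (the real call site is in init.cpp, outside this excerpt):

// On shutdown: drop still-unconfirmed transactions and persist current estimates.
if (node.fee_estimator) node.fee_estimator->Flush();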
diff --git a/src/protocol.cpp b/src/protocol.cpp
index dc8f795a0c..d7b73dfa40 100644
--- a/src/protocol.cpp
+++ b/src/protocol.cpp
@@ -203,7 +203,6 @@ static std::string serviceFlagToStr(size_t bit)
switch ((ServiceFlags)service_flag) {
case NODE_NONE: abort(); // impossible
case NODE_NETWORK: return "NETWORK";
- case NODE_GETUTXO: return "GETUTXO";
case NODE_BLOOM: return "BLOOM";
case NODE_WITNESS: return "WITNESS";
case NODE_COMPACT_FILTERS: return "COMPACT_FILTERS";
diff --git a/src/protocol.h b/src/protocol.h
index 309fac621c..8af34f58bd 100644
--- a/src/protocol.h
+++ b/src/protocol.h
@@ -273,10 +273,6 @@ enum ServiceFlags : uint64_t {
// NODE_NETWORK means that the node is capable of serving the complete block chain. It is currently
// set by all Bitcoin Core non pruned nodes, and is unset by SPV clients or other light clients.
NODE_NETWORK = (1 << 0),
- // NODE_GETUTXO means the node is capable of responding to the getutxo protocol request.
- // Bitcoin Core does not support this but a patch set called Bitcoin XT does.
- // See BIP 64 for details on how this is implemented.
- NODE_GETUTXO = (1 << 1),
// NODE_BLOOM means the node is capable and willing to handle bloom-filtered connections.
// Bitcoin Core nodes used to support this by default, without advertising this bit,
// but no longer do as of protocol version 70011 (= NO_BLOOM_VERSION)
diff --git a/src/qt/test/rpcnestedtests.cpp b/src/qt/test/rpcnestedtests.cpp
index ea7b5f0c9e..a5c9138798 100644
--- a/src/qt/test/rpcnestedtests.cpp
+++ b/src/qt/test/rpcnestedtests.cpp
@@ -43,41 +43,41 @@ void RPCNestedTests::rpcNestedTests()
tableRPC.appendCommand("rpcNestedTest", &vRPCCommands[0]);
TestingSetup test;
+ m_node.setContext(&test.m_node);
if (RPCIsInWarmup(nullptr)) SetRPCWarmupFinished();
std::string result;
std::string result2;
std::string filtered;
- interfaces::Node* node = &m_node;
- RPCConsole::RPCExecuteCommandLine(*node, result, "getblockchaininfo()[chain]", &filtered); //simple result filtering with path
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "getblockchaininfo()[chain]", &filtered); //simple result filtering with path
QVERIFY(result=="main");
QVERIFY(filtered == "getblockchaininfo()[chain]");
- RPCConsole::RPCExecuteCommandLine(*node, result, "getblock(getbestblockhash())"); //simple 2 level nesting
- RPCConsole::RPCExecuteCommandLine(*node, result, "getblock(getblock(getbestblockhash())[hash], true)");
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "getblock(getbestblockhash())"); //simple 2 level nesting
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "getblock(getblock(getbestblockhash())[hash], true)");
- RPCConsole::RPCExecuteCommandLine(*node, result, "getblock( getblock( getblock(getbestblockhash())[hash] )[hash], true)"); //4 level nesting with whitespace, filtering path and boolean parameter
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "getblock( getblock( getblock(getbestblockhash())[hash] )[hash], true)"); //4 level nesting with whitespace, filtering path and boolean parameter
- RPCConsole::RPCExecuteCommandLine(*node, result, "getblockchaininfo");
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "getblockchaininfo");
QVERIFY(result.substr(0,1) == "{");
- RPCConsole::RPCExecuteCommandLine(*node, result, "getblockchaininfo()");
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "getblockchaininfo()");
QVERIFY(result.substr(0,1) == "{");
- RPCConsole::RPCExecuteCommandLine(*node, result, "getblockchaininfo "); //whitespace at the end will be tolerated
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "getblockchaininfo "); //whitespace at the end will be tolerated
QVERIFY(result.substr(0,1) == "{");
- (RPCConsole::RPCExecuteCommandLine(*node, result, "getblockchaininfo()[\"chain\"]")); //Quote path identifier are allowed, but look after a child containing the quotes in the key
+ (RPCConsole::RPCExecuteCommandLine(m_node, result, "getblockchaininfo()[\"chain\"]")); //Quoted path identifiers are allowed, but look for a child containing the quotes in the key
QVERIFY(result == "null");
- (RPCConsole::RPCExecuteCommandLine(*node, result, "createrawtransaction [] {} 0")); //parameter not in brackets are allowed
- (RPCConsole::RPCExecuteCommandLine(*node, result2, "createrawtransaction([],{},0)")); //parameter in brackets are allowed
+ (RPCConsole::RPCExecuteCommandLine(m_node, result, "createrawtransaction [] {} 0")); //parameter not in brackets are allowed
+ (RPCConsole::RPCExecuteCommandLine(m_node, result2, "createrawtransaction([],{},0)")); //parameter in brackets are allowed
QVERIFY(result == result2);
- (RPCConsole::RPCExecuteCommandLine(*node, result2, "createrawtransaction( [], {} , 0 )")); //whitespace between parameters is allowed
+ (RPCConsole::RPCExecuteCommandLine(m_node, result2, "createrawtransaction( [], {} , 0 )")); //whitespace between parameters is allowed
QVERIFY(result == result2);
- RPCConsole::RPCExecuteCommandLine(*node, result, "getblock(getbestblockhash())[tx][0]", &filtered);
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "getblock(getbestblockhash())[tx][0]", &filtered);
QVERIFY(result == "4a5e1e4baab89f3a32518a88c31bc87f618f76673e2cc77ab2127b7afdeda33b");
QVERIFY(filtered == "getblock(getbestblockhash())[tx][0]");
@@ -102,35 +102,35 @@ void RPCNestedTests::rpcNestedTests()
RPCConsole::RPCParseCommandLine(nullptr, result, "help(importprivkey(abc), walletpassphrase(def))", false, &filtered);
QVERIFY(filtered == "help(importprivkey(…), walletpassphrase(…))");
- RPCConsole::RPCExecuteCommandLine(*node, result, "rpcNestedTest");
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "rpcNestedTest");
QVERIFY(result == "[]");
- RPCConsole::RPCExecuteCommandLine(*node, result, "rpcNestedTest ''");
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "rpcNestedTest ''");
QVERIFY(result == "[\"\"]");
- RPCConsole::RPCExecuteCommandLine(*node, result, "rpcNestedTest \"\"");
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "rpcNestedTest \"\"");
QVERIFY(result == "[\"\"]");
- RPCConsole::RPCExecuteCommandLine(*node, result, "rpcNestedTest '' abc");
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "rpcNestedTest '' abc");
QVERIFY(result == "[\"\",\"abc\"]");
- RPCConsole::RPCExecuteCommandLine(*node, result, "rpcNestedTest abc '' abc");
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "rpcNestedTest abc '' abc");
QVERIFY(result == "[\"abc\",\"\",\"abc\"]");
- RPCConsole::RPCExecuteCommandLine(*node, result, "rpcNestedTest abc abc");
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "rpcNestedTest abc abc");
QVERIFY(result == "[\"abc\",\"abc\"]");
- RPCConsole::RPCExecuteCommandLine(*node, result, "rpcNestedTest abc\t\tabc");
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "rpcNestedTest abc\t\tabc");
QVERIFY(result == "[\"abc\",\"abc\"]");
- RPCConsole::RPCExecuteCommandLine(*node, result, "rpcNestedTest(abc )");
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "rpcNestedTest(abc )");
QVERIFY(result == "[\"abc\"]");
- RPCConsole::RPCExecuteCommandLine(*node, result, "rpcNestedTest( abc )");
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "rpcNestedTest( abc )");
QVERIFY(result == "[\"abc\"]");
- RPCConsole::RPCExecuteCommandLine(*node, result, "rpcNestedTest( abc , cba )");
+ RPCConsole::RPCExecuteCommandLine(m_node, result, "rpcNestedTest( abc , cba )");
QVERIFY(result == "[\"abc\",\"cba\"]");
// do the QVERIFY_EXCEPTION_THROWN checks only with Qt5.3 and higher (QVERIFY_EXCEPTION_THROWN was introduced in Qt5.3)
- QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(*node, result, "getblockchaininfo() .\n"), std::runtime_error); //invalid syntax
- QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(*node, result, "getblockchaininfo() getblockchaininfo()"), std::runtime_error); //invalid syntax
- (RPCConsole::RPCExecuteCommandLine(*node, result, "getblockchaininfo(")); //tolerate non closing brackets if we have no arguments
- (RPCConsole::RPCExecuteCommandLine(*node, result, "getblockchaininfo()()()")); //tolerate non command brackts
- QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(*node, result, "getblockchaininfo(True)"), UniValue); //invalid argument
- QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(*node, result, "a(getblockchaininfo(True))"), UniValue); //method not found
- QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(*node, result, "rpcNestedTest abc,,abc"), std::runtime_error); //don't tollerate empty arguments when using ,
- QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(*node, result, "rpcNestedTest(abc,,abc)"), std::runtime_error); //don't tollerate empty arguments when using ,
- QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(*node, result, "rpcNestedTest(abc,,)"), std::runtime_error); //don't tollerate empty arguments when using ,
+ QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(m_node, result, "getblockchaininfo() .\n"), std::runtime_error); //invalid syntax
+ QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(m_node, result, "getblockchaininfo() getblockchaininfo()"), std::runtime_error); //invalid syntax
+ (RPCConsole::RPCExecuteCommandLine(m_node, result, "getblockchaininfo(")); //tolerate non closing brackets if we have no arguments
+ (RPCConsole::RPCExecuteCommandLine(m_node, result, "getblockchaininfo()()()")); //tolerate non-command brackets
+ QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(m_node, result, "getblockchaininfo(True)"), UniValue); //invalid argument
+ QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(m_node, result, "a(getblockchaininfo(True))"), UniValue); //method not found
+ QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(m_node, result, "rpcNestedTest abc,,abc"), std::runtime_error); //don't tolerate empty arguments when using ,
+ QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(m_node, result, "rpcNestedTest(abc,,abc)"), std::runtime_error); //don't tolerate empty arguments when using ,
+ QVERIFY_EXCEPTION_THROWN(RPCConsole::RPCExecuteCommandLine(m_node, result, "rpcNestedTest(abc,,)"), std::runtime_error); //don't tolerate empty arguments when using ,
}
diff --git a/src/randomenv.cpp b/src/randomenv.cpp
index 5e07c3db40..9248db1539 100644
--- a/src/randomenv.cpp
+++ b/src/randomenv.cpp
@@ -69,7 +69,7 @@ void RandAddSeedPerfmon(CSHA512& hasher)
// This can take up to 2 seconds, so only do it every 10 minutes.
// Initialize last_perfmon to 0 seconds, we don't skip the first call.
- static std::atomic<std::chrono::seconds> last_perfmon{std::chrono::seconds{0}};
+ static std::atomic<std::chrono::seconds> last_perfmon{0s};
auto last_time = last_perfmon.load();
auto current_time = GetTime<std::chrono::seconds>();
if (current_time < last_time + std::chrono::minutes{10}) return;
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 392073d047..57327e6004 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -17,6 +17,7 @@
#include <node/coinstats.h>
#include <node/context.h>
#include <node/utxo_snapshot.h>
+#include <policy/fees.h>
#include <policy/feerate.h>
#include <policy/policy.h>
#include <policy/rbf.h>
@@ -81,6 +82,15 @@ ChainstateManager& EnsureChainman(const util::Ref& context)
return *node.chainman;
}
+CBlockPolicyEstimator& EnsureFeeEstimator(const util::Ref& context)
+{
+ NodeContext& node = EnsureNodeContext(context);
+ if (!node.fee_estimator) {
+ throw JSONRPCError(RPC_INTERNAL_ERROR, "Fee estimation disabled");
+ }
+ return *node.fee_estimator;
+}
+
/* Calculate the difficulty for a given block index.
*/
double GetDifficulty(const CBlockIndex* blockindex)
diff --git a/src/rpc/blockchain.h b/src/rpc/blockchain.h
index 5b362bf211..91766aacc9 100644
--- a/src/rpc/blockchain.h
+++ b/src/rpc/blockchain.h
@@ -15,6 +15,7 @@ extern RecursiveMutex cs_main;
class CBlock;
class CBlockIndex;
+class CBlockPolicyEstimator;
class CTxMemPool;
class ChainstateManager;
class UniValue;
@@ -54,5 +55,6 @@ void CalculatePercentilesByWeight(CAmount result[NUM_GETBLOCKSTATS_PERCENTILES],
NodeContext& EnsureNodeContext(const util::Ref& context);
CTxMemPool& EnsureMemPool(const util::Ref& context);
ChainstateManager& EnsureChainman(const util::Ref& context);
+CBlockPolicyEstimator& EnsureFeeEstimator(const util::Ref& context);
#endif
diff --git a/src/rpc/mining.cpp b/src/rpc/mining.cpp
index 7d45ad9434..965b278bfa 100644
--- a/src/rpc/mining.cpp
+++ b/src/rpc/mining.cpp
@@ -1022,21 +1022,19 @@ static RPCHelpMan submitheader()
static RPCHelpMan estimatesmartfee()
{
return RPCHelpMan{"estimatesmartfee",
- "\nEstimates the approximate fee per kilobyte needed for a transaction to begin\n"
- "confirmation within conf_target blocks if possible and return the number of blocks\n"
- "for which the estimate is valid. Uses virtual transaction size as defined\n"
- "in BIP 141 (witness data is discounted).\n",
- {
- {"conf_target", RPCArg::Type::NUM, RPCArg::Optional::NO, "Confirmation target in blocks (1 - 1008)"},
- {"estimate_mode", RPCArg::Type::STR, /* default */ "CONSERVATIVE", "The fee estimate mode.\n"
+ "\nEstimates the approximate fee per kilobyte needed for a transaction to begin\n"
+ "confirmation within conf_target blocks if possible and return the number of blocks\n"
+ "for which the estimate is valid. Uses virtual transaction size as defined\n"
+ "in BIP 141 (witness data is discounted).\n",
+ {
+ {"conf_target", RPCArg::Type::NUM, RPCArg::Optional::NO, "Confirmation target in blocks (1 - 1008)"},
+ {"estimate_mode", RPCArg::Type::STR, /* default */ "conservative", "The fee estimate mode.\n"
" Whether to return a more conservative estimate which also satisfies\n"
" a longer history. A conservative estimate potentially returns a\n"
" higher feerate and is more likely to be sufficient for the desired\n"
" target, but is not as responsive to short term drops in the\n"
- " prevailing fee market. Must be one of:\n"
- " \"UNSET\"\n"
- " \"ECONOMICAL\"\n"
- " \"CONSERVATIVE\""},
+ " prevailing fee market. Must be one of (case insensitive):\n"
+ "\"" + FeeModes("\"\n\"") + "\""},
},
RPCResult{
RPCResult::Type::OBJ, "", "",
@@ -1059,7 +1057,10 @@ static RPCHelpMan estimatesmartfee()
{
RPCTypeCheck(request.params, {UniValue::VNUM, UniValue::VSTR});
RPCTypeCheckArgument(request.params[0], UniValue::VNUM);
- unsigned int max_target = ::feeEstimator.HighestTargetTracked(FeeEstimateHorizon::LONG_HALFLIFE);
+
+ CBlockPolicyEstimator& fee_estimator = EnsureFeeEstimator(request.context);
+
+ unsigned int max_target = fee_estimator.HighestTargetTracked(FeeEstimateHorizon::LONG_HALFLIFE);
unsigned int conf_target = ParseConfirmTarget(request.params[0], max_target);
bool conservative = true;
if (!request.params[1].isNull()) {
@@ -1073,7 +1074,7 @@ static RPCHelpMan estimatesmartfee()
UniValue result(UniValue::VOBJ);
UniValue errors(UniValue::VARR);
FeeCalculation feeCalc;
- CFeeRate feeRate = ::feeEstimator.estimateSmartFee(conf_target, &feeCalc, conservative);
+ CFeeRate feeRate = fee_estimator.estimateSmartFee(conf_target, &feeCalc, conservative);
if (feeRate != CFeeRate(0)) {
result.pushKV("feerate", ValueFromAmount(feeRate.GetFeePerK()));
} else {
@@ -1144,7 +1145,10 @@ static RPCHelpMan estimaterawfee()
{
RPCTypeCheck(request.params, {UniValue::VNUM, UniValue::VNUM}, true);
RPCTypeCheckArgument(request.params[0], UniValue::VNUM);
- unsigned int max_target = ::feeEstimator.HighestTargetTracked(FeeEstimateHorizon::LONG_HALFLIFE);
+
+ CBlockPolicyEstimator& fee_estimator = EnsureFeeEstimator(request.context);
+
+ unsigned int max_target = fee_estimator.HighestTargetTracked(FeeEstimateHorizon::LONG_HALFLIFE);
unsigned int conf_target = ParseConfirmTarget(request.params[0], max_target);
double threshold = 0.95;
if (!request.params[1].isNull()) {
@@ -1161,9 +1165,9 @@ static RPCHelpMan estimaterawfee()
EstimationResult buckets;
// Only output results for horizons which track the target
- if (conf_target > ::feeEstimator.HighestTargetTracked(horizon)) continue;
+ if (conf_target > fee_estimator.HighestTargetTracked(horizon)) continue;
- feeRate = ::feeEstimator.estimateRawFee(conf_target, threshold, horizon, &buckets);
+ feeRate = fee_estimator.estimateRawFee(conf_target, threshold, horizon, &buckets);
UniValue horizon_result(UniValue::VOBJ);
UniValue errors(UniValue::VARR);
UniValue passbucket(UniValue::VOBJ);
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index e72ef24d12..973d730218 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -126,6 +126,8 @@ static RPCHelpMan getpeerinfo()
{RPCResult::Type::NUM, "version", "The peer version, such as 70001"},
{RPCResult::Type::STR, "subver", "The string version"},
{RPCResult::Type::BOOL, "inbound", "Inbound (true) or Outbound (false)"},
+ {RPCResult::Type::BOOL, "bip152_hb_to", "Whether we selected peer as (compact blocks) high-bandwidth peer"},
+ {RPCResult::Type::BOOL, "bip152_hb_from", "Whether peer selected us as (compact blocks) high-bandwidth peer"},
{RPCResult::Type::BOOL, "addnode", "Whether connection was due to addnode/-connect or if it was an automatic/inbound connection\n"
"(DEPRECATED, returned only if the config option -deprecatedrpc=getpeerinfo_addnode is passed)"},
{RPCResult::Type::STR, "connection_type", "Type of connection: \n" + Join(CONNECTION_TYPE_DOC, ",\n") + ".\n"
@@ -165,8 +167,9 @@ static RPCHelpMan getpeerinfo()
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
NodeContext& node = EnsureNodeContext(request.context);
- if(!node.connman)
+ if(!node.connman || !node.peerman) {
throw JSONRPCError(RPC_CLIENT_P2P_DISABLED, "Error: Peer-to-peer functionality missing or disabled");
+ }
std::vector<CNodeStats> vstats;
node.connman->GetNodeStats(vstats);
@@ -176,7 +179,7 @@ static RPCHelpMan getpeerinfo()
for (const CNodeStats& stats : vstats) {
UniValue obj(UniValue::VOBJ);
CNodeStateStats statestats;
- bool fStateStats = GetNodeStateStats(stats.nodeid, statestats);
+ bool fStateStats = node.peerman->GetNodeStateStats(stats.nodeid, statestats);
obj.pushKV("id", stats.nodeid);
obj.pushKV("addr", stats.addrName);
if (stats.addrBind.IsValid()) {
@@ -215,6 +218,8 @@ static RPCHelpMan getpeerinfo()
// their ver message.
obj.pushKV("subver", stats.cleanSubVer);
obj.pushKV("inbound", stats.fInbound);
+ obj.pushKV("bip152_hb_to", stats.m_bip152_highbandwidth_to);
+ obj.pushKV("bip152_hb_from", stats.m_bip152_highbandwidth_from);
if (IsDeprecatedRPCEnabled("getpeerinfo_addnode")) {
// addnode is deprecated in v0.21 for removal in v0.22
obj.pushKV("addnode", stats.m_manual_connection);
@@ -577,7 +582,9 @@ static RPCHelpMan getnetworkinfo()
obj.pushKV("localservices", strprintf("%016x", services));
obj.pushKV("localservicesnames", GetServicesNames(services));
}
- obj.pushKV("localrelay", g_relay_txes);
+ if (node.peerman) {
+ obj.pushKV("localrelay", !node.peerman->IgnoresIncomingTxs());
+ }
obj.pushKV("timeoffset", GetTimeOffset());
if (node.connman) {
obj.pushKV("networkactive", node.connman->GetNetworkActive());
diff --git a/src/sync.cpp b/src/sync.cpp
index f07916041a..acfbe8fe29 100644
--- a/src/sync.cpp
+++ b/src/sync.cpp
@@ -228,20 +228,28 @@ template void EnterCritical(const char*, const char*, int, boost::mutex*, bool);
void CheckLastCritical(void* cs, std::string& lockname, const char* guardname, const char* file, int line)
{
- {
- LockData& lockdata = GetLockData();
- std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
-
- const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
- if (!lock_stack.empty()) {
- const auto& lastlock = lock_stack.back();
- if (lastlock.first == cs) {
- lockname = lastlock.second.Name();
- return;
- }
+ LockData& lockdata = GetLockData();
+ std::lock_guard<std::mutex> lock(lockdata.dd_mutex);
+
+ const LockStack& lock_stack = lockdata.m_lock_stacks[std::this_thread::get_id()];
+ if (!lock_stack.empty()) {
+ const auto& lastlock = lock_stack.back();
+ if (lastlock.first == cs) {
+ lockname = lastlock.second.Name();
+ return;
}
}
- throw std::system_error(EPERM, std::generic_category(), strprintf("%s:%s %s was not most recent critical section locked", file, line, guardname));
+
+ LogPrintf("INCONSISTENT LOCK ORDER DETECTED\n");
+ LogPrintf("Current lock order (least recent first) is:\n");
+ for (const LockStackItem& i : lock_stack) {
+ LogPrintf(" %s\n", i.second.ToString());
+ }
+ if (g_debug_lockorder_abort) {
+ tfm::format(std::cerr, "%s:%s %s was not most recent critical section locked, details in debug log.\n", file, line, guardname);
+ abort();
+ }
+ throw std::logic_error(strprintf("%s was not most recent critical section locked", guardname));
}
void LeaveCritical()
diff --git a/src/sync.h b/src/sync.h
index 0948083c7f..749bf5575c 100644
--- a/src/sync.h
+++ b/src/sync.h
@@ -242,10 +242,12 @@ using DebugLock = UniqueLock<typename std::remove_reference<typename std::remove
(cs).lock(); \
}
-#define LEAVE_CRITICAL_SECTION(cs) \
- { \
- (cs).unlock(); \
- LeaveCritical(); \
+#define LEAVE_CRITICAL_SECTION(cs) \
+ { \
+ std::string lockname; \
+ CheckLastCritical((void*)(&cs), lockname, #cs, __FILE__, __LINE__); \
+ (cs).unlock(); \
+ LeaveCritical(); \
}
//! Run code while locking a mutex.
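A short illustration of what the strengthened LEAVE_CRITICAL_SECTION now catches under DEBUG_LOCKORDER; cs_a and cs_b are placeholder mutexes, and the sync_tests change below exercises exactly this path:

RecursiveMutex cs_a, cs_b;
ENTER_CRITICAL_SECTION(cs_a);
ENTER_CRITICAL_SECTION(cs_b);
// cs_a is not the most recently locked section, so CheckLastCritical() logs the
// current lock order and aborts, or throws std::logic_error when
// g_debug_lockorder_abort is false.
LEAVE_CRITICAL_SECTION(cs_a);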
diff --git a/src/test/denialofservice_tests.cpp b/src/test/denialofservice_tests.cpp
index c399da900f..8f6fdd04d0 100644
--- a/src/test/denialofservice_tests.cpp
+++ b/src/test/denialofservice_tests.cpp
@@ -80,7 +80,8 @@ BOOST_AUTO_TEST_CASE(outbound_slow_chain_eviction)
{
const CChainParams& chainparams = Params();
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerManager>(chainparams, *connman, nullptr, *m_node.scheduler, *m_node.chainman, *m_node.mempool);
+ auto peerLogic = std::make_unique<PeerManager>(chainparams, *connman, nullptr, *m_node.scheduler,
+ *m_node.chainman, *m_node.mempool, false);
// Mock an outbound peer
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
@@ -149,7 +150,8 @@ BOOST_AUTO_TEST_CASE(stale_tip_peer_management)
{
const CChainParams& chainparams = Params();
auto connman = MakeUnique<CConnmanTest>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerManager>(chainparams, *connman, nullptr, *m_node.scheduler, *m_node.chainman, *m_node.mempool);
+ auto peerLogic = std::make_unique<PeerManager>(chainparams, *connman, nullptr, *m_node.scheduler,
+ *m_node.chainman, *m_node.mempool, false);
constexpr int max_outbound_full_relay = MAX_OUTBOUND_FULL_RELAY_CONNECTIONS;
CConnman::Options options;
@@ -222,7 +224,8 @@ BOOST_AUTO_TEST_CASE(peer_discouragement)
const CChainParams& chainparams = Params();
auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerManager>(chainparams, *connman, banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool);
+ auto peerLogic = std::make_unique<PeerManager>(chainparams, *connman, banman.get(), *m_node.scheduler,
+ *m_node.chainman, *m_node.mempool, false);
banman->ClearBanned();
CAddress addr1(ip(0xa0b0c001), NODE_NONE);
@@ -268,7 +271,8 @@ BOOST_AUTO_TEST_CASE(DoS_bantime)
const CChainParams& chainparams = Params();
auto banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
auto connman = MakeUnique<CConnman>(0x1337, 0x1337);
- auto peerLogic = MakeUnique<PeerManager>(chainparams, *connman, banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool);
+ auto peerLogic = std::make_unique<PeerManager>(chainparams, *connman, banman.get(), *m_node.scheduler,
+ *m_node.chainman, *m_node.mempool, false);
banman->ClearBanned();
int64_t nStartTime = GetTime();
diff --git a/src/test/fuzz/connman.cpp b/src/test/fuzz/connman.cpp
index 6521c3f3b2..bb97f58cf2 100644
--- a/src/test/fuzz/connman.cpp
+++ b/src/test/fuzz/connman.cpp
@@ -145,7 +145,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
}
(void)connman.GetAddedNodeInfo();
(void)connman.GetBestHeight();
- (void)connman.GetExtraOutboundCount();
+ (void)connman.GetExtraFullOutboundCount();
(void)connman.GetLocalServices();
(void)connman.GetMaxOutboundTarget();
(void)connman.GetMaxOutboundTimeframe();
diff --git a/src/test/fuzz/net.cpp b/src/test/fuzz/net.cpp
index a0c8b7aac5..81e36b3f06 100644
--- a/src/test/fuzz/net.cpp
+++ b/src/test/fuzz/net.cpp
@@ -48,8 +48,9 @@ void test_one_input(const std::vector<uint8_t>& buffer)
fuzzed_data_provider.ConsumeRandomLengthString(32),
fuzzed_data_provider.PickValueInArray({ConnectionType::INBOUND, ConnectionType::OUTBOUND_FULL_RELAY, ConnectionType::MANUAL, ConnectionType::FEELER, ConnectionType::BLOCK_RELAY, ConnectionType::ADDR_FETCH}),
fuzzed_data_provider.ConsumeBool()};
+ node.SetCommonVersion(fuzzed_data_provider.ConsumeIntegral<int>());
while (fuzzed_data_provider.ConsumeBool()) {
- switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 11)) {
+ switch (fuzzed_data_provider.ConsumeIntegralInRange<int>(0, 10)) {
case 0: {
node.CloseSocketDisconnect();
break;
@@ -59,10 +60,6 @@ void test_one_input(const std::vector<uint8_t>& buffer)
break;
}
case 2: {
- node.SetCommonVersion(fuzzed_data_provider.ConsumeIntegral<int>());
- break;
- }
- case 3: {
const std::vector<bool> asmap = ConsumeRandomLengthBitVector(fuzzed_data_provider);
if (!SanityCheckASMap(asmap)) {
break;
@@ -71,18 +68,18 @@ void test_one_input(const std::vector<uint8_t>& buffer)
node.copyStats(stats, asmap);
break;
}
- case 4: {
+ case 3: {
const CNode* add_ref_node = node.AddRef();
assert(add_ref_node == &node);
break;
}
- case 5: {
+ case 4: {
if (node.GetRefCount() > 0) {
node.Release();
}
break;
}
- case 6: {
+ case 5: {
if (node.m_addr_known == nullptr) {
break;
}
@@ -93,7 +90,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
node.AddAddressKnown(*addr_opt);
break;
}
- case 7: {
+ case 6: {
if (node.m_addr_known == nullptr) {
break;
}
@@ -105,7 +102,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
node.PushAddress(*addr_opt, fast_random_context);
break;
}
- case 8: {
+ case 7: {
const std::optional<CInv> inv_opt = ConsumeDeserializable<CInv>(fuzzed_data_provider);
if (!inv_opt) {
break;
@@ -113,11 +110,11 @@ void test_one_input(const std::vector<uint8_t>& buffer)
node.AddKnownTx(inv_opt->hash);
break;
}
- case 9: {
+ case 8: {
node.PushTxInventory(ConsumeUInt256(fuzzed_data_provider));
break;
}
- case 10: {
+ case 9: {
const std::optional<CService> service_opt = ConsumeDeserializable<CService>(fuzzed_data_provider);
if (!service_opt) {
break;
@@ -125,7 +122,7 @@ void test_one_input(const std::vector<uint8_t>& buffer)
node.SetAddrLocal(*service_opt);
break;
}
- case 11: {
+ case 10: {
const std::vector<uint8_t> b = ConsumeRandomLengthByteVector(fuzzed_data_provider);
bool complete;
node.ReceiveMsgBytes(b, complete);
diff --git a/src/test/interfaces_tests.cpp b/src/test/interfaces_tests.cpp
index b0d4de89f3..73463b071e 100644
--- a/src/test/interfaces_tests.cpp
+++ b/src/test/interfaces_tests.cpp
@@ -17,8 +17,8 @@ BOOST_FIXTURE_TEST_SUITE(interfaces_tests, TestChain100Setup)
BOOST_AUTO_TEST_CASE(findBlock)
{
- auto chain = interfaces::MakeChain(m_node);
- auto& active = ChainActive();
+ auto& chain = m_node.chain;
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
uint256 hash;
BOOST_CHECK(chain->findBlock(active[10]->GetBlockHash(), FoundBlock().hash(hash)));
@@ -44,13 +44,25 @@ BOOST_AUTO_TEST_CASE(findBlock)
BOOST_CHECK(chain->findBlock(active[60]->GetBlockHash(), FoundBlock().mtpTime(mtp_time)));
BOOST_CHECK_EQUAL(mtp_time, active[60]->GetMedianTimePast());
+ bool cur_active{false}, next_active{false};
+ uint256 next_hash;
+ BOOST_CHECK_EQUAL(active.Height(), 100);
+ BOOST_CHECK(chain->findBlock(active[99]->GetBlockHash(), FoundBlock().inActiveChain(cur_active).nextBlock(FoundBlock().inActiveChain(next_active).hash(next_hash))));
+ BOOST_CHECK(cur_active);
+ BOOST_CHECK(next_active);
+ BOOST_CHECK_EQUAL(next_hash, active[100]->GetBlockHash());
+ cur_active = next_active = false;
+ BOOST_CHECK(chain->findBlock(active[100]->GetBlockHash(), FoundBlock().inActiveChain(cur_active).nextBlock(FoundBlock().inActiveChain(next_active))));
+ BOOST_CHECK(cur_active);
+ BOOST_CHECK(!next_active);
+
BOOST_CHECK(!chain->findBlock({}, FoundBlock()));
}
BOOST_AUTO_TEST_CASE(findFirstBlockWithTimeAndHeight)
{
- auto chain = interfaces::MakeChain(m_node);
- auto& active = ChainActive();
+ auto& chain = m_node.chain;
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
uint256 hash;
int height;
BOOST_CHECK(chain->findFirstBlockWithTimeAndHeight(/* min_time= */ 0, /* min_height= */ 5, FoundBlock().hash(hash).height(height)));
@@ -59,25 +71,10 @@ BOOST_AUTO_TEST_CASE(findFirstBlockWithTimeAndHeight)
BOOST_CHECK(!chain->findFirstBlockWithTimeAndHeight(/* min_time= */ active.Tip()->GetBlockTimeMax() + 1, /* min_height= */ 0));
}
-BOOST_AUTO_TEST_CASE(findNextBlock)
-{
- auto chain = interfaces::MakeChain(m_node);
- auto& active = ChainActive();
- bool reorg;
- uint256 hash;
- BOOST_CHECK(chain->findNextBlock(active[20]->GetBlockHash(), 20, FoundBlock().hash(hash), &reorg));
- BOOST_CHECK_EQUAL(hash, active[21]->GetBlockHash());
- BOOST_CHECK_EQUAL(reorg, false);
- BOOST_CHECK(!chain->findNextBlock(uint256(), 20, {}, &reorg));
- BOOST_CHECK_EQUAL(reorg, true);
- BOOST_CHECK(!chain->findNextBlock(active.Tip()->GetBlockHash(), active.Height(), {}, &reorg));
- BOOST_CHECK_EQUAL(reorg, false);
-}
-
BOOST_AUTO_TEST_CASE(findAncestorByHeight)
{
- auto chain = interfaces::MakeChain(m_node);
- auto& active = ChainActive();
+ auto& chain = m_node.chain;
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
uint256 hash;
BOOST_CHECK(chain->findAncestorByHeight(active[20]->GetBlockHash(), 10, FoundBlock().hash(hash)));
BOOST_CHECK_EQUAL(hash, active[10]->GetBlockHash());
@@ -86,8 +83,8 @@ BOOST_AUTO_TEST_CASE(findAncestorByHeight)
BOOST_AUTO_TEST_CASE(findAncestorByHash)
{
- auto chain = interfaces::MakeChain(m_node);
- auto& active = ChainActive();
+ auto& chain = m_node.chain;
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
int height = -1;
BOOST_CHECK(chain->findAncestorByHash(active[20]->GetBlockHash(), active[10]->GetBlockHash(), FoundBlock().height(height)));
BOOST_CHECK_EQUAL(height, 10);
@@ -96,8 +93,8 @@ BOOST_AUTO_TEST_CASE(findAncestorByHash)
BOOST_AUTO_TEST_CASE(findCommonAncestor)
{
- auto chain = interfaces::MakeChain(m_node);
- auto& active = ChainActive();
+ auto& chain = m_node.chain;
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
auto* orig_tip = active.Tip();
for (int i = 0; i < 10; ++i) {
BlockValidationState state;
@@ -126,8 +123,8 @@ BOOST_AUTO_TEST_CASE(findCommonAncestor)
BOOST_AUTO_TEST_CASE(hasBlocks)
{
- auto chain = interfaces::MakeChain(m_node);
- auto& active = ChainActive();
+ auto& chain = m_node.chain;
+ const CChain& active = Assert(m_node.chainman)->ActiveChain();
// Test ranges
BOOST_CHECK(chain->hasBlocks(active.Tip()->GetBlockHash(), 10, 90));
diff --git a/src/test/reverselock_tests.cpp b/src/test/reverselock_tests.cpp
index a42608a66d..7da364d316 100644
--- a/src/test/reverselock_tests.cpp
+++ b/src/test/reverselock_tests.cpp
@@ -48,12 +48,14 @@ BOOST_AUTO_TEST_CASE(reverselock_errors)
WAIT_LOCK(mutex, lock);
#ifdef DEBUG_LOCKORDER
+ bool prev = g_debug_lockorder_abort;
+ g_debug_lockorder_abort = false;
+
// Make sure trying to reverse lock a previous lock fails
- try {
- REVERSE_LOCK(lock2);
- BOOST_CHECK(false); // REVERSE_LOCK(lock2) succeeded
- } catch(...) { }
+ BOOST_CHECK_EXCEPTION(REVERSE_LOCK(lock2), std::logic_error, HasReason("lock2 was not most recent critical section locked"));
BOOST_CHECK(lock2.owns_lock());
+
+ g_debug_lockorder_abort = prev;
#endif
// Make sure trying to reverse lock an unlocked lock fails
diff --git a/src/test/sync_tests.cpp b/src/test/sync_tests.cpp
index 14145ced7e..71275f69d9 100644
--- a/src/test/sync_tests.cpp
+++ b/src/test/sync_tests.cpp
@@ -62,6 +62,19 @@ void TestDoubleLock(bool should_throw)
g_debug_lockorder_abort = prev;
}
#endif /* DEBUG_LOCKORDER */
+
+template <typename MutexType>
+void TestInconsistentLockOrderDetected(MutexType& mutex1, MutexType& mutex2) NO_THREAD_SAFETY_ANALYSIS
+{
+ ENTER_CRITICAL_SECTION(mutex1);
+ ENTER_CRITICAL_SECTION(mutex2);
+#ifdef DEBUG_LOCKORDER
+ BOOST_CHECK_EXCEPTION(LEAVE_CRITICAL_SECTION(mutex1), std::logic_error, HasReason("mutex1 was not most recent critical section locked"));
+#endif // DEBUG_LOCKORDER
+ LEAVE_CRITICAL_SECTION(mutex2);
+ LEAVE_CRITICAL_SECTION(mutex1);
+ BOOST_CHECK(LockStackEmpty());
+}
} // namespace
BOOST_FIXTURE_TEST_SUITE(sync_tests, BasicTestingSetup)
@@ -108,4 +121,28 @@ BOOST_AUTO_TEST_CASE(double_lock_recursive_mutex)
}
#endif /* DEBUG_LOCKORDER */
+BOOST_AUTO_TEST_CASE(inconsistent_lock_order_detected)
+{
+#ifdef DEBUG_LOCKORDER
+ bool prev = g_debug_lockorder_abort;
+ g_debug_lockorder_abort = false;
+#endif // DEBUG_LOCKORDER
+
+ RecursiveMutex rmutex1, rmutex2;
+ TestInconsistentLockOrderDetected(rmutex1, rmutex2);
+ // Because lock order consistency (CheckLastCritical) is checked before any unlocking
+ // (LeaveCritical), the exception thrown above must not have broken the lock tracking data.
+ TestInconsistentLockOrderDetected(rmutex1, rmutex2);
+
+ Mutex mutex1, mutex2;
+ TestInconsistentLockOrderDetected(mutex1, mutex2);
+ // Because lock order consistency (CheckLastCritical) is checked before any unlocking
+ // (LeaveCritical), the exception thrown above must not have broken the lock tracking data.
+ TestInconsistentLockOrderDetected(mutex1, mutex2);
+
+#ifdef DEBUG_LOCKORDER
+ g_debug_lockorder_abort = prev;
+#endif // DEBUG_LOCKORDER
+}
+
BOOST_AUTO_TEST_SUITE_END()
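Both the reverselock and sync test changes above temporarily clear g_debug_lockorder_abort so that, under DEBUG_LOCKORDER, a lock-order violation throws std::logic_error (caught by BOOST_CHECK_EXCEPTION) instead of aborting the process. A minimal sketch of that save/restore pattern as an RAII guard, assuming only the g_debug_lockorder_abort global used above; the helper itself is hypothetical and not part of this patch:

    #ifdef DEBUG_LOCKORDER
    // Hypothetical RAII guard: while alive, lock-order violations throw instead of
    // aborting, and the previous abort setting is restored on scope exit.
    struct DebugLockOrderNoAbort {
        bool m_prev;
        DebugLockOrderNoAbort() : m_prev{g_debug_lockorder_abort} { g_debug_lockorder_abort = false; }
        ~DebugLockOrderNoAbort() { g_debug_lockorder_abort = m_prev; }
    };
    #endif // DEBUG_LOCKORDER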
diff --git a/src/test/util/setup_common.cpp b/src/test/util/setup_common.cpp
index adf5970206..db8b43d039 100644
--- a/src/test/util/setup_common.cpp
+++ b/src/test/util/setup_common.cpp
@@ -16,6 +16,7 @@
#include <net.h>
#include <net_processing.h>
#include <noui.h>
+#include <policy/fees.h>
#include <pow.h>
#include <rpc/blockchain.h>
#include <rpc/register.h>
@@ -124,40 +125,21 @@ BasicTestingSetup::~BasicTestingSetup()
ECC_Stop();
}
-TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const char*>& extra_args)
+ChainTestingSetup::ChainTestingSetup(const std::string& chainName, const std::vector<const char*>& extra_args)
: BasicTestingSetup(chainName, extra_args)
{
- const CChainParams& chainparams = Params();
- // Ideally we'd move all the RPC tests to the functional testing framework
- // instead of unit tests, but for now we need these here.
- RegisterAllCoreRPCCommands(tableRPC);
-
- m_node.scheduler = MakeUnique<CScheduler>();
-
// We have to run a scheduler thread to prevent ActivateBestChain
// from blocking due to queue overrun.
+ m_node.scheduler = MakeUnique<CScheduler>();
threadGroup.create_thread([&] { TraceThread("scheduler", [&] { m_node.scheduler->serviceQueue(); }); });
GetMainSignals().RegisterBackgroundSignalScheduler(*m_node.scheduler);
pblocktree.reset(new CBlockTreeDB(1 << 20, true));
- m_node.mempool = MakeUnique<CTxMemPool>(&::feeEstimator, 1);
+ m_node.fee_estimator = std::make_unique<CBlockPolicyEstimator>();
+ m_node.mempool = std::make_unique<CTxMemPool>(m_node.fee_estimator.get(), 1);
m_node.chainman = &::g_chainman;
- m_node.chainman->InitializeChainstate(*m_node.mempool);
- ::ChainstateActive().InitCoinsDB(
- /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
- assert(!::ChainstateActive().CanFlushToDisk());
- ::ChainstateActive().InitCoinsCache(1 << 23);
- assert(::ChainstateActive().CanFlushToDisk());
- if (!LoadGenesisBlock(chainparams)) {
- throw std::runtime_error("LoadGenesisBlock failed.");
- }
-
- BlockValidationState state;
- if (!ActivateBestChain(state, chainparams)) {
- throw std::runtime_error(strprintf("ActivateBestChain failed. (%s)", state.ToString()));
- }
// Start script-checking threads. Set g_parallel_script_checks to true so they are used.
constexpr int script_check_threads = 2;
@@ -165,18 +147,9 @@ TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const
threadGroup.create_thread([i]() { return ThreadScriptCheck(i); });
}
g_parallel_script_checks = true;
-
- m_node.banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
- m_node.connman = MakeUnique<CConnman>(0x1337, 0x1337); // Deterministic randomness for tests.
- m_node.peerman = MakeUnique<PeerManager>(chainparams, *m_node.connman, m_node.banman.get(), *m_node.scheduler, *m_node.chainman, *m_node.mempool);
- {
- CConnman::Options options;
- options.m_msgproc = m_node.peerman.get();
- m_node.connman->Init(options);
- }
}
-TestingSetup::~TestingSetup()
+ChainTestingSetup::~ChainTestingSetup()
{
if (m_node.scheduler) m_node.scheduler->stop();
threadGroup.interrupt_all();
@@ -194,6 +167,41 @@ TestingSetup::~TestingSetup()
pblocktree.reset();
}
+TestingSetup::TestingSetup(const std::string& chainName, const std::vector<const char*>& extra_args)
+ : ChainTestingSetup(chainName, extra_args)
+{
+ const CChainParams& chainparams = Params();
+ // Ideally we'd move all the RPC tests to the functional testing framework
+ // instead of unit tests, but for now we need these here.
+ RegisterAllCoreRPCCommands(tableRPC);
+
+ m_node.chainman->InitializeChainstate(*m_node.mempool);
+ ::ChainstateActive().InitCoinsDB(
+ /* cache_size_bytes */ 1 << 23, /* in_memory */ true, /* should_wipe */ false);
+ assert(!::ChainstateActive().CanFlushToDisk());
+ ::ChainstateActive().InitCoinsCache(1 << 23);
+ assert(::ChainstateActive().CanFlushToDisk());
+ if (!LoadGenesisBlock(chainparams)) {
+ throw std::runtime_error("LoadGenesisBlock failed.");
+ }
+
+ BlockValidationState state;
+ if (!ActivateBestChain(state, chainparams)) {
+ throw std::runtime_error(strprintf("ActivateBestChain failed. (%s)", state.ToString()));
+ }
+
+ m_node.banman = MakeUnique<BanMan>(GetDataDir() / "banlist.dat", nullptr, DEFAULT_MISBEHAVING_BANTIME);
+ m_node.connman = MakeUnique<CConnman>(0x1337, 0x1337); // Deterministic randomness for tests.
+ m_node.peerman = std::make_unique<PeerManager>(chainparams, *m_node.connman, m_node.banman.get(),
+ *m_node.scheduler, *m_node.chainman, *m_node.mempool,
+ false);
+ {
+ CConnman::Options options;
+ options.m_msgproc = m_node.peerman.get();
+ m_node.connman->Init(options);
+ }
+}
+
TestChain100Setup::TestChain100Setup()
{
// Generate a 100-block chain:
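With the fee estimator now constructed by ChainTestingSetup and owned by the node context (m_node.fee_estimator above) instead of a global, unit tests reach it through m_node. A minimal sketch, assuming a TestingSetup fixture; the test case itself is hypothetical:

    // Hypothetical unit test: a freshly constructed estimator has tracked no
    // transactions yet, so it cannot return an estimate and reports a zero fee rate.
    BOOST_FIXTURE_TEST_CASE(fee_estimator_from_node_context, TestingSetup)
    {
        CBlockPolicyEstimator& estimator = *Assert(m_node.fee_estimator);
        BOOST_CHECK(estimator.estimateFee(/* confTarget= */ 2) == CFeeRate(0));
    }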
diff --git a/src/test/util/setup_common.h b/src/test/util/setup_common.h
index 1812ce1666..0498e7d182 100644
--- a/src/test/util/setup_common.h
+++ b/src/test/util/setup_common.h
@@ -83,14 +83,21 @@ private:
const fs::path m_path_root;
};
-/** Testing setup that configures a complete environment.
- * Included are coins database, script check threads setup.
+/** Testing setup that performs all steps up until right before
+ * ChainstateManager gets initialized. Meant for testing ChainstateManager
+ * initialization behaviour.
*/
-struct TestingSetup : public BasicTestingSetup {
+struct ChainTestingSetup : public BasicTestingSetup {
boost::thread_group threadGroup;
+ explicit ChainTestingSetup(const std::string& chainName = CBaseChainParams::MAIN, const std::vector<const char*>& extra_args = {});
+ ~ChainTestingSetup();
+};
+
+/** Testing setup that configures a complete environment.
+ */
+struct TestingSetup : public ChainTestingSetup {
explicit TestingSetup(const std::string& chainName = CBaseChainParams::MAIN, const std::vector<const char*>& extra_args = {});
- ~TestingSetup();
};
/** Identical to TestingSetup, but chain set to regtest */
diff --git a/src/test/validation_chainstatemanager_tests.cpp b/src/test/validation_chainstatemanager_tests.cpp
index 36badafc4e..75939e0140 100644
--- a/src/test/validation_chainstatemanager_tests.cpp
+++ b/src/test/validation_chainstatemanager_tests.cpp
@@ -15,15 +15,16 @@
#include <boost/test/unit_test.hpp>
-BOOST_FIXTURE_TEST_SUITE(validation_chainstatemanager_tests, TestingSetup)
+BOOST_FIXTURE_TEST_SUITE(validation_chainstatemanager_tests, ChainTestingSetup)
//! Basic tests for ChainstateManager.
//!
//! First create a legacy (IBD) chainstate, then create a snapshot chainstate.
BOOST_AUTO_TEST_CASE(chainstatemanager)
{
- ChainstateManager manager;
- CTxMemPool mempool;
+ ChainstateManager& manager = *m_node.chainman;
+ CTxMemPool& mempool = *m_node.mempool;
+
std::vector<CChainState*> chainstates;
const CChainParams& chainparams = Params();
@@ -104,8 +105,9 @@ BOOST_AUTO_TEST_CASE(chainstatemanager)
//! Test rebalancing the caches associated with each chainstate.
BOOST_AUTO_TEST_CASE(chainstatemanager_rebalance_caches)
{
- ChainstateManager manager;
- CTxMemPool mempool;
+ ChainstateManager& manager = *m_node.chainman;
+ CTxMemPool& mempool = *m_node.mempool;
+
size_t max_cache = 10000;
manager.m_total_coinsdb_cache = max_cache;
manager.m_total_coinstip_cache = max_cache;
@@ -122,6 +124,7 @@ BOOST_AUTO_TEST_CASE(chainstatemanager_rebalance_caches)
{
LOCK(::cs_main);
c1.InitCoinsCache(1 << 23);
+ BOOST_REQUIRE(c1.LoadGenesisBlock(Params()));
c1.CoinsTip().SetBestBlock(InsecureRand256());
manager.MaybeRebalanceCaches();
}
@@ -139,6 +142,7 @@ BOOST_AUTO_TEST_CASE(chainstatemanager_rebalance_caches)
{
LOCK(::cs_main);
c2.InitCoinsCache(1 << 23);
+ BOOST_REQUIRE(c2.LoadGenesisBlock(Params()));
c2.CoinsTip().SetBestBlock(InsecureRand256());
manager.MaybeRebalanceCaches();
}
diff --git a/src/util/time.h b/src/util/time.h
index af934e423b..c69f604dc6 100644
--- a/src/util/time.h
+++ b/src/util/time.h
@@ -6,9 +6,11 @@
#ifndef BITCOIN_UTIL_TIME_H
#define BITCOIN_UTIL_TIME_H
+#include <chrono>
#include <stdint.h>
#include <string>
-#include <chrono>
+
+using namespace std::chrono_literals;
void UninterruptibleSleep(const std::chrono::microseconds& n);
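Importing std::chrono_literals at the header level lets call sites pass durations with literal suffixes. A minimal sketch; the surrounding function is hypothetical:

    // 500ms is std::chrono::milliseconds{500}; it converts implicitly (and losslessly)
    // to the std::chrono::microseconds parameter declared above.
    void ExampleBackoff()
    {
        UninterruptibleSleep(500ms);
    }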
diff --git a/src/validation.cpp b/src/validation.cpp
index 3aec96bc5a..2585345dee 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -22,7 +22,6 @@
#include <logging/timer.h>
#include <node/ui_interface.h>
#include <optional.h>
-#include <policy/fees.h>
#include <policy/policy.h>
#include <policy/settings.h>
#include <pow.h>
@@ -148,8 +147,6 @@ arith_uint256 nMinimumChainWork;
CFeeRate minRelayTxFee = CFeeRate(DEFAULT_MIN_RELAY_TX_FEE);
-CBlockPolicyEstimator feeEstimator;
-
// Internal stuff
namespace {
CBlockIndex* pindexBestInvalid = nullptr;
diff --git a/src/validation.h b/src/validation.h
index ffb038ad75..6d8c6d431a 100644
--- a/src/validation.h
+++ b/src/validation.h
@@ -42,7 +42,6 @@ class CChainParams;
class CInv;
class CConnman;
class CScriptCheck;
-class CBlockPolicyEstimator;
class CTxMemPool;
class ChainstateManager;
class TxValidationState;
@@ -110,7 +109,6 @@ enum class SynchronizationState {
};
extern RecursiveMutex cs_main;
-extern CBlockPolicyEstimator feeEstimator;
typedef std::unordered_map<uint256, CBlockIndex*, BlockHasher> BlockMap;
extern Mutex g_best_block_mutex;
extern std::condition_variable g_best_block_cv;
diff --git a/src/wallet/bdb.cpp b/src/wallet/bdb.cpp
index 85aae0170d..6ed48593fb 100644
--- a/src/wallet/bdb.cpp
+++ b/src/wallet/bdb.cpp
@@ -53,16 +53,13 @@ bool WalletDatabaseFileId::operator==(const WalletDatabaseFileId& rhs) const
}
/**
- * @param[in] wallet_path Path to wallet directory. Or (for backwards compatibility only) a path to a berkeley btree data file inside a wallet directory.
- * @param[out] database_filename Filename of berkeley btree data file inside the wallet directory.
+ * @param[in] env_directory Path to environment directory
* @return A shared pointer to the BerkeleyEnvironment object for the wallet directory, never empty because ~BerkeleyEnvironment
* erases the weak pointer from the g_dbenvs map.
* @post A new BerkeleyEnvironment weak pointer is inserted into g_dbenvs if the directory path key was not already in the map.
*/
-std::shared_ptr<BerkeleyEnvironment> GetWalletEnv(const fs::path& wallet_path, std::string& database_filename)
+std::shared_ptr<BerkeleyEnvironment> GetBerkeleyEnv(const fs::path& env_directory)
{
- fs::path env_directory;
- SplitWalletPath(wallet_path, env_directory, database_filename);
LOCK(cs_db);
auto inserted = g_dbenvs.emplace(env_directory.string(), std::weak_ptr<BerkeleyEnvironment>());
if (inserted.second) {
@@ -808,21 +805,14 @@ std::unique_ptr<DatabaseBatch> BerkeleyDatabase::MakeBatch(bool flush_on_close)
return MakeUnique<BerkeleyBatch>(*this, false, flush_on_close);
}
-bool ExistsBerkeleyDatabase(const fs::path& path)
-{
- fs::path env_directory;
- std::string data_filename;
- SplitWalletPath(path, env_directory, data_filename);
- return IsBDBFile(env_directory / data_filename);
-}
-
std::unique_ptr<BerkeleyDatabase> MakeBerkeleyDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error)
{
+ fs::path data_file = BDBDataFile(path);
std::unique_ptr<BerkeleyDatabase> db;
{
LOCK(cs_db); // Lock env.m_databases until insert in BerkeleyDatabase constructor
- std::string data_filename;
- std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(path, data_filename);
+ std::string data_filename = data_file.filename().string();
+ std::shared_ptr<BerkeleyEnvironment> env = GetBerkeleyEnv(data_file.parent_path());
if (env->m_databases.count(data_filename)) {
error = Untranslated(strprintf("Refusing to load database. Data file '%s' is already loaded.", (env->Directory() / data_filename).string()));
status = DatabaseStatus::FAILED_ALREADY_LOADED;
@@ -839,28 +829,3 @@ std::unique_ptr<BerkeleyDatabase> MakeBerkeleyDatabase(const fs::path& path, con
status = DatabaseStatus::SUCCESS;
return db;
}
-
-bool IsBDBFile(const fs::path& path)
-{
- if (!fs::exists(path)) return false;
-
- // A Berkeley DB Btree file has at least 4K.
- // This check also prevents opening lock files.
- boost::system::error_code ec;
- auto size = fs::file_size(path, ec);
- if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), path.string());
- if (size < 4096) return false;
-
- fsbridge::ifstream file(path, std::ios::binary);
- if (!file.is_open()) return false;
-
- file.seekg(12, std::ios::beg); // Magic bytes start at offset 12
- uint32_t data = 0;
- file.read((char*) &data, sizeof(data)); // Read 4 bytes of file to compare against magic
-
- // Berkeley DB Btree magic bytes, from:
- // https://github.com/file/file/blob/5824af38469ec1ca9ac3ffd251e7afe9dc11e227/magic/Magdir/database#L74-L75
- // - big endian systems - 00 05 31 62
- // - little endian systems - 62 31 05 00
- return data == 0x00053162 || data == 0x62310500;
-}
diff --git a/src/wallet/bdb.h b/src/wallet/bdb.h
index 4f97665f08..bf1617d67f 100644
--- a/src/wallet/bdb.h
+++ b/src/wallet/bdb.h
@@ -83,11 +83,8 @@ public:
}
};
-/** Get BerkeleyEnvironment and database filename given a wallet path. */
-std::shared_ptr<BerkeleyEnvironment> GetWalletEnv(const fs::path& wallet_path, std::string& database_filename);
-
-/** Check format of database file */
-bool IsBDBFile(const fs::path& path);
+/** Get BerkeleyEnvironment given a directory path. */
+std::shared_ptr<BerkeleyEnvironment> GetBerkeleyEnv(const fs::path& env_directory);
class BerkeleyBatch;
@@ -226,9 +223,6 @@ public:
std::string BerkeleyDatabaseVersion();
-//! Check if Berkeley database exists at specified path.
-bool ExistsBerkeleyDatabase(const fs::path& path);
-
//! Return object giving access to Berkeley database at specified path.
std::unique_ptr<BerkeleyDatabase> MakeBerkeleyDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error);
diff --git a/src/wallet/db.cpp b/src/wallet/db.cpp
index bd1d114730..cd49baeb78 100644
--- a/src/wallet/db.cpp
+++ b/src/wallet/db.cpp
@@ -3,23 +3,130 @@
// Distributed under the MIT software license, see the accompanying
// file COPYING or http://www.opensource.org/licenses/mit-license.php.
+#include <chainparams.h>
#include <fs.h>
+#include <logging.h>
#include <wallet/db.h>
#include <string>
-void SplitWalletPath(const fs::path& wallet_path, fs::path& env_directory, std::string& database_filename)
+std::vector<fs::path> ListDatabases(const fs::path& wallet_dir)
+{
+ const size_t offset = wallet_dir.string().size() + 1;
+ std::vector<fs::path> paths;
+ boost::system::error_code ec;
+
+ for (auto it = fs::recursive_directory_iterator(wallet_dir, ec); it != fs::recursive_directory_iterator(); it.increment(ec)) {
+ if (ec) {
+ LogPrintf("%s: %s %s\n", __func__, ec.message(), it->path().string());
+ continue;
+ }
+
+ try {
+ // Get wallet path relative to walletdir by removing walletdir from the wallet path.
+ // This can be replaced by boost::filesystem::lexically_relative once boost is bumped to 1.60.
+ const fs::path path = it->path().string().substr(offset);
+
+ if (it->status().type() == fs::directory_file &&
+ (IsBDBFile(BDBDataFile(it->path())) || IsSQLiteFile(SQLiteDataFile(it->path())))) {
+ // Found a directory which contains wallet.dat btree file, add it as a wallet.
+ paths.emplace_back(path);
+ } else if (it.level() == 0 && it->symlink_status().type() == fs::regular_file && IsBDBFile(it->path())) {
+ if (it->path().filename() == "wallet.dat") {
+ // Found top-level wallet.dat btree file, add top level directory ""
+ // as a wallet.
+ paths.emplace_back();
+ } else {
+ // Found top-level btree file not called wallet.dat. Current bitcoin
+ // software will never create these files but will allow them to be
+ // opened in a shared database environment for backwards compatibility.
+ // Add it to the list of available wallets.
+ paths.emplace_back(path);
+ }
+ }
+ } catch (const std::exception& e) {
+ LogPrintf("%s: Error scanning %s: %s\n", __func__, it->path().string(), e.what());
+ it.no_push();
+ }
+ }
+
+ return paths;
+}
+
+fs::path BDBDataFile(const fs::path& wallet_path)
{
if (fs::is_regular_file(wallet_path)) {
// Special case for backwards compatibility: if wallet path points to an
// existing file, treat it as the path to a BDB data file in a parent
// directory that also contains BDB log files.
- env_directory = wallet_path.parent_path();
- database_filename = wallet_path.filename().string();
+ return wallet_path;
} else {
// Normal case: Interpret wallet path as a directory path containing
// data and log files.
- env_directory = wallet_path;
- database_filename = "wallet.dat";
+ return wallet_path / "wallet.dat";
}
}
+
+fs::path SQLiteDataFile(const fs::path& path)
+{
+ return path / "wallet.dat";
+}
+
+bool IsBDBFile(const fs::path& path)
+{
+ if (!fs::exists(path)) return false;
+
+ // A Berkeley DB Btree file has at least 4K.
+ // This check also prevents opening lock files.
+ boost::system::error_code ec;
+ auto size = fs::file_size(path, ec);
+ if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), path.string());
+ if (size < 4096) return false;
+
+ fsbridge::ifstream file(path, std::ios::binary);
+ if (!file.is_open()) return false;
+
+ file.seekg(12, std::ios::beg); // Magic bytes start at offset 12
+ uint32_t data = 0;
+ file.read((char*) &data, sizeof(data)); // Read 4 bytes of file to compare against magic
+
+ // Berkeley DB Btree magic bytes, from:
+ // https://github.com/file/file/blob/5824af38469ec1ca9ac3ffd251e7afe9dc11e227/magic/Magdir/database#L74-L75
+ // - big endian systems - 00 05 31 62
+ // - little endian systems - 62 31 05 00
+ return data == 0x00053162 || data == 0x62310500;
+}
+
+bool IsSQLiteFile(const fs::path& path)
+{
+ if (!fs::exists(path)) return false;
+
+ // A SQLite Database file is at least 512 bytes.
+ boost::system::error_code ec;
+ auto size = fs::file_size(path, ec);
+ if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), path.string());
+ if (size < 512) return false;
+
+ fsbridge::ifstream file(path, std::ios::binary);
+ if (!file.is_open()) return false;
+
+ // Magic is at beginning and is 16 bytes long
+ char magic[16];
+ file.read(magic, 16);
+
+ // Application id is at offset 68 and 4 bytes long
+ file.seekg(68, std::ios::beg);
+ char app_id[4];
+ file.read(app_id, 4);
+
+ file.close();
+
+ // Check the magic, see https://sqlite.org/fileformat2.html
+ std::string magic_str(magic, 16);
+ if (magic_str != std::string("SQLite format 3", 16)) {
+ return false;
+ }
+
+ // Check the application id matches our network magic
+ return memcmp(Params().MessageStart(), app_id, 4) == 0;
+}
diff --git a/src/wallet/db.h b/src/wallet/db.h
index 940d1cd242..2c75486a44 100644
--- a/src/wallet/db.h
+++ b/src/wallet/db.h
@@ -223,6 +223,14 @@ enum class DatabaseStatus {
FAILED_ENCRYPT,
};
+/** Recursively list database paths in directory. */
+std::vector<fs::path> ListDatabases(const fs::path& path);
+
std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error);
+fs::path BDBDataFile(const fs::path& path);
+fs::path SQLiteDataFile(const fs::path& path);
+bool IsBDBFile(const fs::path& path);
+bool IsSQLiteFile(const fs::path& path);
+
#endif // BITCOIN_WALLET_DB_H
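A minimal sketch of how the helpers declared above fit together, mirroring the format detection that MakeDatabase performs in walletdb.cpp further down; the function name is hypothetical, and the DatabaseFormat enum from this header plus <stdexcept> are assumed:

    // Resolve the data file for a wallet path and probe its on-disk format.
    DatabaseFormat DetectWalletFormatExample(const fs::path& wallet_path)
    {
        if (IsBDBFile(BDBDataFile(wallet_path))) return DatabaseFormat::BERKELEY;
        if (IsSQLiteFile(SQLiteDataFile(wallet_path))) return DatabaseFormat::SQLITE;
        throw std::runtime_error("unrecognized wallet database format");
    }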
diff --git a/src/wallet/interfaces.cpp b/src/wallet/interfaces.cpp
index e8dbc20e56..e4e8c50f4f 100644
--- a/src/wallet/interfaces.cpp
+++ b/src/wallet/interfaces.cpp
@@ -551,7 +551,7 @@ public:
std::vector<std::string> listWalletDir() override
{
std::vector<std::string> paths;
- for (auto& path : ListWalletDir()) {
+ for (auto& path : ListDatabases(GetWalletDir())) {
paths.push_back(path.string());
}
return paths;
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 7ea6a214b2..94a73b67df 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -2537,7 +2537,7 @@ static RPCHelpMan listwalletdir()
[&](const RPCHelpMan& self, const JSONRPCRequest& request) -> UniValue
{
UniValue wallets(UniValue::VARR);
- for (const auto& path : ListWalletDir()) {
+ for (const auto& path : ListDatabases(GetWalletDir())) {
UniValue wallet(UniValue::VOBJ);
wallet.pushKV("name", path.string());
wallets.push_back(wallet);
@@ -4089,7 +4089,7 @@ static RPCHelpMan send()
UniValueType(), // outputs (ARR or OBJ, checked later)
UniValue::VNUM, // conf_target
UniValue::VSTR, // estimate_mode
- UniValue::VNUM, // fee_rate
+ UniValueType(), // fee_rate, will be checked by AmountFromValue() in SetFeeEstimateMode()
UniValue::VOBJ, // options
}, true
);
diff --git a/src/wallet/salvage.cpp b/src/wallet/salvage.cpp
index da5ca7858f..09a9ec68cd 100644
--- a/src/wallet/salvage.cpp
+++ b/src/wallet/salvage.cpp
@@ -32,8 +32,9 @@ bool RecoverDatabaseFile(const fs::path& file_path, bilingual_str& error, std::v
std::unique_ptr<WalletDatabase> database = MakeDatabase(file_path, options, status, error);
if (!database) return false;
- std::string filename;
- std::shared_ptr<BerkeleyEnvironment> env = GetWalletEnv(file_path, filename);
+ BerkeleyDatabase& berkeley_database = static_cast<BerkeleyDatabase&>(*database);
+ std::string filename = berkeley_database.Filename();
+ std::shared_ptr<BerkeleyEnvironment> env = berkeley_database.env;
if (!env->Open(error)) {
return false;
diff --git a/src/wallet/sqlite.cpp b/src/wallet/sqlite.cpp
index d278d96476..0fb3b1d3c4 100644
--- a/src/wallet/sqlite.cpp
+++ b/src/wallet/sqlite.cpp
@@ -17,7 +17,6 @@
#include <sqlite3.h>
#include <stdint.h>
-static const char* const DATABASE_FILENAME = "wallet.dat";
static constexpr int32_t WALLET_SCHEMA_VERSION = 0;
static Mutex g_sqlite_mutex;
@@ -568,17 +567,11 @@ bool SQLiteBatch::TxnAbort()
return res == SQLITE_OK;
}
-bool ExistsSQLiteDatabase(const fs::path& path)
-{
- const fs::path file = path / DATABASE_FILENAME;
- return fs::symlink_status(file).type() == fs::regular_file && IsSQLiteFile(file);
-}
-
std::unique_ptr<SQLiteDatabase> MakeSQLiteDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error)
{
- const fs::path file = path / DATABASE_FILENAME;
try {
- auto db = MakeUnique<SQLiteDatabase>(path, file);
+ fs::path data_file = SQLiteDataFile(path);
+ auto db = MakeUnique<SQLiteDatabase>(data_file.parent_path(), data_file);
if (options.verify && !db->Verify(error)) {
status = DatabaseStatus::FAILED_VERIFY;
return nullptr;
@@ -596,37 +589,3 @@ std::string SQLiteDatabaseVersion()
{
return std::string(sqlite3_libversion());
}
-
-bool IsSQLiteFile(const fs::path& path)
-{
- if (!fs::exists(path)) return false;
-
- // A SQLite Database file is at least 512 bytes.
- boost::system::error_code ec;
- auto size = fs::file_size(path, ec);
- if (ec) LogPrintf("%s: %s %s\n", __func__, ec.message(), path.string());
- if (size < 512) return false;
-
- fsbridge::ifstream file(path, std::ios::binary);
- if (!file.is_open()) return false;
-
- // Magic is at beginning and is 16 bytes long
- char magic[16];
- file.read(magic, 16);
-
- // Application id is at offset 68 and 4 bytes long
- file.seekg(68, std::ios::beg);
- char app_id[4];
- file.read(app_id, 4);
-
- file.close();
-
- // Check the magic, see https://sqlite.org/fileformat2.html
- std::string magic_str(magic, 16);
- if (magic_str != std::string("SQLite format 3", 16)) {
- return false;
- }
-
- // Check the application id matches our network magic
- return memcmp(Params().MessageStart(), app_id, 4) == 0;
-}
diff --git a/src/wallet/sqlite.h b/src/wallet/sqlite.h
index 693a2ef55a..442563184e 100644
--- a/src/wallet/sqlite.h
+++ b/src/wallet/sqlite.h
@@ -113,10 +113,8 @@ public:
sqlite3* m_db{nullptr};
};
-bool ExistsSQLiteDatabase(const fs::path& path);
std::unique_ptr<SQLiteDatabase> MakeSQLiteDatabase(const fs::path& path, const DatabaseOptions& options, DatabaseStatus& status, bilingual_str& error);
std::string SQLiteDatabaseVersion();
-bool IsSQLiteFile(const fs::path& path);
#endif // BITCOIN_WALLET_SQLITE_H
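With the existence and format checks removed from this header, opening a SQLite wallet goes through the remaining factory function. A minimal sketch; wallet_dir is a hypothetical path and error handling is abbreviated:

    DatabaseOptions options;
    DatabaseStatus status;
    bilingual_str error;
    std::unique_ptr<SQLiteDatabase> db = MakeSQLiteDatabase(wallet_dir, options, status, error);
    if (!db) LogPrintf("Could not open wallet: %s\n", error.original);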
diff --git a/src/wallet/test/coinselector_tests.cpp b/src/wallet/test/coinselector_tests.cpp
index f38ccba384..4127cd45f8 100644
--- a/src/wallet/test/coinselector_tests.cpp
+++ b/src/wallet/test/coinselector_tests.cpp
@@ -283,7 +283,7 @@ BOOST_AUTO_TEST_CASE(bnb_search_test)
// Make sure that can use BnB when there are preset inputs
empty_wallet();
{
- std::unique_ptr<CWallet> wallet = MakeUnique<CWallet>(m_chain.get(), "", CreateMockWalletDatabase());
+ std::unique_ptr<CWallet> wallet = MakeUnique<CWallet>(m_node.chain.get(), "", CreateMockWalletDatabase());
bool firstRun;
wallet->LoadWallet(firstRun);
wallet->SetupLegacyScriptPubKeyMan();
diff --git a/src/wallet/test/db_tests.cpp b/src/wallet/test/db_tests.cpp
index 8f0083cd2e..1a28852a6b 100644
--- a/src/wallet/test/db_tests.cpp
+++ b/src/wallet/test/db_tests.cpp
@@ -13,6 +13,13 @@
BOOST_FIXTURE_TEST_SUITE(db_tests, BasicTestingSetup)
+static std::shared_ptr<BerkeleyEnvironment> GetWalletEnv(const fs::path& path, std::string& database_filename)
+{
+ fs::path data_file = BDBDataFile(path);
+ database_filename = data_file.filename().string();
+ return GetBerkeleyEnv(data_file.parent_path());
+}
+
BOOST_AUTO_TEST_CASE(getwalletenv_file)
{
std::string test_name = "test_name.dat";
diff --git a/src/wallet/test/init_test_fixture.cpp b/src/wallet/test/init_test_fixture.cpp
index c80310045a..334e4ae0d8 100644
--- a/src/wallet/test/init_test_fixture.cpp
+++ b/src/wallet/test/init_test_fixture.cpp
@@ -10,7 +10,7 @@
InitWalletDirTestingSetup::InitWalletDirTestingSetup(const std::string& chainName) : BasicTestingSetup(chainName)
{
- m_wallet_client = MakeWalletClient(*m_chain, *Assert(m_node.args));
+ m_wallet_client = MakeWalletClient(*m_node.chain, *Assert(m_node.args));
std::string sep;
sep += fs::path::preferred_separator;
diff --git a/src/wallet/test/init_test_fixture.h b/src/wallet/test/init_test_fixture.h
index f5bade77df..f666c45a34 100644
--- a/src/wallet/test/init_test_fixture.h
+++ b/src/wallet/test/init_test_fixture.h
@@ -19,7 +19,6 @@ struct InitWalletDirTestingSetup: public BasicTestingSetup {
fs::path m_datadir;
fs::path m_cwd;
std::map<std::string, fs::path> m_walletdir_path_cases;
- std::unique_ptr<interfaces::Chain> m_chain = interfaces::MakeChain(m_node);
std::unique_ptr<interfaces::WalletClient> m_wallet_client;
};
diff --git a/src/wallet/test/ismine_tests.cpp b/src/wallet/test/ismine_tests.cpp
index d5aed99d99..0ef8b9c4bf 100644
--- a/src/wallet/test/ismine_tests.cpp
+++ b/src/wallet/test/ismine_tests.cpp
@@ -27,8 +27,7 @@ BOOST_AUTO_TEST_CASE(ismine_standard)
CKey uncompressedKey;
uncompressedKey.MakeNewKey(false);
CPubKey uncompressedPubkey = uncompressedKey.GetPubKey();
- NodeContext node;
- std::unique_ptr<interfaces::Chain> chain = interfaces::MakeChain(node);
+ std::unique_ptr<interfaces::Chain>& chain = m_node.chain;
CScript scriptPubKey;
isminetype result;
diff --git a/src/wallet/test/scriptpubkeyman_tests.cpp b/src/wallet/test/scriptpubkeyman_tests.cpp
index f7c1337b0d..347a436429 100644
--- a/src/wallet/test/scriptpubkeyman_tests.cpp
+++ b/src/wallet/test/scriptpubkeyman_tests.cpp
@@ -17,9 +17,7 @@ BOOST_FIXTURE_TEST_SUITE(scriptpubkeyman_tests, BasicTestingSetup)
BOOST_AUTO_TEST_CASE(CanProvide)
{
// Set up wallet and keyman variables.
- NodeContext node;
- std::unique_ptr<interfaces::Chain> chain = interfaces::MakeChain(node);
- CWallet wallet(chain.get(), "", CreateDummyWalletDatabase());
+ CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase());
LegacyScriptPubKeyMan& keyman = *wallet.GetOrCreateLegacyScriptPubKeyMan();
// Make a 1 of 2 multisig script
diff --git a/src/wallet/test/wallet_test_fixture.cpp b/src/wallet/test/wallet_test_fixture.cpp
index 4d6f427618..badf2eb459 100644
--- a/src/wallet/test/wallet_test_fixture.cpp
+++ b/src/wallet/test/wallet_test_fixture.cpp
@@ -6,10 +6,10 @@
WalletTestingSetup::WalletTestingSetup(const std::string& chainName)
: TestingSetup(chainName),
- m_wallet(m_chain.get(), "", CreateMockWalletDatabase())
+ m_wallet(m_node.chain.get(), "", CreateMockWalletDatabase())
{
bool fFirstRun;
m_wallet.LoadWallet(fFirstRun);
- m_chain_notifications_handler = m_chain->handleNotifications({ &m_wallet, [](CWallet*) {} });
+ m_chain_notifications_handler = m_node.chain->handleNotifications({ &m_wallet, [](CWallet*) {} });
m_wallet_client->registerRpcs();
}
diff --git a/src/wallet/test/wallet_test_fixture.h b/src/wallet/test/wallet_test_fixture.h
index ba8a5ff1f3..ab7fb8c42b 100644
--- a/src/wallet/test/wallet_test_fixture.h
+++ b/src/wallet/test/wallet_test_fixture.h
@@ -20,8 +20,7 @@
struct WalletTestingSetup : public TestingSetup {
explicit WalletTestingSetup(const std::string& chainName = CBaseChainParams::MAIN);
- std::unique_ptr<interfaces::Chain> m_chain = interfaces::MakeChain(m_node);
- std::unique_ptr<interfaces::WalletClient> m_wallet_client = interfaces::MakeWalletClient(*m_chain, *Assert(m_node.args));
+ std::unique_ptr<interfaces::WalletClient> m_wallet_client = interfaces::MakeWalletClient(*m_node.chain, *Assert(m_node.args));
CWallet m_wallet;
std::unique_ptr<interfaces::Handler> m_chain_notifications_handler;
};
diff --git a/src/wallet/test/wallet_tests.cpp b/src/wallet/test/wallet_tests.cpp
index 4911af08c6..a6db261914 100644
--- a/src/wallet/test/wallet_tests.cpp
+++ b/src/wallet/test/wallet_tests.cpp
@@ -28,6 +28,8 @@ RPCHelpMan importmulti();
RPCHelpMan dumpwallet();
RPCHelpMan importwallet();
+extern RecursiveMutex cs_wallets;
+
// Ensure that fee levels defined in the wallet are at least as high
// as the default levels for node policy.
static_assert(DEFAULT_TRANSACTION_MINFEE >= DEFAULT_MIN_RELAY_TX_FEE, "wallet minimum fee is smaller than default relay fee");
@@ -83,12 +85,9 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
CBlockIndex* newTip = ::ChainActive().Tip();
- NodeContext node;
- auto chain = interfaces::MakeChain(node);
-
// Verify ScanForWalletTransactions fails to read an unknown start block.
{
- CWallet wallet(chain.get(), "", CreateDummyWalletDatabase());
+ CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase());
{
LOCK(wallet.cs_wallet);
wallet.SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash());
@@ -107,7 +106,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
// Verify ScanForWalletTransactions picks up transactions in both the old
// and new block files.
{
- CWallet wallet(chain.get(), "", CreateDummyWalletDatabase());
+ CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase());
{
LOCK(wallet.cs_wallet);
wallet.SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash());
@@ -133,7 +132,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
// Verify ScanForWalletTransactions only picks transactions in the new block
// file.
{
- CWallet wallet(chain.get(), "", CreateDummyWalletDatabase());
+ CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase());
{
LOCK(wallet.cs_wallet);
wallet.SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash());
@@ -158,7 +157,7 @@ BOOST_FIXTURE_TEST_CASE(scan_for_wallet_transactions, TestChain100Setup)
// Verify ScanForWalletTransactions scans no blocks.
{
- CWallet wallet(chain.get(), "", CreateDummyWalletDatabase());
+ CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase());
{
LOCK(wallet.cs_wallet);
wallet.SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash());
@@ -183,9 +182,6 @@ BOOST_FIXTURE_TEST_CASE(importmulti_rescan, TestChain100Setup)
CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
CBlockIndex* newTip = ::ChainActive().Tip();
- NodeContext node;
- auto chain = interfaces::MakeChain(node);
-
// Prune the older block file.
{
LOCK(cs_main);
@@ -197,7 +193,7 @@ BOOST_FIXTURE_TEST_CASE(importmulti_rescan, TestChain100Setup)
// before the missing block, and success for a key whose creation time is
// after.
{
- std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(chain.get(), "", CreateDummyWalletDatabase());
+ std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", CreateDummyWalletDatabase());
wallet->SetupLegacyScriptPubKeyMan();
WITH_LOCK(wallet->cs_wallet, wallet->SetLastBlockProcessed(newTip->nHeight, newTip->GetBlockHash()));
AddWallet(wallet);
@@ -255,14 +251,11 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
SetMockTime(KEY_TIME);
m_coinbase_txns.emplace_back(CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);
- NodeContext node;
- auto chain = interfaces::MakeChain(node);
-
std::string backup_file = (GetDataDir() / "wallet.backup").string();
// Import key into wallet and call dumpwallet to create backup file.
{
- std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(chain.get(), "", CreateDummyWalletDatabase());
+ std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", CreateDummyWalletDatabase());
{
auto spk_man = wallet->GetOrCreateLegacyScriptPubKeyMan();
LOCK2(wallet->cs_wallet, spk_man->cs_KeyStore);
@@ -284,7 +277,7 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
// Call importwallet RPC and verify all blocks with timestamps >= BLOCK_TIME
// were scanned, and no prior blocks were scanned.
{
- std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(chain.get(), "", CreateDummyWalletDatabase());
+ std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", CreateDummyWalletDatabase());
LOCK(wallet->cs_wallet);
wallet->SetupLegacyScriptPubKeyMan();
@@ -317,10 +310,7 @@ BOOST_FIXTURE_TEST_CASE(importwallet_rescan, TestChain100Setup)
// debit functions.
BOOST_FIXTURE_TEST_CASE(coin_mark_dirty_immature_credit, TestChain100Setup)
{
- NodeContext node;
- auto chain = interfaces::MakeChain(node);
-
- CWallet wallet(chain.get(), "", CreateDummyWalletDatabase());
+ CWallet wallet(m_node.chain.get(), "", CreateDummyWalletDatabase());
auto spk_man = wallet.GetOrCreateLegacyScriptPubKeyMan();
CWalletTx wtx(&wallet, m_coinbase_txns.back());
@@ -495,7 +485,7 @@ public:
ListCoinsTestingSetup()
{
CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey()));
- wallet = MakeUnique<CWallet>(m_chain.get(), "", CreateMockWalletDatabase());
+ wallet = MakeUnique<CWallet>(m_node.chain.get(), "", CreateMockWalletDatabase());
{
LOCK2(wallet->cs_wallet, ::cs_main);
wallet->SetLastBlockProcessed(::ChainActive().Height(), ::ChainActive().Tip()->GetBlockHash());
@@ -545,7 +535,6 @@ public:
return it->second;
}
- std::unique_ptr<interfaces::Chain> m_chain = interfaces::MakeChain(m_node);
std::unique_ptr<CWallet> wallet;
};
@@ -612,9 +601,7 @@ BOOST_FIXTURE_TEST_CASE(ListCoins, ListCoinsTestingSetup)
BOOST_FIXTURE_TEST_CASE(wallet_disableprivkeys, TestChain100Setup)
{
- NodeContext node;
- auto chain = interfaces::MakeChain(node);
- std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(chain.get(), "", CreateDummyWalletDatabase());
+ std::shared_ptr<CWallet> wallet = std::make_shared<CWallet>(m_node.chain.get(), "", CreateDummyWalletDatabase());
wallet->SetupLegacyScriptPubKeyMan();
wallet->SetMinVersion(FEATURE_LATEST);
wallet->SetWalletFlag(WALLET_FLAG_DISABLE_PRIVATE_KEYS);
@@ -709,8 +696,7 @@ BOOST_FIXTURE_TEST_CASE(wallet_descriptor_test, BasicTestingSetup)
BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup)
{
// Create new wallet with known key and unload it.
- auto chain = interfaces::MakeChain(m_node);
- auto wallet = TestLoadWallet(*chain);
+ auto wallet = TestLoadWallet(*m_node.chain);
CKey key;
key.MakeNewKey(true);
AddKey(*wallet, key);
@@ -745,12 +731,12 @@ BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup)
auto block_tx = TestSimpleSpend(*m_coinbase_txns[0], 0, coinbaseKey, GetScriptForRawPubKey(key.GetPubKey()));
m_coinbase_txns.push_back(CreateAndProcessBlock({block_tx}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);
auto mempool_tx = TestSimpleSpend(*m_coinbase_txns[1], 0, coinbaseKey, GetScriptForRawPubKey(key.GetPubKey()));
- BOOST_CHECK(chain->broadcastTransaction(MakeTransactionRef(mempool_tx), DEFAULT_TRANSACTION_MAXFEE, false, error));
+ BOOST_CHECK(m_node.chain->broadcastTransaction(MakeTransactionRef(mempool_tx), DEFAULT_TRANSACTION_MAXFEE, false, error));
// Reload wallet and make sure new transactions are detected despite events
// being blocked
- wallet = TestLoadWallet(*chain);
+ wallet = TestLoadWallet(*m_node.chain);
BOOST_CHECK(rescan_completed);
BOOST_CHECK_EQUAL(addtx_count, 2);
{
@@ -777,18 +763,20 @@ BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup)
// deadlock during the sync and simulates a new block notification happening
// as soon as possible.
addtx_count = 0;
- auto handler = HandleLoadWallet([&](std::unique_ptr<interfaces::Wallet> wallet) EXCLUSIVE_LOCKS_REQUIRED(wallet->wallet()->cs_wallet) {
+ auto handler = HandleLoadWallet([&](std::unique_ptr<interfaces::Wallet> wallet) EXCLUSIVE_LOCKS_REQUIRED(wallet->wallet()->cs_wallet, cs_wallets) {
BOOST_CHECK(rescan_completed);
m_coinbase_txns.push_back(CreateAndProcessBlock({}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);
block_tx = TestSimpleSpend(*m_coinbase_txns[2], 0, coinbaseKey, GetScriptForRawPubKey(key.GetPubKey()));
m_coinbase_txns.push_back(CreateAndProcessBlock({block_tx}, GetScriptForRawPubKey(coinbaseKey.GetPubKey())).vtx[0]);
mempool_tx = TestSimpleSpend(*m_coinbase_txns[3], 0, coinbaseKey, GetScriptForRawPubKey(key.GetPubKey()));
- BOOST_CHECK(chain->broadcastTransaction(MakeTransactionRef(mempool_tx), DEFAULT_TRANSACTION_MAXFEE, false, error));
+ BOOST_CHECK(m_node.chain->broadcastTransaction(MakeTransactionRef(mempool_tx), DEFAULT_TRANSACTION_MAXFEE, false, error));
+ LEAVE_CRITICAL_SECTION(cs_wallets);
LEAVE_CRITICAL_SECTION(wallet->wallet()->cs_wallet);
SyncWithValidationInterfaceQueue();
ENTER_CRITICAL_SECTION(wallet->wallet()->cs_wallet);
+ ENTER_CRITICAL_SECTION(cs_wallets);
});
- wallet = TestLoadWallet(*chain);
+ wallet = TestLoadWallet(*m_node.chain);
BOOST_CHECK_EQUAL(addtx_count, 4);
{
LOCK(wallet->cs_wallet);
@@ -802,8 +790,7 @@ BOOST_FIXTURE_TEST_CASE(CreateWallet, TestChain100Setup)
BOOST_FIXTURE_TEST_CASE(ZapSelectTx, TestChain100Setup)
{
- auto chain = interfaces::MakeChain(m_node);
- auto wallet = TestLoadWallet(*chain);
+ auto wallet = TestLoadWallet(*m_node.chain);
CKey key;
key.MakeNewKey(true);
AddKey(*wallet, key);
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index 330559d0dd..3e37491a23 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -52,7 +52,7 @@ const std::map<uint64_t,std::string> WALLET_FLAG_CAVEATS{
static const size_t OUTPUT_GROUP_MAX_ENTRIES = 10;
-static RecursiveMutex cs_wallets;
+RecursiveMutex cs_wallets;
static std::vector<std::shared_ptr<CWallet>> vpwallets GUARDED_BY(cs_wallets);
static std::list<LoadWalletFn> g_load_wallet_fns GUARDED_BY(cs_wallets);
@@ -946,11 +946,12 @@ bool CWallet::LoadToWallet(const uint256& hash, const UpdateWalletTxFn& fill_wtx
}
// If wallet doesn't have a chain (e.g. wallet-tool), don't bother to update txn.
if (HaveChain()) {
- Optional<int> block_height = chain().getBlockHeight(wtx.m_confirm.hashBlock);
- if (block_height) {
+ bool active;
+ int height;
+ if (chain().findBlock(wtx.m_confirm.hashBlock, FoundBlock().inActiveChain(active).height(height)) && active) {
// Update cached block height variable since it is not stored in the
// serialized transaction.
- wtx.m_confirm.block_height = *block_height;
+ wtx.m_confirm.block_height = height;
} else if (wtx.isConflicted() || wtx.isConfirmed()) {
// If tx block (or conflicting block) was reorged out of chain
// while the wallet was shutdown, change tx status to UNCONFIRMED
@@ -1771,18 +1772,22 @@ CWallet::ScanResult CWallet::ScanForWalletTransactions(const uint256& start_bloc
WalletLogPrintf("Still rescanning. At block %d. Progress=%f\n", block_height, progress_current);
}
+ // Read block data
CBlock block;
- bool next_block;
+ chain().findBlock(block_hash, FoundBlock().data(block));
+
+ // Find next block separately from reading data above, because reading
+ // is slow and there might be a reorg while it is read.
+ bool block_still_active = false;
+ bool next_block = false;
uint256 next_block_hash;
- bool reorg = false;
- if (chain().findBlock(block_hash, FoundBlock().data(block)) && !block.IsNull()) {
+ chain().findBlock(block_hash, FoundBlock().inActiveChain(block_still_active).nextBlock(FoundBlock().inActiveChain(next_block).hash(next_block_hash)));
+
+ if (!block.IsNull()) {
LOCK(cs_wallet);
- next_block = chain().findNextBlock(block_hash, block_height, FoundBlock().hash(next_block_hash), &reorg);
- if (reorg) {
+ if (!block_still_active) {
// Abort scan if current block is no longer active, to prevent
// marking transactions as coming from the wrong block.
- // TODO: This should return success instead of failure, see
- // https://github.com/bitcoin/bitcoin/pull/14711#issuecomment-458342518
result.last_failed_block = block_hash;
result.status = ScanResult::FAILURE;
break;
@@ -1797,13 +1802,12 @@ CWallet::ScanResult CWallet::ScanForWalletTransactions(const uint256& start_bloc
// could not scan block, keep scanning but record this block as the most recent failure
result.last_failed_block = block_hash;
result.status = ScanResult::FAILURE;
- next_block = chain().findNextBlock(block_hash, block_height, FoundBlock().hash(next_block_hash), &reorg);
}
if (max_height && block_height >= *max_height) {
break;
}
{
- if (!next_block || reorg) {
+ if (!next_block) {
// break successfully when rescan has reached the tip, or
// previous block is no longer on the chain due to a reorg
break;
@@ -4059,9 +4063,7 @@ std::shared_ptr<CWallet> CWallet::Create(interfaces::Chain& chain, const std::st
if (!time_first_key || time < *time_first_key) time_first_key = time;
}
if (time_first_key) {
- if (Optional<int> first_block = chain.findFirstBlockWithTimeAndHeight(*time_first_key - TIMESTAMP_WINDOW, rescan_height, nullptr)) {
- rescan_height = *first_block;
- }
+ chain.findFirstBlockWithTimeAndHeight(*time_first_key - TIMESTAMP_WINDOW, rescan_height, FoundBlock().height(rescan_height));
}
{
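The wallet.cpp changes above lean on interfaces::Chain::findBlock with FoundBlock argument objects, whose accessors can be chained so a single lookup reports several facts about one block. A minimal sketch, assuming an interfaces::Chain& chain and a uint256 hash in scope:

    int height{-1};
    bool in_active_chain{false};
    CBlock data;
    // One call fills the height, active-chain membership and (expensively) the block data.
    chain.findBlock(hash, FoundBlock().height(height).inActiveChain(in_active_chain).data(data));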
diff --git a/src/wallet/walletdb.cpp b/src/wallet/walletdb.cpp
index c0521d3386..5b72a01939 100644
--- a/src/wallet/walletdb.cpp
+++ b/src/wallet/walletdb.cpp
@@ -1013,13 +1013,10 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas
Optional<DatabaseFormat> format;
if (exists) {
-#ifdef USE_BDB
- if (ExistsBerkeleyDatabase(path)) {
+ if (IsBDBFile(BDBDataFile(path))) {
format = DatabaseFormat::BERKELEY;
}
-#endif
-#ifdef USE_SQLITE
- if (ExistsSQLiteDatabase(path)) {
+ if (IsSQLiteFile(SQLiteDataFile(path))) {
if (format) {
error = Untranslated(strprintf("Failed to load database path '%s'. Data is in ambiguous format.", path.string()));
status = DatabaseStatus::FAILED_BAD_FORMAT;
@@ -1027,7 +1024,6 @@ std::unique_ptr<WalletDatabase> MakeDatabase(const fs::path& path, const Databas
}
format = DatabaseFormat::SQLITE;
}
-#endif
} else if (options.require_existing) {
error = Untranslated(strprintf("Failed to load database path '%s'. Path does not exist.", path.string()));
status = DatabaseStatus::FAILED_NOT_FOUND;
diff --git a/src/wallet/walletutil.cpp b/src/wallet/walletutil.cpp
index d6e6f015db..16ddad3a84 100644
--- a/src/wallet/walletutil.cpp
+++ b/src/wallet/walletutil.cpp
@@ -7,17 +7,6 @@
#include <logging.h>
#include <util/system.h>
-#ifdef USE_BDB
-bool ExistsBerkeleyDatabase(const fs::path& path);
-#else
-# define ExistsBerkeleyDatabase(path) (false)
-#endif
-#ifdef USE_SQLITE
-bool ExistsSQLiteDatabase(const fs::path& path);
-#else
-# define ExistsSQLiteDatabase(path) (false)
-#endif
-
fs::path GetWalletDir()
{
fs::path path;
@@ -40,50 +29,6 @@ fs::path GetWalletDir()
return path;
}
-std::vector<fs::path> ListWalletDir()
-{
- const fs::path wallet_dir = GetWalletDir();
- const size_t offset = wallet_dir.string().size() + 1;
- std::vector<fs::path> paths;
- boost::system::error_code ec;
-
- for (auto it = fs::recursive_directory_iterator(wallet_dir, ec); it != fs::recursive_directory_iterator(); it.increment(ec)) {
- if (ec) {
- LogPrintf("%s: %s %s\n", __func__, ec.message(), it->path().string());
- continue;
- }
-
- try {
- // Get wallet path relative to walletdir by removing walletdir from the wallet path.
- // This can be replaced by boost::filesystem::lexically_relative once boost is bumped to 1.60.
- const fs::path path = it->path().string().substr(offset);
-
- if (it->status().type() == fs::directory_file &&
- (ExistsBerkeleyDatabase(it->path()) || ExistsSQLiteDatabase(it->path()))) {
- // Found a directory which contains wallet.dat btree file, add it as a wallet.
- paths.emplace_back(path);
- } else if (it.level() == 0 && it->symlink_status().type() == fs::regular_file && ExistsBerkeleyDatabase(it->path())) {
- if (it->path().filename() == "wallet.dat") {
- // Found top-level wallet.dat btree file, add top level directory ""
- // as a wallet.
- paths.emplace_back();
- } else {
- // Found top-level btree file not called wallet.dat. Current bitcoin
- // software will never create these files but will allow them to be
- // opened in a shared database environment for backwards compatibility.
- // Add it to the list of available wallets.
- paths.emplace_back(path);
- }
- }
- } catch (const std::exception& e) {
- LogPrintf("%s: Error scanning %s: %s\n", __func__, it->path().string(), e.what());
- it.no_push();
- }
- }
-
- return paths;
-}
-
bool IsFeatureSupported(int wallet_version, int feature_version)
{
return wallet_version >= feature_version;
diff --git a/src/wallet/walletutil.h b/src/wallet/walletutil.h
index 27521abd81..d4143ceff4 100644
--- a/src/wallet/walletutil.h
+++ b/src/wallet/walletutil.h
@@ -65,9 +65,6 @@ enum WalletFlags : uint64_t {
//! Get the path of the wallet directory.
fs::path GetWalletDir();
-//! Get wallets in wallet directory.
-std::vector<fs::path> ListWalletDir();
-
/** Descriptor with some wallet metadata */
class WalletDescriptor
{
diff --git a/test/functional/feature_fee_estimation.py b/test/functional/feature_fee_estimation.py
index 8a8a0c7614..8f522aee66 100755
--- a/test/functional/feature_fee_estimation.py
+++ b/test/functional/feature_fee_estimation.py
@@ -13,6 +13,7 @@ from test_framework.util import (
assert_equal,
assert_greater_than,
assert_greater_than_or_equal,
+ assert_raises_rpc_error,
satoshi_round,
)
@@ -262,6 +263,11 @@ class EstimateFeeTest(BitcoinTestFramework):
self.log.info("Final estimates after emptying mempools")
check_estimates(self.nodes[1], self.fees_per_kb)
+ self.log.info("Testing that fee estimation is disabled in blocksonly.")
+ self.restart_node(0, ["-blocksonly"])
+ assert_raises_rpc_error(-32603, "Fee estimation disabled",
+ self.nodes[0].estimatesmartfee, 2)
+
if __name__ == '__main__':
EstimateFeeTest().main()
diff --git a/test/functional/feature_taproot.py b/test/functional/feature_taproot.py
index 116eb7e3d7..6ee2b72c11 100755
--- a/test/functional/feature_taproot.py
+++ b/test/functional/feature_taproot.py
@@ -1444,6 +1444,10 @@ class TaprootTest(BitcoinTestFramework):
self.nodes[1].generate(101)
self.test_spenders(self.nodes[1], spenders_taproot_active(), input_counts=[1, 2, 2, 2, 2, 3])
+ # Re-connect nodes in case they have been disconnected
+ self.disconnect_nodes(0, 1)
+ self.connect_nodes(0, 1)
+
# Transfer value of the largest 500 coins to pre-taproot node.
addr = self.nodes[0].getnewaddress()
diff --git a/test/functional/p2p_compactblocks.py b/test/functional/p2p_compactblocks.py
index 611bffb25f..9a9df73049 100755
--- a/test/functional/p2p_compactblocks.py
+++ b/test/functional/p2p_compactblocks.py
@@ -764,6 +764,34 @@ class CompactBlocksTest(BitcoinTestFramework):
stalling_peer.send_and_ping(msg)
assert_equal(int(node.getbestblockhash(), 16), block.sha256)
+ def test_highbandwidth_mode_states_via_getpeerinfo(self):
+ # create new p2p connection for a fresh state w/o any prior sendcmpct messages sent
+ hb_test_node = self.nodes[0].add_p2p_connection(TestP2PConn(cmpct_version=2))
+
+ # assert the RPC getpeerinfo boolean fields `bip152_hb_{to, from}`
+ # match the given parameters for the last peer of a given node
+ def assert_highbandwidth_states(node, hb_to, hb_from):
+ peerinfo = node.getpeerinfo()[-1]
+ assert_equal(peerinfo['bip152_hb_to'], hb_to)
+ assert_equal(peerinfo['bip152_hb_from'], hb_from)
+
+ # initially, neither node has selected the other peer as high-bandwidth yet
+ assert_highbandwidth_states(self.nodes[0], hb_to=False, hb_from=False)
+
+ # peer requests high-bandwidth mode by sending sendcmpct(1)
+ hb_test_node.send_and_ping(msg_sendcmpct(announce=True, version=2))
+ assert_highbandwidth_states(self.nodes[0], hb_to=False, hb_from=True)
+
+ # peer generates a block and sends it to node, which should
+ # select the peer as high-bandwidth (up to 3 peers according to BIP 152)
+ block = self.build_block_on_tip(self.nodes[0])
+ hb_test_node.send_and_ping(msg_block(block))
+ assert_highbandwidth_states(self.nodes[0], hb_to=True, hb_from=True)
+
+ # peer requests low-bandwidth mode by sending sendcmpct(0)
+ hb_test_node.send_and_ping(msg_sendcmpct(announce=False, version=2))
+ assert_highbandwidth_states(self.nodes[0], hb_to=True, hb_from=False)
+
def run_test(self):
# Get the nodes out of IBD
self.nodes[0].generate(1)
@@ -822,6 +850,9 @@ class CompactBlocksTest(BitcoinTestFramework):
self.log.info("Testing invalid index in cmpctblock message...")
self.test_invalid_cmpctblock_message()
+ self.log.info("Testing high-bandwidth mode states via getpeerinfo...")
+ self.test_highbandwidth_mode_states_via_getpeerinfo()
+
if __name__ == '__main__':
CompactBlocksTest().main()
diff --git a/test/functional/p2p_invalid_messages.py b/test/functional/p2p_invalid_messages.py
index db72a361d9..c0b3c2cb12 100755
--- a/test/functional/p2p_invalid_messages.py
+++ b/test/functional/p2p_invalid_messages.py
@@ -18,6 +18,7 @@ from test_framework.messages import (
msg_inv,
msg_ping,
MSG_TX,
+ msg_version,
ser_string,
)
from test_framework.p2p import (
@@ -60,6 +61,7 @@ class InvalidMessagesTest(BitcoinTestFramework):
def run_test(self):
self.test_buffer()
+ self.test_duplicate_version_msg()
self.test_magic_bytes()
self.test_checksum()
self.test_size()
@@ -92,6 +94,13 @@ class InvalidMessagesTest(BitcoinTestFramework):
conn.sync_with_ping(timeout=1)
self.nodes[0].disconnect_p2ps()
+ def test_duplicate_version_msg(self):
+ self.log.info("Test duplicate version message is ignored")
+ conn = self.nodes[0].add_p2p_connection(P2PDataStore())
+ with self.nodes[0].assert_debug_log(['redundant version message from peer']):
+ conn.send_and_ping(msg_version())
+ self.nodes[0].disconnect_p2ps()
+
def test_magic_bytes(self):
self.log.info("Test message with invalid magic bytes disconnects peer")
conn = self.nodes[0].add_p2p_connection(P2PDataStore())
diff --git a/test/functional/p2p_leak.py b/test/functional/p2p_leak.py
index 4b32d60db0..ca8bf908a9 100755
--- a/test/functional/p2p_leak.py
+++ b/test/functional/p2p_leak.py
@@ -24,8 +24,6 @@ from test_framework.util import (
assert_greater_than_or_equal,
)
-DISCOURAGEMENT_THRESHOLD = 100
-
class LazyPeer(P2PInterface):
def __init__(self):
@@ -93,27 +91,16 @@ class P2PLeakTest(BitcoinTestFramework):
self.num_nodes = 1
def run_test(self):
- # Peer that never sends a version. We will send a bunch of messages
- # from this peer anyway and verify eventual disconnection.
- no_version_disconnect_peer = self.nodes[0].add_p2p_connection(
- LazyPeer(), send_version=False, wait_for_verack=False)
-
# A peer that never sends a version, nor any other messages. It shouldn't receive anything from the node.
no_version_idle_peer = self.nodes[0].add_p2p_connection(LazyPeer(), send_version=False, wait_for_verack=False)
# Peer that sends a version but not a verack.
no_verack_idle_peer = self.nodes[0].add_p2p_connection(NoVerackIdlePeer(), wait_for_verack=False)
- # Send enough ping messages (any non-version message will do) prior to sending
- # version to reach the peer discouragement threshold. This should get us disconnected.
- for _ in range(DISCOURAGEMENT_THRESHOLD):
- no_version_disconnect_peer.send_message(msg_ping())
-
# Wait until we got the verack in response to the version. Though, don't wait for the node to receive the
# verack, since we never sent one
no_verack_idle_peer.wait_for_verack()
- no_version_disconnect_peer.wait_until(lambda: no_version_disconnect_peer.ever_connected, check_connected=False)
no_version_idle_peer.wait_until(lambda: no_version_idle_peer.ever_connected)
no_verack_idle_peer.wait_until(lambda: no_verack_idle_peer.version_received)
@@ -123,13 +110,9 @@ class P2PLeakTest(BitcoinTestFramework):
#Give the node enough time to possibly leak out a message
time.sleep(5)
- # Expect this peer to be disconnected for misbehavior
- assert not no_version_disconnect_peer.is_connected
-
self.nodes[0].disconnect_p2ps()
# Make sure no unexpected messages came in
- assert no_version_disconnect_peer.unexpected_msg == False
assert no_version_idle_peer.unexpected_msg == False
assert no_verack_idle_peer.unexpected_msg == False
@@ -148,7 +131,7 @@ class P2PLeakTest(BitcoinTestFramework):
p2p_old_peer = self.nodes[0].add_p2p_connection(P2PInterface(), send_version=False, wait_for_verack=False)
old_version_msg = msg_version()
old_version_msg.nVersion = 31799
- with self.nodes[0].assert_debug_log(['peer=4 using obsolete version 31799; disconnecting']):
+ with self.nodes[0].assert_debug_log(['peer=3 using obsolete version 31799; disconnecting']):
p2p_old_peer.send_message(old_version_msg)
p2p_old_peer.wait_for_disconnect()
diff --git a/test/functional/p2p_timeouts.py b/test/functional/p2p_timeouts.py
index ce12ce26ce..47832b04bf 100755
--- a/test/functional/p2p_timeouts.py
+++ b/test/functional/p2p_timeouts.py
@@ -57,8 +57,10 @@ class TimeoutsTest(BitcoinTestFramework):
assert no_version_node.is_connected
assert no_send_node.is_connected
- no_verack_node.send_message(msg_ping())
- no_version_node.send_message(msg_ping())
+ with self.nodes[0].assert_debug_log(['Unsupported message "ping" prior to verack from peer=0']):
+ no_verack_node.send_message(msg_ping())
+ with self.nodes[0].assert_debug_log(['non-version message before version handshake. Message "ping" from peer=1']):
+ no_version_node.send_message(msg_ping())
sleep(1)
diff --git a/test/functional/rpc_estimatefee.py b/test/functional/rpc_estimatefee.py
index 3b76c7dd1e..81862ac69e 100755
--- a/test/functional/rpc_estimatefee.py
+++ b/test/functional/rpc_estimatefee.py
@@ -41,6 +41,8 @@ class EstimateFeeTest(BitcoinTestFramework):
self.nodes[0].estimatesmartfee(1)
# self.nodes[0].estimatesmartfee(1, None)
self.nodes[0].estimatesmartfee(1, 'ECONOMICAL')
+ self.nodes[0].estimatesmartfee(1, 'unset')
+ self.nodes[0].estimatesmartfee(1, 'conservative')
self.nodes[0].estimaterawfee(1)
self.nodes[0].estimaterawfee(1, None)
diff --git a/test/functional/rpc_fundrawtransaction.py b/test/functional/rpc_fundrawtransaction.py
index 8ee0ecab0a..569471dc87 100755
--- a/test/functional/rpc_fundrawtransaction.py
+++ b/test/functional/rpc_fundrawtransaction.py
@@ -717,10 +717,10 @@ class RawTransactionsTest(BitcoinTestFramework):
result = node.fundrawtransaction(rawtx) # uses self.min_relay_tx_fee (set by settxfee)
btc_kvb_to_sat_vb = 100000 # (1e5)
- result1 = node.fundrawtransaction(rawtx, {"fee_rate": 2 * btc_kvb_to_sat_vb * self.min_relay_tx_fee})
+ result1 = node.fundrawtransaction(rawtx, {"fee_rate": str(2 * btc_kvb_to_sat_vb * self.min_relay_tx_fee)})
result2 = node.fundrawtransaction(rawtx, {"feeRate": 2 * self.min_relay_tx_fee})
result3 = node.fundrawtransaction(rawtx, {"fee_rate": 10 * btc_kvb_to_sat_vb * self.min_relay_tx_fee})
- result4 = node.fundrawtransaction(rawtx, {"feeRate": 10 * self.min_relay_tx_fee})
+ result4 = node.fundrawtransaction(rawtx, {"feeRate": str(10 * self.min_relay_tx_fee)})
# Test that funding non-standard "zero-fee" transactions is valid.
result5 = self.nodes[3].fundrawtransaction(rawtx, {"fee_rate": 0})
result6 = self.nodes[3].fundrawtransaction(rawtx, {"feeRate": 0})
diff --git a/test/functional/rpc_psbt.py b/test/functional/rpc_psbt.py
index 5840801b00..b364077a9a 100755
--- a/test/functional/rpc_psbt.py
+++ b/test/functional/rpc_psbt.py
@@ -190,11 +190,11 @@ class PSBTTest(BitcoinTestFramework):
self.log.info("Test walletcreatefundedpsbt fee rate of 10000 sat/vB and 0.1 BTC/kvB produces a total fee at or slightly below -maxtxfee (~0.05290000)")
res1 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": 10000, "add_inputs": True})
assert_approx(res1["fee"], 0.055, 0.005)
- res2 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": 0.1, "add_inputs": True})
+ res2 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": "0.1", "add_inputs": True})
assert_approx(res2["fee"], 0.055, 0.005)
self.log.info("Test min fee rate checks with walletcreatefundedpsbt are bypassed, e.g. a fee_rate under 1 sat/vB is allowed")
- res3 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": 0.99999999, "add_inputs": True})
+ res3 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"fee_rate": "0.99999999", "add_inputs": True})
assert_approx(res3["fee"], 0.00000381, 0.0000001)
res4 = self.nodes[1].walletcreatefundedpsbt(inputs, outputs, 0, {"feeRate": 0.00000999, "add_inputs": True})
assert_approx(res4["fee"], 0.00000381, 0.0000001)
diff --git a/test/functional/rpc_rawtransaction.py b/test/functional/rpc_rawtransaction.py
index 554c30c0d2..60e66a27c9 100755
--- a/test/functional/rpc_rawtransaction.py
+++ b/test/functional/rpc_rawtransaction.py
@@ -372,6 +372,13 @@ class RawTransactionsTest(BitcoinTestFramework):
encrawtx = "01000000010000000000000072c1a6a246ae63f74f931e8365e15a089c68d61900000000000000000000ffffffff0100e1f505000000000000000000"
decrawtx = self.nodes[0].decoderawtransaction(encrawtx, False) # decode as non-witness transaction
assert_equal(decrawtx['vout'][0]['value'], Decimal('1.00000000'))
+ # known ambiguous transaction in the chain (see https://github.com/bitcoin/bitcoin/issues/20579)
+ encrawtx = "020000000001010000000000000000000000000000000000000000000000000000000000000000ffffffff4b03c68708046ff8415c622f4254432e434f4d2ffabe6d6de1965d02c68f928e5b244ab1965115a36f56eb997633c7f690124bbf43644e23080000000ca3d3af6d005a65ff0200fd00000000ffffffff03f4c1fb4b0000000016001497cfc76442fe717f2a3f0cc9c175f7561b6619970000000000000000266a24aa21a9ed957d1036a80343e0d1b659497e1b48a38ebe876a056d45965fac4a85cda84e1900000000000000002952534b424c4f434b3a8e092581ab01986cbadc84f4b43f4fa4bb9e7a2e2a0caf9b7cf64d939028e22c0120000000000000000000000000000000000000000000000000000000000000000000000000"
+ decrawtx = self.nodes[0].decoderawtransaction(encrawtx)
+ decrawtx_wit = self.nodes[0].decoderawtransaction(encrawtx, True)
+ assert_raises_rpc_error(-22, 'TX decode failed', self.nodes[0].decoderawtransaction, encrawtx, False) # fails to decode as non-witness transaction
+ assert_equal(decrawtx, decrawtx_wit) # the witness interpretation should be chosen
+ assert_equal(decrawtx['vin'][0]['coinbase'], "03c68708046ff8415c622f4254432e434f4d2ffabe6d6de1965d02c68f928e5b244ab1965115a36f56eb997633c7f690124bbf43644e23080000000ca3d3af6d005a65ff0200fd00000000")
# Basic signrawtransaction test
addr = self.nodes[1].getnewaddress()
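The new assertions hinge on `decoderawtransaction`'s optional `iswitness` argument: when omitted, the node applies a heuristic; `True`/`False` force the witness or legacy framing. A sketch of the same checks, assuming `node` is an RPC proxy and `ambiguous_hex` holds the hex string from the hunk above:

```
default_decode = node.decoderawtransaction(ambiguous_hex)        # heuristic choice
witness_decode = node.decoderawtransaction(ambiguous_hex, True)  # forced witness decode
assert default_decode == witness_decode                          # heuristic prefers the witness reading here

try:
    node.decoderawtransaction(ambiguous_hex, False)              # forced legacy decode
except Exception as e:
    assert "TX decode failed" in str(e)
```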
diff --git a/test/functional/test_framework/messages.py b/test/functional/test_framework/messages.py
index ff7f73bdf4..bab4ad0008 100755
--- a/test/functional/test_framework/messages.py
+++ b/test/functional/test_framework/messages.py
@@ -51,7 +51,6 @@ MAX_HEADERS_RESULTS = 2000 # Number of headers sent in one getheaders result
MAX_INV_SIZE = 50000 # Maximum number of entries in an 'inv' protocol message
NODE_NETWORK = (1 << 0)
-NODE_GETUTXO = (1 << 1)
NODE_BLOOM = (1 << 2)
NODE_WITNESS = (1 << 3)
NODE_COMPACT_FILTERS = (1 << 6)
diff --git a/test/functional/test_framework/p2p.py b/test/functional/test_framework/p2p.py
index 6846d31221..8b79a4dc2f 100755
--- a/test/functional/test_framework/p2p.py
+++ b/test/functional/test_framework/p2p.py
@@ -396,9 +396,9 @@ class P2PInterface(P2PConnection):
assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
if message.nVersion >= 70016:
self.send_message(msg_wtxidrelay())
- self.send_message(msg_verack())
if self.support_addrv2:
self.send_message(msg_sendaddrv2())
+ self.send_message(msg_verack())
self.nServices = message.nServices
# Connection helper methods
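Moving the `verack` below `sendaddrv2` matters because both `wtxidrelay` (BIP 339) and `sendaddrv2` (BIP 155) are negotiation messages that the node only accepts before the handshake completes. A sketch of the same ordering in a custom peer, assuming the stock `P2PInterface` attributes:

```
from test_framework.messages import msg_sendaddrv2, msg_verack, msg_wtxidrelay
from test_framework.p2p import P2PInterface


class AddrV2Peer(P2PInterface):
    def on_version(self, message):
        if message.nVersion >= 70016:
            self.send_message(msg_wtxidrelay())  # BIP 339 negotiation, pre-verack only
        self.send_message(msg_sendaddrv2())      # BIP 155 negotiation, pre-verack only
        self.send_message(msg_verack())          # verack closes the negotiation window
        self.nServices = message.nServices
```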
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 0a5b7f551c..a618706a77 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -482,11 +482,8 @@ class TestNode():
tempfile.NamedTemporaryFile(dir=self.stdout_dir, delete=False) as log_stdout:
try:
self.start(extra_args, stdout=log_stdout, stderr=log_stderr, *args, **kwargs)
- self.wait_for_rpc_connection()
- self.stop_node()
- self.wait_until_stopped()
- except FailedToStartError as e:
- self.log.debug('bitcoind failed to start: %s', e)
+ ret = self.process.wait(timeout=self.rpc_timeout)
+ self.log.debug(self._node_msg(f'bitcoind exited with status {ret} during initialization'))
self.running = False
self.process = None
# Check stderr for expected message
@@ -505,11 +502,15 @@ class TestNode():
if expected_msg != stderr:
self._raise_assertion_error(
'Expected message "{}" does not fully match stderr:\n"{}"'.format(expected_msg, stderr))
- else:
+ except subprocess.TimeoutExpired:
+ self.process.kill()
+ self.running = False
+ self.process = None
+ assert_msg = f'bitcoind should have exited within {self.rpc_timeout}s '
if expected_msg is None:
- assert_msg = "bitcoind should have exited with an error"
+ assert_msg += "with an error"
else:
- assert_msg = "bitcoind should have exited with expected error " + expected_msg
+ assert_msg += "with expected error " + expected_msg
self._raise_assertion_error(assert_msg)
def add_p2p_connection(self, p2p_conn, *, wait_for_verack=True, **kwargs):
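The reworked error path above follows the standard wait-with-timeout pattern: give the child process a bounded time to exit on its own, and only escalate on `subprocess.TimeoutExpired`. A standalone sketch (the binary name and arguments are hypothetical):

```
import subprocess

proc = subprocess.Popen(
    ["bitcoind", "-regtest", "-unknownoption"],  # hypothetical: expected to fail at startup
    stdout=subprocess.PIPE, stderr=subprocess.PIPE)
try:
    ret = proc.wait(timeout=60)
    print(f"process exited with status {ret} during initialization")
except subprocess.TimeoutExpired:
    proc.kill()
    raise AssertionError("process should have exited within 60s with an error")
```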
diff --git a/test/functional/wallet_basic.py b/test/functional/wallet_basic.py
index ac4a6e4948..3cbddaf6da 100755
--- a/test/functional/wallet_basic.py
+++ b/test/functional/wallet_basic.py
@@ -235,7 +235,8 @@ class WalletTest(BitcoinTestFramework):
fee_rate_btc_kvb = fee_rate_sat_vb * 1e3 / 1e8
explicit_fee_rate_btc_kvb = Decimal(fee_rate_btc_kvb) / 1000
- txid = self.nodes[2].sendmany(amounts={address: 10}, fee_rate=fee_rate_sat_vb)
+ # Test passing fee_rate as a string
+ txid = self.nodes[2].sendmany(amounts={address: 10}, fee_rate=str(fee_rate_sat_vb))
self.nodes[2].generate(1)
self.sync_all(self.nodes[0:3])
balance = self.nodes[2].getbalance()
@@ -244,6 +245,17 @@ class WalletTest(BitcoinTestFramework):
node_0_bal += Decimal('10')
assert_equal(self.nodes[0].getbalance(), node_0_bal)
+ # Test passing fee_rate as an integer
+ amount = Decimal("0.0001")
+ txid = self.nodes[2].sendmany(amounts={address: amount}, fee_rate=fee_rate_sat_vb)
+ self.nodes[2].generate(1)
+ self.sync_all(self.nodes[0:3])
+ balance = self.nodes[2].getbalance()
+ node_2_bal = self.check_fee_amount(balance, node_2_bal - amount, explicit_fee_rate_btc_kvb, self.get_vsize(self.nodes[2].gettransaction(txid)['hex']))
+ assert_equal(balance, node_2_bal)
+ node_0_bal += amount
+ assert_equal(self.nodes[0].getbalance(), node_0_bal)
+
for key in ["totalFee", "feeRate"]:
assert_raises_rpc_error(-8, "Unknown named parameter key", self.nodes[2].sendtoaddress, address=address, amount=1, fee_rate=1, key=1)
@@ -405,7 +417,7 @@ class WalletTest(BitcoinTestFramework):
amount = 3
fee_rate_sat_vb = 2
fee_rate_btc_kvb = fee_rate_sat_vb * 1e3 / 1e8
-
+ # Test passing fee_rate as an integer
txid = self.nodes[2].sendtoaddress(address=address, amount=amount, fee_rate=fee_rate_sat_vb)
tx_size = self.get_vsize(self.nodes[2].gettransaction(txid)['hex'])
self.nodes[0].generate(1)
@@ -414,6 +426,19 @@ class WalletTest(BitcoinTestFramework):
fee = prebalance - postbalance - Decimal(amount)
assert_fee_amount(fee, tx_size, Decimal(fee_rate_btc_kvb))
+ prebalance = self.nodes[2].getbalance()
+ amount = Decimal("0.001")
+ fee_rate_sat_vb = 1.23
+ fee_rate_btc_kvb = fee_rate_sat_vb * 1e3 / 1e8
+ # Test passing fee_rate as a string
+ txid = self.nodes[2].sendtoaddress(address=address, amount=amount, fee_rate=str(fee_rate_sat_vb))
+ tx_size = self.get_vsize(self.nodes[2].gettransaction(txid)['hex'])
+ self.nodes[0].generate(1)
+ self.sync_all(self.nodes[0:3])
+ postbalance = self.nodes[2].getbalance()
+ fee = prebalance - postbalance - amount
+ assert_fee_amount(fee, tx_size, Decimal(fee_rate_btc_kvb))
+
for key in ["totalFee", "feeRate"]:
assert_raises_rpc_error(-8, "Unknown named parameter key", self.nodes[2].sendtoaddress, address=address, amount=1, fee_rate=1, key=1)
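The balance checks above recover the fee from the wallet's balance delta and compare it against the requested rate times the transaction's virtual size. The arithmetic, with hypothetical but self-consistent numbers:

```
from decimal import Decimal

fee_rate_sat_vb = Decimal("2")         # requested rate, sat/vB
tx_vsize = 141                         # virtual size reported for the tx
prebalance = Decimal("50.00000000")
amount = Decimal("0.00100000")
postbalance = Decimal("49.99899718")

# Whatever left the wallet beyond the amount sent is the fee ...
fee = prebalance - postbalance - amount
# ... and it should equal fee_rate * vsize, converted from satoshis to BTC.
expected_fee = fee_rate_sat_vb * tx_vsize / Decimal(100_000_000)
assert fee == expected_fee == Decimal("0.00000282")
```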
diff --git a/test/functional/wallet_bumpfee.py b/test/functional/wallet_bumpfee.py
index 99c9737258..c8c1f2e374 100755
--- a/test/functional/wallet_bumpfee.py
+++ b/test/functional/wallet_bumpfee.py
@@ -149,7 +149,7 @@ def test_simple_bumpfee_succeeds(self, mode, rbf_node, peer_node, dest_address):
self.sync_mempools((rbf_node, peer_node))
assert rbfid in rbf_node.getrawmempool() and rbfid in peer_node.getrawmempool()
if mode == "fee_rate":
- bumped_psbt = rbf_node.psbtbumpfee(rbfid, {"fee_rate": NORMAL})
+ bumped_psbt = rbf_node.psbtbumpfee(rbfid, {"fee_rate": str(NORMAL)})
bumped_tx = rbf_node.bumpfee(rbfid, {"fee_rate": NORMAL})
else:
bumped_psbt = rbf_node.psbtbumpfee(rbfid)
diff --git a/test/functional/wallet_send.py b/test/functional/wallet_send.py
index 192e9065e6..9835c5a2af 100755
--- a/test/functional/wallet_send.py
+++ b/test/functional/wallet_send.py
@@ -256,8 +256,8 @@ class WalletSendTest(BitcoinTestFramework):
assert res["complete"]
self.log.info("Test setting explicit fee rate")
- res1 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate=1, add_to_wallet=False)
- res2 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=1, add_to_wallet=False)
+ res1 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, arg_fee_rate="1", add_to_wallet=False)
+ res2 = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate="1", add_to_wallet=False)
assert_equal(self.nodes[1].decodepsbt(res1["psbt"])["fee"], self.nodes[1].decodepsbt(res2["psbt"])["fee"])
res = self.test_send(from_wallet=w0, to_wallet=w1, amount=1, fee_rate=7, add_to_wallet=False)
diff --git a/test/lint/README.md b/test/lint/README.md
index d15c061288..7e06308347 100644
--- a/test/lint/README.md
+++ b/test/lint/README.md
@@ -15,7 +15,16 @@ git-subtree-check.sh
Run this script from the root of the repository to verify that a subtree matches the contents of
the commit it claims to have been updated to.
-To use, make sure that you have fetched the upstream repository branch in which the subtree is
+```
+Usage: test/lint/git-subtree-check.sh [-r] DIR [COMMIT]
+ test/lint/git-subtree-check.sh -?
+```
+
+- `DIR` is the prefix within the repository to check.
- `COMMIT` is the commit to check; if it is not provided, `HEAD` is used.
- `-r` checks that the subtree commit is present in the repository.
+
+To do a full check with `-r`, make sure that you have fetched the upstream repository branch in which the subtree is
maintained:
* for `src/secp256k1`: https://github.com/bitcoin-core/secp256k1.git (branch master)
* for `src/leveldb`: https://github.com/bitcoin-core/leveldb.git (branch bitcoin-fork)
@@ -29,10 +38,6 @@ To do so, add the upstream repository as remote:
git remote add --fetch secp256k1 https://github.com/bitcoin-core/secp256k1.git
```
-Usage: `git-subtree-check.sh DIR (COMMIT)`
-
-`COMMIT` may be omitted, in which case `HEAD` is used.
-
lint-all.sh
===========
Calls other scripts with the `lint-` prefix.
diff --git a/test/lint/git-subtree-check.sh b/test/lint/git-subtree-check.sh
index 5a0500df25..46aa6e7157 100755
--- a/test/lint/git-subtree-check.sh
+++ b/test/lint/git-subtree-check.sh
@@ -4,6 +4,39 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
export LC_ALL=C
+
+check_remote=0
+while getopts "?hr" opt; do
+ case $opt in
+ '?' | h)
+ echo "Usage: $0 [-r] DIR [COMMIT]"
+ echo " $0 -?"
+ echo ""
+ echo "Checks that a certain prefix is pure subtree, and optionally whether the"
+ echo "referenced commit is present in any fetched remote."
+ echo ""
+ echo "DIR is the prefix within the repository to check."
+ echo "COMMIT is the commit to check, if it is not provided, HEAD will be used."
+ echo ""
+ echo "-r Check that subtree commit is present in repository."
+ echo " To do this check, fetch the subtreed remote first. Example:"
+ echo ""
+ echo " git fetch https://github.com/bitcoin-core/secp256k1.git"
+ echo " test/lint/git-subtree-check.sh -r src/secp256k1"
+ exit 1
+ ;;
+ r)
+ check_remote=1
+ ;;
+ esac
+done
+shift $((OPTIND-1))
+
+if [ -z "$1" ]; then
+ echo "Need to provide a DIR, see $0 -?"
+ exit 1
+fi
+
# Strip trailing / from directory path (in case it was added by autocomplete)
DIR="${1%/}"
COMMIT="$2"
@@ -79,18 +112,20 @@ if [ "$tree_actual_tree" != "$tree_commit" ]; then
exit 1
fi
-# get the tree in the subtree commit referred to
-if [ "d$(git cat-file -t $rev 2>/dev/null)" != dcommit ]; then
- echo "subtree commit $rev unavailable: cannot compare. Did you add and fetch the remote?" >&2
- exit
-fi
-tree_subtree=$(git show -s --format="%T" $rev)
-echo "$DIR in $COMMIT was last updated to upstream commit $rev (tree $tree_subtree)"
+if [ "$check_remote" != "0" ]; then
+ # get the tree in the subtree commit referred to
+ if [ "d$(git cat-file -t $rev 2>/dev/null)" != dcommit ]; then
+ echo "subtree commit $rev unavailable: cannot compare. Did you add and fetch the remote?" >&2
+ exit 1
+ fi
+ tree_subtree=$(git show -s --format="%T" $rev)
+ echo "$DIR in $COMMIT was last updated to upstream commit $rev (tree $tree_subtree)"
-# ... and compare the actual tree with it
-if [ "$tree_actual_tree" != "$tree_subtree" ]; then
- echo "FAIL: subtree update commit differs from upstream tree!" >&2
- exit 1
+ # ... and compare the actual tree with it
+ if [ "$tree_actual_tree" != "$tree_subtree" ]; then
+ echo "FAIL: subtree update commit differs from upstream tree!" >&2
+ exit 1
+ fi
fi
echo "GOOD"
diff --git a/test/lint/lint-circular-dependencies.sh b/test/lint/lint-circular-dependencies.sh
index 6bd02d45ac..c4ad00e954 100755
--- a/test/lint/lint-circular-dependencies.sh
+++ b/test/lint/lint-circular-dependencies.sh
@@ -20,7 +20,6 @@ EXPECTED_CIRCULAR_DEPENDENCIES=(
"txmempool -> validation -> txmempool"
"wallet/fees -> wallet/wallet -> wallet/fees"
"wallet/wallet -> wallet/walletdb -> wallet/wallet"
- "policy/fees -> txmempool -> validation -> policy/fees"
)
EXIT_CODE=0
diff --git a/test/sanitizer_suppressions/tsan b/test/sanitizer_suppressions/tsan
index 48f81f3dbf..986e096056 100644
--- a/test/sanitizer_suppressions/tsan
+++ b/test/sanitizer_suppressions/tsan
@@ -32,7 +32,6 @@ deadlock:CConnman::ForNode
deadlock:CConnman::GetNodeStats
deadlock:CChainState::ConnectTip
deadlock:UpdateTip
-deadlock:wallet_tests::CreateWallet
# WalletBatch (unidentified deadlock)
deadlock:WalletBatch