-rw-r--r--  .travis.yml | 3
-rwxr-xr-x  contrib/devtools/gen-manpages.sh | 2
-rwxr-xr-x  contrib/gitian-build.sh | 4
-rwxr-xr-x  contrib/macdeploy/detached-sig-create.sh | 2
-rwxr-xr-x  contrib/tidy_datadir.sh | 2
-rwxr-xr-x  contrib/verify-commits/gpg.sh | 10
-rwxr-xr-x  contrib/verify-commits/verify-commits.sh | 3
-rwxr-xr-x  contrib/verifybinaries/verify.sh | 4
-rwxr-xr-x  contrib/zmq/zmq_sub.py | 2
-rwxr-xr-x  contrib/zmq/zmq_sub3.4.py | 2
-rw-r--r--  depends/packages/zeromq.mk | 15
-rw-r--r--  depends/patches/zeromq/0001-fix-build-with-older-mingw64.patch | 30
-rw-r--r--  depends/patches/zeromq/9114d3957725acd34aa8b8d011585812f3369411.patch | 22
-rw-r--r--  depends/patches/zeromq/9e6745c12e0b100cd38acecc16ce7db02905e27c.patch | 22
-rw-r--r--  doc/build-osx.md | 2
-rw-r--r--  doc/dependencies.md | 2
-rw-r--r--  doc/release-notes.md | 3
-rwxr-xr-x  share/genbuild.sh | 2
-rwxr-xr-x  share/rpcuser/rpcuser.py | 2
-rw-r--r--  src/addrman.cpp | 6
-rw-r--r--  src/addrman.h | 10
-rw-r--r--  src/chain.cpp | 11
-rw-r--r--  src/dbwrapper.cpp | 2
-rw-r--r--  src/init.cpp | 15
-rw-r--r--  src/net.cpp | 22
-rw-r--r--  src/net.h | 26
-rw-r--r--  src/qt/bantablemodel.cpp | 6
-rw-r--r--  src/qt/forms/sendcoinsdialog.ui | 4
-rw-r--r--  src/qt/sendcoinsdialog.cpp | 2
-rw-r--r--  src/qt/transactionfilterproxy.cpp | 13
-rw-r--r--  src/qt/transactionfilterproxy.h | 4
-rw-r--r--  src/qt/transactiontablemodel.cpp | 6
-rw-r--r--  src/qt/transactionview.cpp | 14
-rw-r--r--  src/qt/transactionview.h | 4
-rw-r--r--  src/rpc/blockchain.cpp | 9
-rw-r--r--  src/rpc/misc.cpp | 35
-rw-r--r--  src/rpc/net.cpp | 6
-rw-r--r--  src/rpc/server.cpp | 4
-rw-r--r--  src/serialize.h | 4
-rw-r--r--  src/test/README.md | 2
-rw-r--r--  src/test/coins_tests.cpp | 22
-rw-r--r--  src/util.cpp | 38
-rw-r--r--  src/util.h | 4
-rw-r--r--  src/validation.cpp | 40
-rw-r--r--  src/validationinterface.cpp | 7
-rw-r--r--  src/wallet/rpcdump.cpp | 2
-rw-r--r--  src/wallet/rpcwallet.cpp | 10
-rw-r--r--  src/wallet/wallet.cpp | 46
-rw-r--r--  src/zmq/zmqnotificationinterface.cpp | 8
-rw-r--r--  test/functional/README.md | 10
-rwxr-xr-x  test/functional/assumevalid.py | 6
-rwxr-xr-x  test/functional/bip65-cltv-p2p.py | 2
-rwxr-xr-x  test/functional/bip9-softforks.py | 2
-rwxr-xr-x  test/functional/bipdersig-p2p.py | 2
-rwxr-xr-x  test/functional/example_test.py | 25
-rwxr-xr-x  test/functional/feature_logging.py | 59
-rwxr-xr-x  test/functional/maxuploadtarget.py | 6
-rwxr-xr-x  test/functional/p2p-acceptblock.py | 12
-rwxr-xr-x  test/functional/p2p-compactblocks.py | 14
-rwxr-xr-x  test/functional/p2p-feefilter.py | 4
-rwxr-xr-x  test/functional/p2p-fingerprint.py | 4
-rwxr-xr-x  test/functional/p2p-leaktests.py | 75
-rwxr-xr-x  test/functional/p2p-mempool.py | 2
-rwxr-xr-x  test/functional/p2p-segwit.py | 282
-rwxr-xr-x  test/functional/p2p-timeouts.py | 4
-rwxr-xr-x  test/functional/p2p-versionbits-warning.py | 4
-rwxr-xr-x  test/functional/sendheaders.py | 15
-rwxr-xr-x  test/functional/test_framework/comptool.py | 127
-rwxr-xr-x  test/functional/test_framework/mininode.py | 368
-rwxr-xr-x  test/functional/test_framework/test_framework.py | 26
-rwxr-xr-x  test/functional/test_framework/test_node.py | 13
-rwxr-xr-x  test/functional/test_runner.py | 27
72 files changed, 877 insertions, 728 deletions
diff --git a/.travis.yml b/.travis.yml
index a643d2ff5d..d3dd37e76c 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -18,7 +18,6 @@ env:
- CCACHE_COMPRESS=1
- BASE_OUTDIR=$TRAVIS_BUILD_DIR/out
- SDK_URL=https://bitcoincore.org/depends-sources/sdks
- - PYTHON_DEBUG=1
- WINEDEBUG=fixme-all
matrix:
# ARM
@@ -79,7 +78,7 @@ script:
- export LD_LIBRARY_PATH=$TRAVIS_BUILD_DIR/depends/$HOST/lib
- if [ "$RUN_TESTS" = "true" ]; then travis_wait 30 make $MAKEJOBS check VERBOSE=1; fi
- if [ "$TRAVIS_EVENT_TYPE" = "cron" ]; then extended="--extended --exclude pruning,dbcrash"; fi
- - if [ "$RUN_TESTS" = "true" ]; then test/functional/test_runner.py --coverage --quiet ${extended}; fi
+ - if [ "$RUN_TESTS" = "true" ]; then test/functional/test_runner.py --combinedlogslen=4000 --coverage --quiet ${extended}; fi
after_script:
- echo $TRAVIS_COMMIT_RANGE
- echo $TRAVIS_COMMIT_LOG
diff --git a/contrib/devtools/gen-manpages.sh b/contrib/devtools/gen-manpages.sh
index 967717e1e0..925d6a6252 100755
--- a/contrib/devtools/gen-manpages.sh
+++ b/contrib/devtools/gen-manpages.sh
@@ -1,4 +1,4 @@
-#!/bin/sh
+#!/bin/bash
TOPDIR=${TOPDIR:-$(git rev-parse --show-toplevel)}
SRCDIR=${SRCDIR:-$TOPDIR/src}
diff --git a/contrib/gitian-build.sh b/contrib/gitian-build.sh
index 511c1a4c48..631fba9089 100755
--- a/contrib/gitian-build.sh
+++ b/contrib/gitian-build.sh
@@ -105,7 +105,7 @@ while :; do
fi
shift
else
- echo 'Error: "--os" requires an argument containing an l (for linux), w (for windows), or x (for Mac OSX)\n'
+ echo 'Error: "--os" requires an argument containing an l (for linux), w (for windows), or x (for Mac OSX)'
exit 1
fi
;;
@@ -188,7 +188,7 @@ then
fi
# Get signer
-if [[ -n"$1" ]]
+if [[ -n "$1" ]]
then
SIGNER=$1
shift
diff --git a/contrib/macdeploy/detached-sig-create.sh b/contrib/macdeploy/detached-sig-create.sh
index 7f017bb4f1..3379a4599c 100755
--- a/contrib/macdeploy/detached-sig-create.sh
+++ b/contrib/macdeploy/detached-sig-create.sh
@@ -40,7 +40,7 @@ grep CodeResources < "${TEMPLIST}" | while read i; do
RESOURCE="${TEMPDIR}/${OUTROOT}/${TARGETFILE}"
DIRNAME="`dirname "${RESOURCE}"`"
mkdir -p "${DIRNAME}"
- echo "Adding resource for: "${TARGETFILE}""
+ echo "Adding resource for: \"${TARGETFILE}\""
cp "${i}" "${RESOURCE}"
done
diff --git a/contrib/tidy_datadir.sh b/contrib/tidy_datadir.sh
index 8960f8811d..b845b34e41 100755
--- a/contrib/tidy_datadir.sh
+++ b/contrib/tidy_datadir.sh
@@ -4,7 +4,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
if [ -d "$1" ]; then
- cd "$1"
+ cd "$1" || exit 1
else
echo "Usage: $0 <datadir>" >&2
echo "Removes obsolete Bitcoin database files" >&2
diff --git a/contrib/verify-commits/gpg.sh b/contrib/verify-commits/gpg.sh
index abd8f5fd9f..8f3e4b8063 100755
--- a/contrib/verify-commits/gpg.sh
+++ b/contrib/verify-commits/gpg.sh
@@ -9,7 +9,7 @@ REVSIG=false
IFS='
'
if [ "$BITCOIN_VERIFY_COMMITS_ALLOW_SHA1" = 1 ]; then
- GPG_RES="$(echo "$INPUT" | gpg --trust-model always "$@" 2>/dev/null)"
+ GPG_RES="$(printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null)"
else
# Note how we've disabled SHA1 with the --weak-digest option, disabling
# signatures - including selfsigs - that use SHA1. While you might think that
@@ -24,7 +24,7 @@ else
case "$LINE" in
"gpg (GnuPG) 1.4.1"*|"gpg (GnuPG) 2.0."*)
echo "Please upgrade to at least gpg 2.1.10 to check for weak signatures" > /dev/stderr
- GPG_RES="$(echo "$INPUT" | gpg --trust-model always "$@" 2>/dev/null)"
+ GPG_RES="$(printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null)"
;;
# We assume if you're running 2.1+, you're probably running 2.1.10+
# gpg will fail otherwise
@@ -32,7 +32,7 @@ else
# gpg will fail otherwise
esac
done
- [ "$GPG_RES" = "" ] && GPG_RES="$(echo "$INPUT" | gpg --trust-model always --weak-digest sha1 "$@" 2>/dev/null)"
+ [ "$GPG_RES" = "" ] && GPG_RES="$(printf '%s\n' "$INPUT" | gpg --trust-model always --weak-digest sha1 "$@" 2>/dev/null)"
fi
for LINE in $(echo "$GPG_RES"); do
case "$LINE" in
@@ -57,8 +57,8 @@ if ! $VALID; then
exit 1
fi
if $VALID && $REVSIG; then
- echo "$INPUT" | gpg --trust-model always "$@" 2>/dev/null | grep "\[GNUPG:\] \(NEWSIG\|SIG_ID\|VALIDSIG\)"
+ printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null | grep "\[GNUPG:\] \(NEWSIG\|SIG_ID\|VALIDSIG\)"
echo "$GOODREVSIG"
else
- echo "$INPUT" | gpg --trust-model always "$@" 2>/dev/null
+ printf '%s\n' "$INPUT" | gpg --trust-model always "$@" 2>/dev/null
fi
diff --git a/contrib/verify-commits/verify-commits.sh b/contrib/verify-commits/verify-commits.sh
index a1ef715fb3..532b97a438 100755
--- a/contrib/verify-commits/verify-commits.sh
+++ b/contrib/verify-commits/verify-commits.sh
@@ -33,10 +33,11 @@ fi
NO_SHA1=1
PREV_COMMIT=""
+INITIAL_COMMIT="${CURRENT_COMMIT}"
while true; do
if [ "$CURRENT_COMMIT" = $VERIFIED_ROOT ]; then
- echo "There is a valid path from "$CURRENT_COMMIT" to $VERIFIED_ROOT where all commits are signed!"
+ echo "There is a valid path from \"$INITIAL_COMMIT\" to $VERIFIED_ROOT where all commits are signed!"
exit 0
fi
diff --git a/contrib/verifybinaries/verify.sh b/contrib/verifybinaries/verify.sh
index 320add64d0..e0266bf08a 100755
--- a/contrib/verifybinaries/verify.sh
+++ b/contrib/verifybinaries/verify.sh
@@ -33,7 +33,7 @@ if [ ! -d "$WORKINGDIR" ]; then
mkdir "$WORKINGDIR"
fi
-cd "$WORKINGDIR"
+cd "$WORKINGDIR" || exit 1
#test if a version number has been passed as an argument
if [ -n "$1" ]; then
@@ -87,7 +87,7 @@ WGETOUT=$(wget -N "$HOST1$BASEDIR$SIGNATUREFILENAME" 2>&1)
#and then see if wget completed successfully
if [ $? -ne 0 ]; then
echo "Error: couldn't fetch signature file. Have you specified the version number in the following format?"
- echo "[$VERSIONPREFIX]<version>-[$RCVERSIONSTRING[0-9]] (example: "$VERSIONPREFIX"0.10.4-"$RCVERSIONSTRING"1)"
+ echo "[$VERSIONPREFIX]<version>-[$RCVERSIONSTRING[0-9]] (example: ${VERSIONPREFIX}0.10.4-${RCVERSIONSTRING}1)"
echo "wget output:"
echo "$WGETOUT"|sed 's/^/\t/g'
exit 2
diff --git a/contrib/zmq/zmq_sub.py b/contrib/zmq/zmq_sub.py
index 5cc19761d4..5efd3d2187 100755
--- a/contrib/zmq/zmq_sub.py
+++ b/contrib/zmq/zmq_sub.py
@@ -8,8 +8,8 @@
Bitcoin should be started with the command line arguments:
bitcoind -testnet -daemon \
- -zmqpubhashblock=tcp://127.0.0.1:28332 \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
+ -zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
diff --git a/contrib/zmq/zmq_sub3.4.py b/contrib/zmq/zmq_sub3.4.py
index bfb7ea9eae..7032d475e8 100755
--- a/contrib/zmq/zmq_sub3.4.py
+++ b/contrib/zmq/zmq_sub3.4.py
@@ -8,8 +8,8 @@
Bitcoin should be started with the command line arguments:
bitcoind -testnet -daemon \
- -zmqpubhashblock=tcp://127.0.0.1:28332 \
-zmqpubrawtx=tcp://127.0.0.1:28332 \
+ -zmqpubrawblock=tcp://127.0.0.1:28332 \
-zmqpubhashtx=tcp://127.0.0.1:28332 \
-zmqpubhashblock=tcp://127.0.0.1:28332
diff --git a/depends/packages/zeromq.mk b/depends/packages/zeromq.mk
index 01146c26f6..74bf4e9e11 100644
--- a/depends/packages/zeromq.mk
+++ b/depends/packages/zeromq.mk
@@ -1,19 +1,18 @@
package=zeromq
-$(package)_version=4.1.5
-$(package)_download_path=https://github.com/zeromq/zeromq4-1/releases/download/v$($(package)_version)/
+$(package)_version=4.2.2
+$(package)_download_path=https://github.com/zeromq/libzmq/releases/download/v$($(package)_version)/
$(package)_file_name=$(package)-$($(package)_version).tar.gz
-$(package)_sha256_hash=04aac57f081ffa3a2ee5ed04887be9e205df3a7ddade0027460b8042432bdbcf
-$(package)_patches=9114d3957725acd34aa8b8d011585812f3369411.patch 9e6745c12e0b100cd38acecc16ce7db02905e27c.patch
+$(package)_sha256_hash=5b23f4ca9ef545d5bd3af55d305765e3ee06b986263b31967435d285a3e6df6b
+$(package)_patches=0001-fix-build-with-older-mingw64.patch
define $(package)_set_vars
- $(package)_config_opts=--without-documentation --disable-shared --without-libsodium --disable-curve
+ $(package)_config_opts=--without-docs --disable-shared --without-libsodium --disable-curve --disable-curve-keygen --disable-perf
$(package)_config_opts_linux=--with-pic
$(package)_cxxflags=-std=c++11
endef
define $(package)_preprocess_cmds
- patch -p1 < $($(package)_patch_dir)/9114d3957725acd34aa8b8d011585812f3369411.patch && \
- patch -p1 < $($(package)_patch_dir)/9e6745c12e0b100cd38acecc16ce7db02905e27c.patch && \
+ patch -p1 < $($(package)_patch_dir)/0001-fix-build-with-older-mingw64.patch && \
./autogen.sh
endef
@@ -22,7 +21,7 @@ define $(package)_config_cmds
endef
define $(package)_build_cmds
- $(MAKE) libzmq.la
+ $(MAKE) src/libzmq.la
endef
define $(package)_stage_cmds
diff --git a/depends/patches/zeromq/0001-fix-build-with-older-mingw64.patch b/depends/patches/zeromq/0001-fix-build-with-older-mingw64.patch
new file mode 100644
index 0000000000..a6c508fb8a
--- /dev/null
+++ b/depends/patches/zeromq/0001-fix-build-with-older-mingw64.patch
@@ -0,0 +1,30 @@
+From 1a159c128c69a42d90819375c06a39994f3fbfc1 Mon Sep 17 00:00:00 2001
+From: Cory Fields <cory-nospam-@coryfields.com>
+Date: Tue, 28 Nov 2017 20:33:25 -0500
+Subject: [PATCH] fix build with older mingw64
+
+---
+ src/windows.hpp | 7 +++++++
+ 1 file changed, 7 insertions(+)
+
+diff --git a/src/windows.hpp b/src/windows.hpp
+index 99e889d..e69038e 100644
+--- a/src/windows.hpp
++++ b/src/windows.hpp
+@@ -55,6 +55,13 @@
+ #include <winsock2.h>
+ #include <windows.h>
+ #include <mswsock.h>
++
++#if defined __MINGW64_VERSION_MAJOR && __MINGW64_VERSION_MAJOR < 4
++// Workaround for mingw-w64 < v4.0 which did not include ws2ipdef.h in iphlpapi.h.
++// Fixed in mingw-w64 by 9bd8fe9148924840d315b4c915dd099955ea89d1.
++#include <ws2def.h>
++#include <ws2ipdef.h>
++#endif
+ #include <iphlpapi.h>
+
+ #if !defined __MINGW32__
+--
+2.7.4
+
diff --git a/depends/patches/zeromq/9114d3957725acd34aa8b8d011585812f3369411.patch b/depends/patches/zeromq/9114d3957725acd34aa8b8d011585812f3369411.patch
deleted file mode 100644
index f704b3d94f..0000000000
--- a/depends/patches/zeromq/9114d3957725acd34aa8b8d011585812f3369411.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-From 9114d3957725acd34aa8b8d011585812f3369411 Mon Sep 17 00:00:00 2001
-From: Jeroen Ooms <jeroenooms@gmail.com>
-Date: Tue, 20 Oct 2015 13:10:38 +0200
-Subject: [PATCH] enable static libraries on mingw
-
----
- configure.ac | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/configure.ac b/configure.ac
-index 393505b..e92131a 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -265,7 +265,7 @@ case "${host_os}" in
- libzmq_dso_visibility="no"
-
- if test "x$enable_static" = "xyes"; then
-- AC_MSG_ERROR([Building static libraries is not supported under MinGW32])
-+ CPPFLAGS="-DZMQ_STATIC"
- fi
-
- # Set FD_SETSIZE to 1024
\ No newline at end of file
diff --git a/depends/patches/zeromq/9e6745c12e0b100cd38acecc16ce7db02905e27c.patch b/depends/patches/zeromq/9e6745c12e0b100cd38acecc16ce7db02905e27c.patch
deleted file mode 100644
index 9aff2c179a..0000000000
--- a/depends/patches/zeromq/9e6745c12e0b100cd38acecc16ce7db02905e27c.patch
+++ /dev/null
@@ -1,22 +0,0 @@
-From 9e6745c12e0b100cd38acecc16ce7db02905e27c Mon Sep 17 00:00:00 2001
-From: David Millard <dmillard10@gmail.com>
-Date: Tue, 10 May 2016 13:53:53 -0700
-Subject: [PATCH] Fix autotools for static MinGW builds
-
----
- configure.ac | 2 +-
- 1 file changed, 1 insertion(+), 1 deletion(-)
-
-diff --git a/configure.ac b/configure.ac
-index 5a0fa14..def6ea7 100644
---- a/configure.ac
-+++ b/configure.ac
-@@ -259,7 +259,7 @@ case "${host_os}" in
- libzmq_dso_visibility="no"
-
- if test "x$enable_static" = "xyes"; then
-- CPPFLAGS="-DZMQ_STATIC"
-+ CPPFLAGS="-DZMQ_STATIC $CPPFLAGS"
- fi
-
- # Set FD_SETSIZE to 1024
\ No newline at end of file
diff --git a/doc/build-osx.md b/doc/build-osx.md
index 6f08562ff8..6663016ed8 100644
--- a/doc/build-osx.md
+++ b/doc/build-osx.md
@@ -110,6 +110,6 @@ Uncheck everything except Qt Creator during the installation process.
Notes
-----
-* Tested on OS X 10.8 through 10.12 on 64-bit Intel processors only.
+* Tested on OS X 10.8 through 10.13 on 64-bit Intel processors only.
* Building with downloaded Qt binaries is not officially supported. See the notes in [#7714](https://github.com/bitcoin/bitcoin/issues/7714)
diff --git a/doc/dependencies.md b/doc/dependencies.md
index 6e1d1ee992..5c5645de97 100644
--- a/doc/dependencies.md
+++ b/doc/dependencies.md
@@ -27,5 +27,5 @@ These are the dependencies currently used by Bitcoin Core. You can find instruct
| Qt | [5.7.1](https://download.qt.io/official_releases/qt/) | 4.7+ | No | | |
| XCB | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk#L94) (Linux only) |
| xkbcommon | | | | | [Yes](https://github.com/bitcoin/bitcoin/blob/master/depends/packages/qt.mk#L93) (Linux only) |
-| ZeroMQ | [4.1.5](https://github.com/zeromq/libzmq/releases) | | No | | |
+| ZeroMQ | [4.2.2](https://github.com/zeromq/libzmq/releases) | | No | | |
| zlib | [1.2.11](http://zlib.net/) | | | | No |
diff --git a/doc/release-notes.md b/doc/release-notes.md
index 2c63b1f88e..43009b0813 100644
--- a/doc/release-notes.md
+++ b/doc/release-notes.md
@@ -90,6 +90,9 @@ Low-level RPC changes
* `getmininginfo`
- The wallet RPC `getreceivedbyaddress` will return an error if called with an address not in the wallet.
+Changed command-line options
+-----------------------------
+- `-debuglogfile=<file>` can be used to specify an alternative debug logging file.
Credits
=======
diff --git a/share/genbuild.sh b/share/genbuild.sh
index 32ef2a5755..419e0da0fd 100755
--- a/share/genbuild.sh
+++ b/share/genbuild.sh
@@ -4,7 +4,7 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
if [ $# -gt 1 ]; then
- cd "$2"
+ cd "$2" || exit 1
fi
if [ $# -gt 0 ]; then
FILE="$1"
diff --git a/share/rpcuser/rpcuser.py b/share/rpcuser/rpcuser.py
index 63c69e308a..6d9b44f699 100755
--- a/share/rpcuser/rpcuser.py
+++ b/share/rpcuser/rpcuser.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python
# Copyright (c) 2015-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
diff --git a/src/addrman.cpp b/src/addrman.cpp
index ddcdf4c2f4..372cac8483 100644
--- a/src/addrman.cpp
+++ b/src/addrman.cpp
@@ -390,9 +390,9 @@ int CAddrMan::Check_()
if (vRandom.size() != nTried + nNew)
return -7;
- for (std::map<int, CAddrInfo>::iterator it = mapInfo.begin(); it != mapInfo.end(); it++) {
- int n = (*it).first;
- CAddrInfo& info = (*it).second;
+ for (const auto& entry : mapInfo) {
+ int n = entry.first;
+ const CAddrInfo& info = entry.second;
if (info.fInTried) {
if (!info.nLastSuccess)
return -1;
diff --git a/src/addrman.h b/src/addrman.h
index b06b272962..ea289d508c 100644
--- a/src/addrman.h
+++ b/src/addrman.h
@@ -313,9 +313,9 @@ public:
s << nUBuckets;
std::map<int, int> mapUnkIds;
int nIds = 0;
- for (std::map<int, CAddrInfo>::const_iterator it = mapInfo.begin(); it != mapInfo.end(); it++) {
- mapUnkIds[(*it).first] = nIds;
- const CAddrInfo &info = (*it).second;
+ for (const auto& entry : mapInfo) {
+ mapUnkIds[entry.first] = nIds;
+ const CAddrInfo &info = entry.second;
if (info.nRefCount) {
assert(nIds != nNew); // this means nNew was wrong, oh ow
s << info;
@@ -323,8 +323,8 @@ public:
}
}
nIds = 0;
- for (std::map<int, CAddrInfo>::const_iterator it = mapInfo.begin(); it != mapInfo.end(); it++) {
- const CAddrInfo &info = (*it).second;
+ for (const auto& entry : mapInfo) {
+ const CAddrInfo &info = entry.second;
if (info.fInTried) {
assert(nIds != nTried); // this means nTried was wrong, oh ow
s << info;
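
[Editorial note: the iterator-to-range-`for` conversion in the addrman hunks above is the refactor repeated throughout this diff (net, RPC, wallet, Qt and test code). A minimal standalone sketch of the idiom, with illustrative names rather than the patch's own:]

    #include <cstdio>
    #include <map>
    #include <string>

    int main()
    {
        std::map<int, std::string> mapInfo{{1, "a"}, {2, "b"}};

        // Old style, as removed throughout this diff:
        // for (std::map<int, std::string>::iterator it = mapInfo.begin(); it != mapInfo.end(); it++) {
        //     int n = (*it).first; ...
        // }

        // New style: each entry is a const reference to the map's
        // std::pair<const int, std::string> element.
        for (const auto& entry : mapInfo) {
            std::printf("%d -> %s\n", entry.first, entry.second.c_str());
        }
    }
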
diff --git a/src/chain.cpp b/src/chain.cpp
index 9f40c41fde..7ebc08a50b 100644
--- a/src/chain.cpp
+++ b/src/chain.cpp
@@ -80,12 +80,13 @@ int static inline GetSkipHeight(int height) {
return (height & 1) ? InvertLowestOne(InvertLowestOne(height - 1)) + 1 : InvertLowestOne(height);
}
-CBlockIndex* CBlockIndex::GetAncestor(int height)
+const CBlockIndex* CBlockIndex::GetAncestor(int height) const
{
- if (height > nHeight || height < 0)
+ if (height > nHeight || height < 0) {
return nullptr;
+ }
- CBlockIndex* pindexWalk = this;
+ const CBlockIndex* pindexWalk = this;
int heightWalk = nHeight;
while (heightWalk > height) {
int heightSkip = GetSkipHeight(heightWalk);
@@ -106,9 +107,9 @@ CBlockIndex* CBlockIndex::GetAncestor(int height)
return pindexWalk;
}
-const CBlockIndex* CBlockIndex::GetAncestor(int height) const
+CBlockIndex* CBlockIndex::GetAncestor(int height)
{
- return const_cast<CBlockIndex*>(this)->GetAncestor(height);
+ return const_cast<CBlockIndex*>(static_cast<const CBlockIndex*>(this)->GetAncestor(height));
}
void CBlockIndex::BuildSkip()
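
[Editorial note: CBlockIndex::GetAncestor is reworked above so the logic lives in the const overload and the non-const overload merely delegates via const_cast. A small self-contained sketch of that pattern; the Node class is invented for illustration, not taken from the patch:]

    // Implement once in the const overload, delegate from the non-const one.
    class Node
    {
        Node* m_prev{nullptr};

    public:
        const Node* First() const
        {
            const Node* walk = this;              // walking only needs const access
            while (walk->m_prev) walk = walk->m_prev;
            return walk;
        }

        Node* First()
        {
            // Safe: *this is genuinely non-const here; const is added only to
            // call the const overload, then removed from the result.
            return const_cast<Node*>(static_cast<const Node*>(this)->First());
        }
    };
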
diff --git a/src/dbwrapper.cpp b/src/dbwrapper.cpp
index 92c4fe363c..eb29be05c5 100644
--- a/src/dbwrapper.cpp
+++ b/src/dbwrapper.cpp
@@ -64,7 +64,7 @@ public:
assert(p <= limit);
base[std::min(bufsize - 1, (int)(p - base))] = '\0';
- LogPrintStr(base);
+ LogPrintf("leveldb: %s", base);
if (base != buffer) {
delete[] base;
}
diff --git a/src/init.cpp b/src/init.cpp
index 3cc797a17c..871a585267 100644
--- a/src/init.cpp
+++ b/src/init.cpp
@@ -193,8 +193,8 @@ void Shutdown()
// Because these depend on each-other, we make sure that neither can be
// using the other before destroying them.
- UnregisterValidationInterface(peerLogic.get());
- if(g_connman) g_connman->Stop();
+ if (peerLogic) UnregisterValidationInterface(peerLogic.get());
+ if (g_connman) g_connman->Stop();
peerLogic.reset();
g_connman.reset();
@@ -342,6 +342,7 @@ std::string HelpMessage(HelpMessageMode mode)
if (showDebug)
strUsage += HelpMessageOpt("-feefilter", strprintf("Tell other nodes to filter invs to us by our mempool min fee (default: %u)", DEFAULT_FEEFILTER));
strUsage += HelpMessageOpt("-loadblock=<file>", _("Imports blocks from external blk000??.dat file on startup"));
+ strUsage += HelpMessageOpt("-debuglogfile=<file>", strprintf(_("Specify location of debug log file: this can be an absolute path or a path relative to the data directory (default: %s)"), DEFAULT_DEBUGLOGFILE));
strUsage += HelpMessageOpt("-maxorphantx=<n>", strprintf(_("Keep at most <n> unconnectable transactions in memory (default: %u)"), DEFAULT_MAX_ORPHAN_TRANSACTIONS));
strUsage += HelpMessageOpt("-maxmempool=<n>", strprintf(_("Keep the transaction memory pool below <n> megabytes (default: %u)"), DEFAULT_MAX_MEMPOOL_SIZE));
strUsage += HelpMessageOpt("-mempoolexpiry=<n>", strprintf(_("Do not keep transactions in the mempool longer than <n> hours (default: %u)"), DEFAULT_MEMPOOL_EXPIRY));
@@ -919,7 +920,8 @@ bool AppInitParameterInteraction()
// Special-case: if -debug=0/-nodebug is set, turn off debugging messages
const std::vector<std::string> categories = gArgs.GetArgs("-debug");
- if (find(categories.begin(), categories.end(), std::string("0")) == categories.end()) {
+ if (std::none_of(categories.begin(), categories.end(),
+ [](std::string cat){return cat == "0" || cat == "none";})) {
for (const auto& cat : categories) {
uint32_t flag = 0;
if (!GetLogCategory(&flag, &cat)) {
@@ -1208,8 +1210,11 @@ bool AppInitMain(boost::thread_group& threadGroup, CScheduler& scheduler)
ShrinkDebugFile();
}
- if (fPrintToDebugLog)
- OpenDebugLog();
+ if (fPrintToDebugLog) {
+ if (!OpenDebugLog()) {
+ return InitError(strprintf("Could not open debug log file %s", GetDebugLogPath().string()));
+ }
+ }
if (!fLogTimestamps)
LogPrintf("Startup time: %s\n", DateTimeStrFormat("%Y-%m-%d %H:%M:%S", GetTime()));
diff --git a/src/net.cpp b/src/net.cpp
index a8e5143d5e..7ac64ead90 100644
--- a/src/net.cpp
+++ b/src/net.cpp
@@ -110,13 +110,13 @@ bool GetLocal(CService& addr, const CNetAddr *paddrPeer)
int nBestReachability = -1;
{
LOCK(cs_mapLocalHost);
- for (std::map<CNetAddr, LocalServiceInfo>::iterator it = mapLocalHost.begin(); it != mapLocalHost.end(); it++)
+ for (const auto& entry : mapLocalHost)
{
- int nScore = (*it).second.nScore;
- int nReachability = (*it).first.GetReachabilityFrom(paddrPeer);
+ int nScore = entry.second.nScore;
+ int nReachability = entry.first.GetReachabilityFrom(paddrPeer);
if (nReachability > nBestReachability || (nReachability == nBestReachability && nScore > nBestScore))
{
- addr = CService((*it).first, (*it).second.nPort);
+ addr = CService(entry.first, entry.second.nPort);
nBestReachability = nReachability;
nBestScore = nScore;
}
@@ -2269,10 +2269,16 @@ bool CConnman::Start(CScheduler& scheduler, const Options& connOptions)
{
Init(connOptions);
- nTotalBytesRecv = 0;
- nTotalBytesSent = 0;
- nMaxOutboundTotalBytesSentInCycle = 0;
- nMaxOutboundCycleStartTime = 0;
+ {
+ LOCK(cs_totalBytesRecv);
+ nTotalBytesRecv = 0;
+ }
+ {
+ LOCK(cs_totalBytesSent);
+ nTotalBytesSent = 0;
+ nMaxOutboundTotalBytesSentInCycle = 0;
+ nMaxOutboundCycleStartTime = 0;
+ }
if (fListen && !InitBinds(connOptions.vBinds, connOptions.vWhiteBinds)) {
if (clientInterface) {
diff --git a/src/net.h b/src/net.h
index a7223050e7..d0fd050970 100644
--- a/src/net.h
+++ b/src/net.h
@@ -158,10 +158,16 @@ public:
m_msgproc = connOptions.m_msgproc;
nSendBufferMaxSize = connOptions.nSendBufferMaxSize;
nReceiveFloodSize = connOptions.nReceiveFloodSize;
- nMaxOutboundTimeframe = connOptions.nMaxOutboundTimeframe;
- nMaxOutboundLimit = connOptions.nMaxOutboundLimit;
+ {
+ LOCK(cs_totalBytesSent);
+ nMaxOutboundTimeframe = connOptions.nMaxOutboundTimeframe;
+ nMaxOutboundLimit = connOptions.nMaxOutboundLimit;
+ }
vWhitelistedRange = connOptions.vWhitelistedRange;
- vAddedNodes = connOptions.m_added_nodes;
+ {
+ LOCK(cs_vAddedNodes);
+ vAddedNodes = connOptions.m_added_nodes;
+ }
}
CConnman(uint64_t seed0, uint64_t seed1);
@@ -364,14 +370,14 @@ private:
// Network usage totals
CCriticalSection cs_totalBytesRecv;
CCriticalSection cs_totalBytesSent;
- uint64_t nTotalBytesRecv;
- uint64_t nTotalBytesSent;
+ uint64_t nTotalBytesRecv GUARDED_BY(cs_totalBytesRecv);
+ uint64_t nTotalBytesSent GUARDED_BY(cs_totalBytesSent);
// outbound limit & stats
- uint64_t nMaxOutboundTotalBytesSentInCycle;
- uint64_t nMaxOutboundCycleStartTime;
- uint64_t nMaxOutboundLimit;
- uint64_t nMaxOutboundTimeframe;
+ uint64_t nMaxOutboundTotalBytesSentInCycle GUARDED_BY(cs_totalBytesSent);
+ uint64_t nMaxOutboundCycleStartTime GUARDED_BY(cs_totalBytesSent);
+ uint64_t nMaxOutboundLimit GUARDED_BY(cs_totalBytesSent);
+ uint64_t nMaxOutboundTimeframe GUARDED_BY(cs_totalBytesSent);
// Whitelisted ranges. Any node connecting from these is automatically
// whitelisted (as well as those connecting to whitelisted binds).
@@ -389,7 +395,7 @@ private:
CAddrMan addrman;
std::deque<std::string> vOneShots;
CCriticalSection cs_vOneShots;
- std::vector<std::string> vAddedNodes;
+ std::vector<std::string> vAddedNodes GUARDED_BY(cs_vAddedNodes);
CCriticalSection cs_vAddedNodes;
std::vector<CNode*> vNodes;
std::list<CNode*> vNodesDisconnected;
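
[Editorial note: a sketch of how the GUARDED_BY annotations added above feed Clang's -Wthread-safety analysis. The macro definition and ByteCounters class below are illustrative only; Bitcoin Core defines GUARDED_BY in threadsafety.h and pairs it with its own annotated lock type (CCriticalSection) rather than a bare std::mutex:]

    #include <cstdint>
    #include <mutex>

    // Expands to Clang's thread-safety attribute; a no-op on other compilers.
    #if defined(__clang__)
    #define GUARDED_BY(x) __attribute__((guarded_by(x)))
    #else
    #define GUARDED_BY(x)
    #endif

    class ByteCounters
    {
        std::mutex cs_totalBytes;
        uint64_t nTotalBytesSent GUARDED_BY(cs_totalBytes) = 0;

    public:
        void Record(uint64_t n)
        {
            std::lock_guard<std::mutex> lock(cs_totalBytes); // required by the annotation
            nTotalBytesSent += n;
        }

        // Reading or writing nTotalBytesSent without holding cs_totalBytes would
        // make clang -Wthread-safety report "requires holding mutex".
    };
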
diff --git a/src/qt/bantablemodel.cpp b/src/qt/bantablemodel.cpp
index 84807da65d..c96bdfd5d7 100644
--- a/src/qt/bantablemodel.cpp
+++ b/src/qt/bantablemodel.cpp
@@ -55,11 +55,11 @@ public:
#if QT_VERSION >= 0x040700
cachedBanlist.reserve(banMap.size());
#endif
- for (banmap_t::iterator it = banMap.begin(); it != banMap.end(); it++)
+ for (const auto& entry : banMap)
{
CCombinedBan banEntry;
- banEntry.subnet = (*it).first;
- banEntry.banEntry = (*it).second;
+ banEntry.subnet = entry.first;
+ banEntry.banEntry = entry.second;
cachedBanlist.append(banEntry);
}
diff --git a/src/qt/forms/sendcoinsdialog.ui b/src/qt/forms/sendcoinsdialog.ui
index 9c89741afe..c6fd708cdf 100644
--- a/src/qt/forms/sendcoinsdialog.ui
+++ b/src/qt/forms/sendcoinsdialog.ui
@@ -1108,10 +1108,10 @@
<item>
<widget class="QCheckBox" name="optInRBF">
<property name="text">
- <string>Request Replace-By-Fee</string>
+ <string>Allow increasing fee</string>
</property>
<property name="toolTip">
- <string>Indicates that the sender may wish to replace this transaction with a new one paying higher fees (prior to being confirmed).</string>
+ <string>This allows you to increase the fee later if the transaction takes a long time to confirm. This will also cause the recommended fee to be lower. ("Replace-By-Fee", BIP 125)</string>
</property>
</widget>
</item>
diff --git a/src/qt/sendcoinsdialog.cpp b/src/qt/sendcoinsdialog.cpp
index 036b6ebcc0..e2b9177885 100644
--- a/src/qt/sendcoinsdialog.cpp
+++ b/src/qt/sendcoinsdialog.cpp
@@ -345,7 +345,7 @@ void SendCoinsDialog::on_sendButton_clicked()
if (ui->optInRBF->isChecked())
{
questionString.append("<hr /><span>");
- questionString.append(tr("This transaction signals replaceability (optin-RBF)."));
+ questionString.append(tr("You can increase the fee later (signals Replace-By-Fee)."));
questionString.append("</span>");
}
diff --git a/src/qt/transactionfilterproxy.cpp b/src/qt/transactionfilterproxy.cpp
index 15859bf36d..ada73a73df 100644
--- a/src/qt/transactionfilterproxy.cpp
+++ b/src/qt/transactionfilterproxy.cpp
@@ -20,7 +20,7 @@ TransactionFilterProxy::TransactionFilterProxy(QObject *parent) :
QSortFilterProxyModel(parent),
dateFrom(MIN_DATE),
dateTo(MAX_DATE),
- addrPrefix(),
+ m_search_string(),
typeFilter(ALL_TYPES),
watchOnlyFilter(WatchOnlyFilter_All),
minAmount(0),
@@ -38,6 +38,7 @@ bool TransactionFilterProxy::filterAcceptsRow(int sourceRow, const QModelIndex &
bool involvesWatchAddress = index.data(TransactionTableModel::WatchonlyRole).toBool();
QString address = index.data(TransactionTableModel::AddressRole).toString();
QString label = index.data(TransactionTableModel::LabelRole).toString();
+ QString txid = index.data(TransactionTableModel::TxIDRole).toString();
qint64 amount = llabs(index.data(TransactionTableModel::AmountRole).toLongLong());
int status = index.data(TransactionTableModel::StatusRole).toInt();
@@ -51,8 +52,11 @@ bool TransactionFilterProxy::filterAcceptsRow(int sourceRow, const QModelIndex &
return false;
if(datetime < dateFrom || datetime > dateTo)
return false;
- if (!address.contains(addrPrefix, Qt::CaseInsensitive) && !label.contains(addrPrefix, Qt::CaseInsensitive))
+ if (!address.contains(m_search_string, Qt::CaseInsensitive) &&
+ ! label.contains(m_search_string, Qt::CaseInsensitive) &&
+ ! txid.contains(m_search_string, Qt::CaseInsensitive)) {
return false;
+ }
if(amount < minAmount)
return false;
@@ -66,9 +70,10 @@ void TransactionFilterProxy::setDateRange(const QDateTime &from, const QDateTime
invalidateFilter();
}
-void TransactionFilterProxy::setAddressPrefix(const QString &_addrPrefix)
+void TransactionFilterProxy::setSearchString(const QString &search_string)
{
- this->addrPrefix = _addrPrefix;
+ if (m_search_string == search_string) return;
+ m_search_string = search_string;
invalidateFilter();
}
diff --git a/src/qt/transactionfilterproxy.h b/src/qt/transactionfilterproxy.h
index 4bdd9ea6c7..fea7502b26 100644
--- a/src/qt/transactionfilterproxy.h
+++ b/src/qt/transactionfilterproxy.h
@@ -35,7 +35,7 @@ public:
};
void setDateRange(const QDateTime &from, const QDateTime &to);
- void setAddressPrefix(const QString &addrPrefix);
+ void setSearchString(const QString &);
/**
@note Type filter takes a bit field created with TYPE() or ALL_TYPES
*/
@@ -57,7 +57,7 @@ protected:
private:
QDateTime dateFrom;
QDateTime dateTo;
- QString addrPrefix;
+ QString m_search_string;
quint32 typeFilter;
WatchOnlyFilter watchOnlyFilter;
CAmount minAmount;
diff --git a/src/qt/transactiontablemodel.cpp b/src/qt/transactiontablemodel.cpp
index cc0dc5ef48..67580f11ae 100644
--- a/src/qt/transactiontablemodel.cpp
+++ b/src/qt/transactiontablemodel.cpp
@@ -80,10 +80,10 @@ public:
cachedWallet.clear();
{
LOCK2(cs_main, wallet->cs_wallet);
- for(std::map<uint256, CWalletTx>::iterator it = wallet->mapWallet.begin(); it != wallet->mapWallet.end(); ++it)
+ for (const auto& entry : wallet->mapWallet)
{
- if(TransactionRecord::showTransaction(it->second))
- cachedWallet.append(TransactionRecord::decomposeTransaction(wallet, it->second));
+ if (TransactionRecord::showTransaction(entry.second))
+ cachedWallet.append(TransactionRecord::decomposeTransaction(wallet, entry.second));
}
}
}
diff --git a/src/qt/transactionview.cpp b/src/qt/transactionview.cpp
index 92e8e91600..4d2aac12f0 100644
--- a/src/qt/transactionview.cpp
+++ b/src/qt/transactionview.cpp
@@ -95,11 +95,11 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa
hlayout->addWidget(typeWidget);
- addressWidget = new QLineEdit(this);
+ search_widget = new QLineEdit(this);
#if QT_VERSION >= 0x040700
- addressWidget->setPlaceholderText(tr("Enter address or label to search"));
+ search_widget->setPlaceholderText(tr("Enter address, transaction id, or label to search"));
#endif
- hlayout->addWidget(addressWidget);
+ hlayout->addWidget(search_widget);
amountWidget = new QLineEdit(this);
#if QT_VERSION >= 0x040700
@@ -187,8 +187,8 @@ TransactionView::TransactionView(const PlatformStyle *platformStyle, QWidget *pa
connect(watchOnlyWidget, SIGNAL(activated(int)), this, SLOT(chooseWatchonly(int)));
connect(amountWidget, SIGNAL(textChanged(QString)), amount_typing_delay, SLOT(start()));
connect(amount_typing_delay, SIGNAL(timeout()), this, SLOT(changedAmount()));
- connect(addressWidget, SIGNAL(textChanged(QString)), prefix_typing_delay, SLOT(start()));
- connect(prefix_typing_delay, SIGNAL(timeout()), this, SLOT(changedPrefix()));
+ connect(search_widget, SIGNAL(textChanged(QString)), prefix_typing_delay, SLOT(start()));
+ connect(prefix_typing_delay, SIGNAL(timeout()), this, SLOT(changedSearch()));
connect(view, SIGNAL(doubleClicked(QModelIndex)), this, SIGNAL(doubleClicked(QModelIndex)));
connect(view, SIGNAL(customContextMenuRequested(QPoint)), this, SLOT(contextualMenu(QPoint)));
@@ -326,11 +326,11 @@ void TransactionView::chooseWatchonly(int idx)
(TransactionFilterProxy::WatchOnlyFilter)watchOnlyWidget->itemData(idx).toInt());
}
-void TransactionView::changedPrefix()
+void TransactionView::changedSearch()
{
if(!transactionProxyModel)
return;
- transactionProxyModel->setAddressPrefix(addressWidget->text());
+ transactionProxyModel->setSearchString(search_widget->text());
}
void TransactionView::changedAmount()
diff --git a/src/qt/transactionview.h b/src/qt/transactionview.h
index 5b28b34723..f72a828b00 100644
--- a/src/qt/transactionview.h
+++ b/src/qt/transactionview.h
@@ -66,7 +66,7 @@ private:
QComboBox *dateWidget;
QComboBox *typeWidget;
QComboBox *watchOnlyWidget;
- QLineEdit *addressWidget;
+ QLineEdit *search_widget;
QLineEdit *amountWidget;
QMenu *contextMenu;
@@ -113,7 +113,7 @@ public Q_SLOTS:
void chooseType(int idx);
void chooseWatchonly(int idx);
void changedAmount();
- void changedPrefix();
+ void changedSearch();
void exportClicked();
void focusTransaction(const QModelIndex&);
diff --git a/src/rpc/blockchain.cpp b/src/rpc/blockchain.cpp
index 601965d4c4..b7895b86f7 100644
--- a/src/rpc/blockchain.cpp
+++ b/src/rpc/blockchain.cpp
@@ -1113,13 +1113,13 @@ static UniValue BIP9SoftForkDesc(const Consensus::Params& consensusParams, Conse
return rv;
}
-void BIP9SoftForkDescPushBack(UniValue& bip9_softforks, const std::string &name, const Consensus::Params& consensusParams, Consensus::DeploymentPos id)
+void BIP9SoftForkDescPushBack(UniValue& bip9_softforks, const Consensus::Params& consensusParams, Consensus::DeploymentPos id)
{
// Deployments with timeout value of 0 are hidden.
// A timeout value of 0 guarantees a softfork will never be activated.
// This is used when softfork codes are merged without specifying the deployment schedule.
if (consensusParams.vDeployments[id].nTimeout > 0)
- bip9_softforks.push_back(Pair(name, BIP9SoftForkDesc(consensusParams, id)));
+ bip9_softforks.push_back(Pair(VersionBitsDeploymentInfo[id].name, BIP9SoftForkDesc(consensusParams, id)));
}
UniValue getblockchaininfo(const JSONRPCRequest& request)
@@ -1214,8 +1214,9 @@ UniValue getblockchaininfo(const JSONRPCRequest& request)
softforks.push_back(SoftForkDesc("bip34", 2, tip, consensusParams));
softforks.push_back(SoftForkDesc("bip66", 3, tip, consensusParams));
softforks.push_back(SoftForkDesc("bip65", 4, tip, consensusParams));
- BIP9SoftForkDescPushBack(bip9_softforks, "csv", consensusParams, Consensus::DEPLOYMENT_CSV);
- BIP9SoftForkDescPushBack(bip9_softforks, "segwit", consensusParams, Consensus::DEPLOYMENT_SEGWIT);
+ for (int pos = Consensus::DEPLOYMENT_CSV; pos != Consensus::MAX_VERSION_BITS_DEPLOYMENTS; ++pos) {
+ BIP9SoftForkDescPushBack(bip9_softforks, consensusParams, static_cast<Consensus::DeploymentPos>(pos));
+ }
obj.push_back(Pair("softforks", softforks));
obj.push_back(Pair("bip9_softforks", bip9_softforks));
diff --git a/src/rpc/misc.cpp b/src/rpc/misc.cpp
index b3e22f5f10..c511aa0eb2 100644
--- a/src/rpc/misc.cpp
+++ b/src/rpc/misc.cpp
@@ -533,6 +533,9 @@ uint32_t getCategoryMask(UniValue cats) {
if (!GetLogCategory(&flag, &cat)) {
throw JSONRPCError(RPC_INVALID_PARAMETER, "unknown logging category " + cat);
}
+ if (flag == BCLog::NONE) {
+ return 0;
+ }
mask |= flag;
}
return mask;
@@ -542,16 +545,32 @@ UniValue logging(const JSONRPCRequest& request)
{
if (request.fHelp || request.params.size() > 2) {
throw std::runtime_error(
- "logging [include,...] <exclude>\n"
+ "logging ( <include> <exclude> )\n"
"Gets and sets the logging configuration.\n"
- "When called without an argument, returns the list of categories that are currently being debug logged.\n"
- "When called with arguments, adds or removes categories from debug logging.\n"
+ "When called without an argument, returns the list of categories with status that are currently being debug logged or not.\n"
+ "When called with arguments, adds or removes categories from debug logging and return the lists above.\n"
+ "The arguments are evaluated in order \"include\", \"exclude\".\n"
+ "If an item is both included and excluded, it will thus end up being excluded.\n"
"The valid logging categories are: " + ListLogCategories() + "\n"
- "libevent logging is configured on startup and cannot be modified by this RPC during runtime."
- "Arguments:\n"
- "1. \"include\" (array of strings) add debug logging for these categories.\n"
- "2. \"exclude\" (array of strings) remove debug logging for these categories.\n"
- "\nResult: <categories> (string): a list of the logging categories that are active.\n"
+ "In addition, the following are available as category names with special meanings:\n"
+ " - \"all\", \"1\" : represent all logging categories.\n"
+ " - \"none\", \"0\" : even if other logging categories are specified, ignore all of them.\n"
+ "\nArguments:\n"
+ "1. \"include\" (array of strings, optional) A json array of categories to add debug logging\n"
+ " [\n"
+ " \"category\" (string) the valid logging category\n"
+ " ,...\n"
+ " ]\n"
+ "2. \"exclude\" (array of strings, optional) A json array of categories to remove debug logging\n"
+ " [\n"
+ " \"category\" (string) the valid logging category\n"
+ " ,...\n"
+ " ]\n"
+ "\nResult:\n"
+ "{ (json object where keys are the logging categories, and values indicates its status\n"
+ " \"category\": 0|1, (numeric) if being debug logged or not. 0:inactive, 1:active\n"
+ " ...\n"
+ "}\n"
"\nExamples:\n"
+ HelpExampleCli("logging", "\"[\\\"all\\\"]\" \"[\\\"http\\\"]\"")
+ HelpExampleRpc("logging", "[\"all\"], \"[libevent]\"")
diff --git a/src/rpc/net.cpp b/src/rpc/net.cpp
index 322a3e0cbd..9de249855c 100644
--- a/src/rpc/net.cpp
+++ b/src/rpc/net.cpp
@@ -571,11 +571,11 @@ UniValue listbanned(const JSONRPCRequest& request)
g_connman->GetBanned(banMap);
UniValue bannedAddresses(UniValue::VARR);
- for (banmap_t::iterator it = banMap.begin(); it != banMap.end(); it++)
+ for (const auto& entry : banMap)
{
- CBanEntry banEntry = (*it).second;
+ const CBanEntry& banEntry = entry.second;
UniValue rec(UniValue::VOBJ);
- rec.push_back(Pair("address", (*it).first.ToString()));
+ rec.push_back(Pair("address", entry.first.ToString()));
rec.push_back(Pair("banned_until", banEntry.nBanUntil));
rec.push_back(Pair("ban_created", banEntry.nCreateTime));
rec.push_back(Pair("ban_reason", banEntry.banReasonToString()));
diff --git a/src/rpc/server.cpp b/src/rpc/server.cpp
index fa81398272..0e4cd41689 100644
--- a/src/rpc/server.cpp
+++ b/src/rpc/server.cpp
@@ -160,8 +160,8 @@ std::string CRPCTable::help(const std::string& strCommand, const JSONRPCRequest&
std::set<rpcfn_type> setDone;
std::vector<std::pair<std::string, const CRPCCommand*> > vCommands;
- for (std::map<std::string, const CRPCCommand*>::const_iterator mi = mapCommands.begin(); mi != mapCommands.end(); ++mi)
- vCommands.push_back(make_pair(mi->second->category + mi->first, mi->second));
+ for (const auto& entry : mapCommands)
+ vCommands.push_back(make_pair(entry.second->category + entry.first, entry.second));
sort(vCommands.begin(), vCommands.end());
JSONRPCRequest jreq(helpreq);
diff --git a/src/serialize.h b/src/serialize.h
index 62ecde4823..02d3e4f7c6 100644
--- a/src/serialize.h
+++ b/src/serialize.h
@@ -732,8 +732,8 @@ template<typename Stream, typename K, typename T, typename Pred, typename A>
void Serialize(Stream& os, const std::map<K, T, Pred, A>& m)
{
WriteCompactSize(os, m.size());
- for (typename std::map<K, T, Pred, A>::const_iterator mi = m.begin(); mi != m.end(); ++mi)
- Serialize(os, (*mi));
+ for (const auto& entry : m)
+ Serialize(os, entry);
}
template<typename Stream, typename K, typename T, typename Pred, typename A>
diff --git a/src/test/README.md b/src/test/README.md
index dbaa9c27f3..01da32109b 100644
--- a/src/test/README.md
+++ b/src/test/README.md
@@ -52,4 +52,4 @@ examine `uint256_tests.cpp`.
For further reading, I found the following website to be helpful in
explaining how the boost unit test framework works:
-[http://www.alittlemadness.com/2009/03/31/c-unit-testing-with-boosttest/](http://www.alittlemadness.com/2009/03/31/c-unit-testing-with-boosttest/).
+[http://www.alittlemadness.com/2009/03/31/c-unit-testing-with-boosttest/](http://archive.is/dRBGf).
diff --git a/src/test/coins_tests.cpp b/src/test/coins_tests.cpp
index 7c3d02ba63..ed556c07f4 100644
--- a/src/test/coins_tests.cpp
+++ b/src/test/coins_tests.cpp
@@ -81,8 +81,8 @@ public:
// Manually recompute the dynamic usage of the whole data, and compare it.
size_t ret = memusage::DynamicUsage(cacheCoins);
size_t count = 0;
- for (CCoinsMap::iterator it = cacheCoins.begin(); it != cacheCoins.end(); it++) {
- ret += it->second.coin.DynamicMemoryUsage();
+ for (const auto& entry : cacheCoins) {
+ ret += entry.second.coin.DynamicMemoryUsage();
++count;
}
BOOST_CHECK_EQUAL(GetCacheSize(), count);
@@ -189,15 +189,15 @@ BOOST_AUTO_TEST_CASE(coins_cache_simulation_test)
// Once every 1000 iterations and at the end, verify the full cache.
if (InsecureRandRange(1000) == 1 || i == NUM_SIMULATION_ITERATIONS - 1) {
- for (auto it = result.begin(); it != result.end(); it++) {
- bool have = stack.back()->HaveCoin(it->first);
- const Coin& coin = stack.back()->AccessCoin(it->first);
+ for (const auto& entry : result) {
+ bool have = stack.back()->HaveCoin(entry.first);
+ const Coin& coin = stack.back()->AccessCoin(entry.first);
BOOST_CHECK(have == !coin.IsSpent());
- BOOST_CHECK(coin == it->second);
+ BOOST_CHECK(coin == entry.second);
if (coin.IsSpent()) {
missed_an_entry = true;
} else {
- BOOST_CHECK(stack.back()->HaveCoinInCache(it->first));
+ BOOST_CHECK(stack.back()->HaveCoinInCache(entry.first));
found_an_entry = true;
}
}
@@ -420,11 +420,11 @@ BOOST_AUTO_TEST_CASE(updatecoins_simulation_test)
// Once every 1000 iterations and at the end, verify the full cache.
if (InsecureRandRange(1000) == 1 || i == NUM_SIMULATION_ITERATIONS - 1) {
- for (auto it = result.begin(); it != result.end(); it++) {
- bool have = stack.back()->HaveCoin(it->first);
- const Coin& coin = stack.back()->AccessCoin(it->first);
+ for (const auto& entry : result) {
+ bool have = stack.back()->HaveCoin(entry.first);
+ const Coin& coin = stack.back()->AccessCoin(entry.first);
BOOST_CHECK(have == !coin.IsSpent());
- BOOST_CHECK(coin == it->second);
+ BOOST_CHECK(coin == entry.second);
}
}
diff --git a/src/util.cpp b/src/util.cpp
index d58f39e969..6631c236f1 100644
--- a/src/util.cpp
+++ b/src/util.cpp
@@ -89,6 +89,7 @@ const int64_t nStartupTime = GetTime();
const char * const BITCOIN_CONF_FILENAME = "bitcoin.conf";
const char * const BITCOIN_PID_FILENAME = "bitcoind.pid";
+const char * const DEFAULT_DEBUGLOGFILE = "debug.log";
ArgsManager gArgs;
bool fPrintToConsole = false;
@@ -189,26 +190,40 @@ static void DebugPrintInit()
vMsgsBeforeOpenLog = new std::list<std::string>;
}
-void OpenDebugLog()
+fs::path GetDebugLogPath()
+{
+ fs::path logfile(gArgs.GetArg("-debuglogfile", DEFAULT_DEBUGLOGFILE));
+ if (logfile.is_absolute()) {
+ return logfile;
+ } else {
+ return GetDataDir() / logfile;
+ }
+}
+
+bool OpenDebugLog()
{
boost::call_once(&DebugPrintInit, debugPrintInitFlag);
boost::mutex::scoped_lock scoped_lock(*mutexDebugLog);
assert(fileout == nullptr);
assert(vMsgsBeforeOpenLog);
- fs::path pathDebug = GetDataDir() / "debug.log";
+ fs::path pathDebug = GetDebugLogPath();
+
fileout = fsbridge::fopen(pathDebug, "a");
- if (fileout) {
- setbuf(fileout, nullptr); // unbuffered
- // dump buffered messages from before we opened the log
- while (!vMsgsBeforeOpenLog->empty()) {
- FileWriteStr(vMsgsBeforeOpenLog->front(), fileout);
- vMsgsBeforeOpenLog->pop_front();
- }
+ if (!fileout) {
+ return false;
+ }
+
+ setbuf(fileout, nullptr); // unbuffered
+ // dump buffered messages from before we opened the log
+ while (!vMsgsBeforeOpenLog->empty()) {
+ FileWriteStr(vMsgsBeforeOpenLog->front(), fileout);
+ vMsgsBeforeOpenLog->pop_front();
}
delete vMsgsBeforeOpenLog;
vMsgsBeforeOpenLog = nullptr;
+ return true;
}
struct CLogCategoryDesc
@@ -220,6 +235,7 @@ struct CLogCategoryDesc
const CLogCategoryDesc LogCategories[] =
{
{BCLog::NONE, "0"},
+ {BCLog::NONE, "none"},
{BCLog::NET, "net"},
{BCLog::TOR, "tor"},
{BCLog::MEMPOOL, "mempool"},
@@ -354,7 +370,7 @@ int LogPrintStr(const std::string &str)
// reopen the log file, if requested
if (fReopenDebugLog) {
fReopenDebugLog = false;
- fs::path pathDebug = GetDataDir() / "debug.log";
+ fs::path pathDebug = GetDebugLogPath();
if (fsbridge::freopen(pathDebug,"a",fileout) != nullptr)
setbuf(fileout, nullptr); // unbuffered
}
@@ -773,7 +789,7 @@ void ShrinkDebugFile()
// Amount of debug.log to save at end when shrinking (must fit in memory)
constexpr size_t RECENT_DEBUG_HISTORY_SIZE = 10 * 1000000;
// Scroll debug.log if it's getting too big
- fs::path pathLog = GetDataDir() / "debug.log";
+ fs::path pathLog = GetDebugLogPath();
FILE* file = fsbridge::fopen(pathLog, "r");
// If debug.log file is more than 10% bigger the RECENT_DEBUG_HISTORY_SIZE
// trim it down by saving only the last RECENT_DEBUG_HISTORY_SIZE bytes
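
[Editorial note: the new GetDebugLogPath() above resolves -debuglogfile either as an absolute path or relative to the data directory. A standalone sketch of that resolution using std::filesystem; the patch itself uses the project's fs wrapper and gArgs, and the names below are illustrative:]

    #include <filesystem>
    #include <iostream>

    namespace fs = std::filesystem;

    // Absolute paths are used verbatim; relative ones are anchored at the datadir.
    fs::path ResolveDebugLogPath(const fs::path& datadir, const fs::path& logfile)
    {
        return logfile.is_absolute() ? logfile : datadir / logfile;
    }

    int main()
    {
        const fs::path datadir{"/home/user/.bitcoin"};
        std::cout << ResolveDebugLogPath(datadir, "debug.log") << "\n";          // "/home/user/.bitcoin/debug.log"
        std::cout << ResolveDebugLogPath(datadir, "/var/log/bitcoin.log") << "\n"; // "/var/log/bitcoin.log"
    }
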
diff --git a/src/util.h b/src/util.h
index 08de43d29f..6687b865d2 100644
--- a/src/util.h
+++ b/src/util.h
@@ -36,6 +36,7 @@ int64_t GetStartupTime();
static const bool DEFAULT_LOGTIMEMICROS = false;
static const bool DEFAULT_LOGIPS = false;
static const bool DEFAULT_LOGTIMESTAMPS = true;
+extern const char * const DEFAULT_DEBUGLOGFILE;
/** Signals for translation. */
class CTranslationInterface
@@ -180,7 +181,8 @@ void CreatePidFile(const fs::path &path, pid_t pid);
#ifdef WIN32
fs::path GetSpecialFolderPath(int nFolder, bool fCreate = true);
#endif
-void OpenDebugLog();
+fs::path GetDebugLogPath();
+bool OpenDebugLog();
void ShrinkDebugFile();
void runCommand(const std::string& strCommand);
diff --git a/src/validation.cpp b/src/validation.cpp
index 16d2ff2a50..986052771c 100644
--- a/src/validation.cpp
+++ b/src/validation.cpp
@@ -1671,6 +1671,18 @@ static bool ConnectBlock(const CBlock& block, CValidationState& state, CBlockInd
int64_t nTimeStart = GetTimeMicros();
// Check it again in case a previous version let a bad block in
+ // NOTE: We don't currently (re-)invoke ContextualCheckBlock() or
+ // ContextualCheckBlockHeader() here. This means that if we add a new
+ // consensus rule that is enforced in one of those two functions, then we
+ // may have let in a block that violates the rule prior to updating the
+ // software, and we would NOT be enforcing the rule here. Fully solving
+ // upgrade from one software version to the next after a consensus rule
+ // change is potentially tricky and issue-specific (see RewindBlockIndex()
+ // for one general approach that was used for BIP 141 deployment).
+ // Also, currently the rule against blocks more than 2 hours in the future
+ // is enforced in ContextualCheckBlockHeader(); we wouldn't want to
+ // re-enforce that rule here (at least until we make it impossible for
+ // GetAdjustedTime() to go backward).
if (!CheckBlock(block, state, chainparams.GetConsensus(), !fJustCheck, !fJustCheck))
return error("%s: Consensus::CheckBlock: %s", __func__, FormatStateMessage(state));
@@ -2956,7 +2968,13 @@ std::vector<unsigned char> GenerateCoinbaseCommitment(CBlock& block, const CBloc
/** Context-dependent validity checks.
* By "context", we mean only the previous block headers, but not the UTXO
- * set; UTXO-related validity checks are done in ConnectBlock(). */
+ * set; UTXO-related validity checks are done in ConnectBlock().
+ * NOTE: This function is not currently invoked by ConnectBlock(), so we
+ * should consider upgrade issues if we change which consensus rules are
+ * enforced in this function (eg by adding a new consensus rule). See comment
+ * in ConnectBlock().
+ * Note that -reindex-chainstate skips the validation that happens here!
+ */
static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationState& state, const CChainParams& params, const CBlockIndex* pindexPrev, int64_t nAdjustedTime)
{
assert(pindexPrev != nullptr);
@@ -2996,6 +3014,12 @@ static bool ContextualCheckBlockHeader(const CBlockHeader& block, CValidationSta
return true;
}
+/** NOTE: This function is not currently invoked by ConnectBlock(), so we
+ * should consider upgrade issues if we change which consensus rules are
+ * enforced in this function (eg by adding a new consensus rule). See comment
+ * in ConnectBlock().
+ * Note that -reindex-chainstate skips the validation that happens here!
+ */
static bool ContextualCheckBlock(const CBlock& block, CValidationState& state, const Consensus::Params& consensusParams, const CBlockIndex* pindexPrev)
{
const int nHeight = pindexPrev == nullptr ? 0 : pindexPrev->nHeight + 1;
@@ -3319,8 +3343,8 @@ void PruneOneBlockFile(const int fileNumber)
{
LOCK(cs_LastBlockFile);
- for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); ++it) {
- CBlockIndex* pindex = it->second;
+ for (const auto& entry : mapBlockIndex) {
+ CBlockIndex* pindex = entry.second;
if (pindex->nFile == fileNumber) {
pindex->nStatus &= ~BLOCK_HAVE_DATA;
pindex->nStatus &= ~BLOCK_HAVE_UNDO;
@@ -3468,7 +3492,7 @@ static FILE* OpenDiskFile(const CDiskBlockPos &pos, const char *prefix, bool fRe
return nullptr;
fs::path path = GetBlockPosFilename(pos, prefix);
fs::create_directories(path.parent_path());
- FILE* file = fsbridge::fopen(path, "rb+");
+ FILE* file = fsbridge::fopen(path, fReadOnly ? "rb": "rb+");
if (!file && !fReadOnly)
file = fsbridge::fopen(path, "wb+");
if (!file) {
@@ -3874,8 +3898,8 @@ bool RewindBlockIndex(const CChainParams& params)
// Reduce validity flag and have-data flags.
// We do this after actual disconnecting, otherwise we'll end up writing the lack of data
// to disk before writing the chainstate, resulting in a failure to continue if interrupted.
- for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
- CBlockIndex* pindexIter = it->second;
+ for (const auto& entry : mapBlockIndex) {
+ CBlockIndex* pindexIter = entry.second;
// Note: If we encounter an insufficiently validated block that
// is on chainActive, it must be because we are a pruning node, and
@@ -4151,8 +4175,8 @@ void static CheckBlockIndex(const Consensus::Params& consensusParams)
// Build forward-pointing map of the entire block tree.
std::multimap<CBlockIndex*,CBlockIndex*> forward;
- for (BlockMap::iterator it = mapBlockIndex.begin(); it != mapBlockIndex.end(); it++) {
- forward.insert(std::make_pair(it->second->pprev, it->second));
+ for (auto& entry : mapBlockIndex) {
+ forward.insert(std::make_pair(entry.second->pprev, entry.second));
}
assert(forward.size() == mapBlockIndex.size());
diff --git a/src/validationinterface.cpp b/src/validationinterface.cpp
index 5ceba17e06..abbd8cc4d2 100644
--- a/src/validationinterface.cpp
+++ b/src/validationinterface.cpp
@@ -49,7 +49,9 @@ void CMainSignals::UnregisterBackgroundSignalScheduler() {
}
void CMainSignals::FlushBackgroundCallbacks() {
- m_internals->m_schedulerClient.EmptyQueue();
+ if (m_internals) {
+ m_internals->m_schedulerClient.EmptyQueue();
+ }
}
void CMainSignals::RegisterWithMempoolSignals(CTxMemPool& pool) {
@@ -92,6 +94,9 @@ void UnregisterValidationInterface(CValidationInterface* pwalletIn) {
}
void UnregisterAllValidationInterfaces() {
+ if (!g_signals.m_internals) {
+ return;
+ }
g_signals.m_internals->BlockChecked.disconnect_all_slots();
g_signals.m_internals->Broadcast.disconnect_all_slots();
g_signals.m_internals->Inventory.disconnect_all_slots();
diff --git a/src/wallet/rpcdump.cpp b/src/wallet/rpcdump.cpp
index fcc4c43872..71d50be634 100644
--- a/src/wallet/rpcdump.cpp
+++ b/src/wallet/rpcdump.cpp
@@ -165,7 +165,7 @@ UniValue abortrescan(const JSONRPCRequest& request)
if (request.fHelp || request.params.size() > 0)
throw std::runtime_error(
"abortrescan\n"
- "\nStops current wallet rescan triggered e.g. by an importprivkey call.\n"
+ "\nStops current wallet rescan triggered by an RPC call, e.g. by an importprivkey call.\n"
"\nExamples:\n"
"\nImport a private key\n"
+ HelpExampleCli("importprivkey", "\"mykey\"") +
diff --git a/src/wallet/rpcwallet.cpp b/src/wallet/rpcwallet.cpp
index 9a7861f978..473acd8367 100644
--- a/src/wallet/rpcwallet.cpp
+++ b/src/wallet/rpcwallet.cpp
@@ -1439,14 +1439,14 @@ UniValue ListReceived(CWallet * const pwallet, const UniValue& params, bool fByA
if (fByAccounts)
{
- for (std::map<std::string, tallyitem>::iterator it = mapAccountTally.begin(); it != mapAccountTally.end(); ++it)
+ for (const auto& entry : mapAccountTally)
{
- CAmount nAmount = (*it).second.nAmount;
- int nConf = (*it).second.nConf;
+ CAmount nAmount = entry.second.nAmount;
+ int nConf = entry.second.nConf;
UniValue obj(UniValue::VOBJ);
- if((*it).second.fIsWatchonly)
+ if (entry.second.fIsWatchonly)
obj.push_back(Pair("involvesWatchonly", true));
- obj.push_back(Pair("account", (*it).first));
+ obj.push_back(Pair("account", entry.first));
obj.push_back(Pair("amount", ValueFromAmount(nAmount)));
obj.push_back(Pair("confirmations", (nConf == std::numeric_limits<int>::max() ? 0 : nConf)));
ret.push_back(obj);
diff --git a/src/wallet/wallet.cpp b/src/wallet/wallet.cpp
index edc1ca6ef8..ee1894501c 100644
--- a/src/wallet/wallet.cpp
+++ b/src/wallet/wallet.cpp
@@ -705,9 +705,9 @@ DBErrors CWallet::ReorderTransactions()
typedef std::multimap<int64_t, TxPair > TxItems;
TxItems txByTime;
- for (std::map<uint256, CWalletTx>::iterator it = mapWallet.begin(); it != mapWallet.end(); ++it)
+ for (auto& entry : mapWallet)
{
- CWalletTx* wtx = &((*it).second);
+ CWalletTx* wtx = &entry.second;
txByTime.insert(std::make_pair(wtx->nTimeReceived, TxPair(wtx, nullptr)));
}
std::list<CAccountingEntry> acentries;
@@ -2026,9 +2026,9 @@ CAmount CWallet::GetBalance() const
CAmount nTotal = 0;
{
LOCK2(cs_main, cs_wallet);
- for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin(); it != mapWallet.end(); ++it)
+ for (const auto& entry : mapWallet)
{
- const CWalletTx* pcoin = &(*it).second;
+ const CWalletTx* pcoin = &entry.second;
if (pcoin->IsTrusted())
nTotal += pcoin->GetAvailableCredit();
}
@@ -2042,9 +2042,9 @@ CAmount CWallet::GetUnconfirmedBalance() const
CAmount nTotal = 0;
{
LOCK2(cs_main, cs_wallet);
- for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin(); it != mapWallet.end(); ++it)
+ for (const auto& entry : mapWallet)
{
- const CWalletTx* pcoin = &(*it).second;
+ const CWalletTx* pcoin = &entry.second;
if (!pcoin->IsTrusted() && pcoin->GetDepthInMainChain() == 0 && pcoin->InMempool())
nTotal += pcoin->GetAvailableCredit();
}
@@ -2057,9 +2057,9 @@ CAmount CWallet::GetImmatureBalance() const
CAmount nTotal = 0;
{
LOCK2(cs_main, cs_wallet);
- for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin(); it != mapWallet.end(); ++it)
+ for (const auto& entry : mapWallet)
{
- const CWalletTx* pcoin = &(*it).second;
+ const CWalletTx* pcoin = &entry.second;
nTotal += pcoin->GetImmatureCredit();
}
}
@@ -2071,9 +2071,9 @@ CAmount CWallet::GetWatchOnlyBalance() const
CAmount nTotal = 0;
{
LOCK2(cs_main, cs_wallet);
- for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin(); it != mapWallet.end(); ++it)
+ for (const auto& entry : mapWallet)
{
- const CWalletTx* pcoin = &(*it).second;
+ const CWalletTx* pcoin = &entry.second;
if (pcoin->IsTrusted())
nTotal += pcoin->GetAvailableWatchOnlyCredit();
}
@@ -2087,9 +2087,9 @@ CAmount CWallet::GetUnconfirmedWatchOnlyBalance() const
CAmount nTotal = 0;
{
LOCK2(cs_main, cs_wallet);
- for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin(); it != mapWallet.end(); ++it)
+ for (const auto& entry : mapWallet)
{
- const CWalletTx* pcoin = &(*it).second;
+ const CWalletTx* pcoin = &entry.second;
if (!pcoin->IsTrusted() && pcoin->GetDepthInMainChain() == 0 && pcoin->InMempool())
nTotal += pcoin->GetAvailableWatchOnlyCredit();
}
@@ -2102,9 +2102,9 @@ CAmount CWallet::GetImmatureWatchOnlyBalance() const
CAmount nTotal = 0;
{
LOCK2(cs_main, cs_wallet);
- for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin(); it != mapWallet.end(); ++it)
+ for (const auto& entry : mapWallet)
{
- const CWalletTx* pcoin = &(*it).second;
+ const CWalletTx* pcoin = &entry.second;
nTotal += pcoin->GetImmatureWatchOnlyCredit();
}
}
@@ -2178,10 +2178,10 @@ void CWallet::AvailableCoins(std::vector<COutput> &vCoins, bool fOnlySafe, const
CAmount nTotal = 0;
- for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin(); it != mapWallet.end(); ++it)
+ for (const auto& entry : mapWallet)
{
- const uint256& wtxid = it->first;
- const CWalletTx* pcoin = &(*it).second;
+ const uint256& wtxid = entry.first;
+ const CWalletTx* pcoin = &entry.second;
if (!CheckFinalTx(*pcoin->tx))
continue;
@@ -2242,10 +2242,10 @@ void CWallet::AvailableCoins(std::vector<COutput> &vCoins, bool fOnlySafe, const
if (pcoin->tx->vout[i].nValue < nMinimumAmount || pcoin->tx->vout[i].nValue > nMaximumAmount)
continue;
- if (coinControl && coinControl->HasSelected() && !coinControl->fAllowOtherInputs && !coinControl->IsSelected(COutPoint((*it).first, i)))
+ if (coinControl && coinControl->HasSelected() && !coinControl->fAllowOtherInputs && !coinControl->IsSelected(COutPoint(entry.first, i)))
continue;
- if (IsLockedCoin((*it).first, i))
+ if (IsLockedCoin(entry.first, i))
continue;
if (IsSpent(wtxid, i))
@@ -3705,9 +3705,9 @@ void CWallet::GetKeyBirthTimes(std::map<CTxDestination, int64_t> &mapKeyBirth) c
// find first block that affects those keys, if there are any left
std::vector<CKeyID> vAffected;
- for (std::map<uint256, CWalletTx>::const_iterator it = mapWallet.begin(); it != mapWallet.end(); it++) {
+ for (const auto& entry : mapWallet) {
// iterate over all wallet transactions...
- const CWalletTx &wtx = (*it).second;
+ const CWalletTx &wtx = entry.second;
BlockMap::const_iterator blit = mapBlockIndex.find(wtx.hashBlock);
if (blit != mapBlockIndex.end() && chainActive.Contains(blit->second)) {
// ... which are already in a block
@@ -3727,8 +3727,8 @@ void CWallet::GetKeyBirthTimes(std::map<CTxDestination, int64_t> &mapKeyBirth) c
}
// Extract block timestamps for those keys
- for (std::map<CKeyID, CBlockIndex*>::const_iterator it = mapKeyFirstBlock.begin(); it != mapKeyFirstBlock.end(); it++)
- mapKeyBirth[it->first] = it->second->GetBlockTime() - TIMESTAMP_WINDOW; // block times can be 2h off
+ for (const auto& entry : mapKeyFirstBlock)
+ mapKeyBirth[entry.first] = entry.second->GetBlockTime() - TIMESTAMP_WINDOW; // block times can be 2h off
}
/**
diff --git a/src/zmq/zmqnotificationinterface.cpp b/src/zmq/zmqnotificationinterface.cpp
index 26587d38a3..b663615752 100644
--- a/src/zmq/zmqnotificationinterface.cpp
+++ b/src/zmq/zmqnotificationinterface.cpp
@@ -40,15 +40,15 @@ CZMQNotificationInterface* CZMQNotificationInterface::Create()
factories["pubrawblock"] = CZMQAbstractNotifier::Create<CZMQPublishRawBlockNotifier>;
factories["pubrawtx"] = CZMQAbstractNotifier::Create<CZMQPublishRawTransactionNotifier>;
- for (std::map<std::string, CZMQNotifierFactory>::const_iterator i=factories.begin(); i!=factories.end(); ++i)
+ for (const auto& entry : factories)
{
- std::string arg("-zmq" + i->first);
+ std::string arg("-zmq" + entry.first);
if (gArgs.IsArgSet(arg))
{
- CZMQNotifierFactory factory = i->second;
+ CZMQNotifierFactory factory = entry.second;
std::string address = gArgs.GetArg(arg, "");
CZMQAbstractNotifier *notifier = factory();
- notifier->SetType(i->first);
+ notifier->SetType(entry.first);
notifier->SetAddress(address);
notifiers.push_back(notifier);
}
diff --git a/test/functional/README.md b/test/functional/README.md
index 2558bd017d..193ca947bc 100644
--- a/test/functional/README.md
+++ b/test/functional/README.md
@@ -63,12 +63,12 @@ wrappers for them, `msg_block`, `msg_tx`, etc).
with the bitcoind(s) being tested (using python's asyncore package); the other
implements the test logic.
-- `NodeConn` is the class used to connect to a bitcoind. If you implement
-a callback class that derives from `NodeConnCB` and pass that to the
-`NodeConn` object, your code will receive the appropriate callbacks when
-events of interest arrive.
+- `P2PConnection` is the class used to connect to a bitcoind. `P2PInterface`
+contains the higher level logic for processing P2P payloads and connecting to
+the Bitcoin Core node application logic. For custom behaviour, subclass the
+P2PInterface object and override the callback methods.
-- Call `NetworkThread.start()` after all `NodeConn` objects are created to
+- Call `NetworkThread.start()` after all `P2PInterface` objects are created to
start the networking thread. (Continue with the test logic in your existing
thread.)
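
To make the renamed interface concrete, here is a minimal sketch of a test peer under the new naming, using only classes and methods that appear in this diff; the subclass name SketchNode and its counter attribute are illustrative, not part of the change, and the surrounding BitcoinTestFramework boilerplate is elided:

    from test_framework.mininode import NetworkThread, P2PInterface

    class SketchNode(P2PInterface):
        """Toy peer: callbacks now receive only the message argument."""
        def __init__(self):
            super().__init__()
            self.seen_invs = 0

        def on_inv(self, message):
            # Overrides the default handler; note the new signature drops `conn`.
            self.seen_invs += len(message.inv)

    # Inside a test's run_test():
    #   peer = self.nodes[0].add_p2p_connection(SketchNode())
    #   NetworkThread().start()   # start networking in a separate thread
    #   peer.wait_for_verack()    # version/verack handshake completed
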
diff --git a/test/functional/assumevalid.py b/test/functional/assumevalid.py
index 36761d359e..13104f71bc 100755
--- a/test/functional/assumevalid.py
+++ b/test/functional/assumevalid.py
@@ -39,14 +39,14 @@ from test_framework.mininode import (CBlockHeader,
CTxIn,
CTxOut,
NetworkThread,
- NodeConnCB,
+ P2PInterface,
msg_block,
msg_headers)
from test_framework.script import (CScript, OP_TRUE)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import assert_equal
-class BaseNode(NodeConnCB):
+class BaseNode(P2PInterface):
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [CBlockHeader(b) for b in new_blocks]
@@ -67,7 +67,7 @@ class AssumeValidTest(BitcoinTestFramework):
def send_blocks_until_disconnected(self, p2p_conn):
"""Keep sending blocks to the node until we're disconnected."""
for i in range(len(self.blocks)):
- if not p2p_conn.connection:
+ if p2p_conn.state != "connected":
break
try:
p2p_conn.send_message(msg_block(self.blocks[i]))
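
A condensed sketch of the new disconnection idiom used here and again in p2p-leaktests.py below; `peer` and `blocks_to_send` are placeholder names, while the `state` attribute, `send_message` and `wait_for_disconnect` are the helpers shown in this diff:

    from test_framework.mininode import msg_block

    # Keep sending until the node hangs up on us.
    for block in blocks_to_send:           # placeholder list of CBlock objects
        if peer.state != "connected":      # replaces the old `if not peer.connection`
            break
        peer.send_message(msg_block(block))

    # Or block until the disconnect has actually happened:
    peer.wait_for_disconnect()
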
diff --git a/test/functional/bip65-cltv-p2p.py b/test/functional/bip65-cltv-p2p.py
index 3073324798..2af5eb275f 100755
--- a/test/functional/bip65-cltv-p2p.py
+++ b/test/functional/bip65-cltv-p2p.py
@@ -66,7 +66,7 @@ class BIP65Test(BitcoinTestFramework):
self.setup_clean_chain = True
def run_test(self):
- self.nodes[0].add_p2p_connection(NodeConnCB())
+ self.nodes[0].add_p2p_connection(P2PInterface())
NetworkThread().start() # Start up network handling in another thread
diff --git a/test/functional/bip9-softforks.py b/test/functional/bip9-softforks.py
index 904789301a..ec4d1d9365 100755
--- a/test/functional/bip9-softforks.py
+++ b/test/functional/bip9-softforks.py
@@ -246,7 +246,7 @@ class BIP9SoftForksTest(ComparisonTestFramework):
self.setup_network()
self.test.add_all_connections(self.nodes)
NetworkThread().start()
- self.test.test_nodes[0].wait_for_verack()
+ self.test.p2p_connections[0].wait_for_verack()
def get_tests(self):
for test in itertools.chain(
diff --git a/test/functional/bipdersig-p2p.py b/test/functional/bipdersig-p2p.py
index e5febde42d..7a3e565e2c 100755
--- a/test/functional/bipdersig-p2p.py
+++ b/test/functional/bipdersig-p2p.py
@@ -54,7 +54,7 @@ class BIP66Test(BitcoinTestFramework):
self.setup_clean_chain = True
def run_test(self):
- self.nodes[0].add_p2p_connection(NodeConnCB())
+ self.nodes[0].add_p2p_connection(P2PInterface())
NetworkThread().start() # Start up network handling in another thread
diff --git a/test/functional/example_test.py b/test/functional/example_test.py
index ba40f33016..289fa248e0 100755
--- a/test/functional/example_test.py
+++ b/test/functional/example_test.py
@@ -18,10 +18,11 @@ from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.mininode import (
CInv,
NetworkThread,
- NodeConnCB,
+ P2PInterface,
mininode_lock,
msg_block,
msg_getdata,
+ NODE_NETWORK,
)
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import (
@@ -30,15 +31,15 @@ from test_framework.util import (
wait_until,
)
-# NodeConnCB is a class containing callbacks to be executed when a P2P
-# message is received from the node-under-test. Subclass NodeConnCB and
+# P2PInterface is a class containing callbacks to be executed when a P2P
+# message is received from the node-under-test. Subclass P2PInterface and
# override the on_*() methods if you need custom behaviour.
-class BaseNode(NodeConnCB):
+class BaseNode(P2PInterface):
def __init__(self):
- """Initialize the NodeConnCB
+ """Initialize the P2PInterface
Used to initialize custom properties for the Node that aren't
- included by default in the base class. Be aware that the NodeConnCB
+ included by default in the base class. Be aware that the P2PInterface
base class already stores a counter for each P2P message type and the
last received message of each type, which should be sufficient for the
needs of most tests.
@@ -49,14 +50,14 @@ class BaseNode(NodeConnCB):
# Stores a dictionary of all blocks received
self.block_receive_map = defaultdict(int)
- def on_block(self, conn, message):
+ def on_block(self, message):
"""Override the standard on_block callback
Store the hash of a received block in the dictionary."""
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
- def on_inv(self, conn, message):
+ def on_inv(self, message):
"""Override the standard on_inv callback"""
pass
@@ -174,7 +175,7 @@ class ExampleTest(BitcoinTestFramework):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
block_message = msg_block(block)
- # Send message is used to send a P2P message to the node over our NodeConn connection
+ # Send message is used to send a P2P message to the node over our P2PInterface
self.nodes[0].p2p.send_message(block_message)
self.tip = block.sha256
blocks.append(self.tip)
@@ -199,12 +200,12 @@ class ExampleTest(BitcoinTestFramework):
self.nodes[2].p2p.send_message(getdata_request)
# wait_until() will loop until a predicate condition is met. Use it to test properties of the
- # NodeConnCB objects.
+ # P2PInterface objects.
wait_until(lambda: sorted(blocks) == sorted(list(self.nodes[2].p2p.block_receive_map.keys())), timeout=5, lock=mininode_lock)
self.log.info("Check that each block was received only once")
- # The network thread uses a global lock on data access to the NodeConn objects when sending and receiving
- # messages. The test thread should acquire the global lock before accessing any NodeConn data to avoid locking
+ # The network thread uses a global lock on data access to the P2PConnection objects when sending and receiving
+ # messages. The test thread should acquire the global lock before accessing any P2PConnection data to avoid locking
# and synchronization issues. Note wait_until() acquires this global lock when testing the predicate.
with mininode_lock:
for block in self.nodes[2].p2p.block_receive_map.values():
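
The locking pattern described in the comments above, reduced to a sketch; `peer` stands for any P2PInterface attached via add_p2p_connection, and its block_receive_map is assumed to be filled in by an on_block override as in this test:

    from test_framework.mininode import mininode_lock
    from test_framework.util import wait_until

    # wait_until() takes the lock itself while evaluating the predicate...
    wait_until(lambda: len(peer.block_receive_map) > 0, timeout=5, lock=mininode_lock)

    # ...but direct reads from the test thread should hold it explicitly.
    with mininode_lock:
        received = dict(peer.block_receive_map)
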
diff --git a/test/functional/feature_logging.py b/test/functional/feature_logging.py
new file mode 100755
index 0000000000..da4e7b0398
--- /dev/null
+++ b/test/functional/feature_logging.py
@@ -0,0 +1,59 @@
+#!/usr/bin/env python3
+# Copyright (c) 2017 The Bitcoin Core developers
+# Distributed under the MIT software license, see the accompanying
+# file COPYING or http://www.opensource.org/licenses/mit-license.php.
+"""Test debug logging."""
+
+import os
+
+from test_framework.test_framework import BitcoinTestFramework
+
+class LoggingTest(BitcoinTestFramework):
+ def set_test_params(self):
+ self.num_nodes = 1
+ self.setup_clean_chain = True
+
+ def run_test(self):
+ # test default log file name
+ assert os.path.isfile(os.path.join(self.nodes[0].datadir, "regtest", "debug.log"))
+
+ # test alternative log file name in datadir
+ self.restart_node(0, ["-debuglogfile=foo.log"])
+ assert os.path.isfile(os.path.join(self.nodes[0].datadir, "regtest", "foo.log"))
+
+ # test alternative log file name outside datadir
+ tempname = os.path.join(self.options.tmpdir, "foo.log")
+ self.restart_node(0, ["-debuglogfile=%s" % tempname])
+ assert os.path.isfile(tempname)
+
+ # check that invalid log (relative) will cause error
+ invdir = os.path.join(self.nodes[0].datadir, "regtest", "foo")
+ invalidname = os.path.join("foo", "foo.log")
+ self.stop_node(0)
+ self.assert_start_raises_init_error(0, ["-debuglogfile=%s" % (invalidname)],
+ "Error: Could not open debug log file")
+ assert not os.path.isfile(os.path.join(invdir, "foo.log"))
+
+ # check that invalid log (relative) works after path exists
+ self.stop_node(0)
+ os.mkdir(invdir)
+ self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
+ assert os.path.isfile(os.path.join(invdir, "foo.log"))
+
+ # check that invalid log (absolute) will cause error
+ self.stop_node(0)
+ invdir = os.path.join(self.options.tmpdir, "foo")
+ invalidname = os.path.join(invdir, "foo.log")
+ self.assert_start_raises_init_error(0, ["-debuglogfile=%s" % invalidname],
+ "Error: Could not open debug log file")
+ assert not os.path.isfile(os.path.join(invdir, "foo.log"))
+
+ # check that invalid log (absolute) works after path exists
+ self.stop_node(0)
+ os.mkdir(invdir)
+ self.start_node(0, ["-debuglogfile=%s" % (invalidname)])
+ assert os.path.isfile(os.path.join(invdir, "foo.log"))
+
+
+if __name__ == '__main__':
+ LoggingTest().main()
diff --git a/test/functional/maxuploadtarget.py b/test/functional/maxuploadtarget.py
index 88e2ff2e16..5ef71c93cf 100755
--- a/test/functional/maxuploadtarget.py
+++ b/test/functional/maxuploadtarget.py
@@ -17,15 +17,15 @@ from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
-class TestNode(NodeConnCB):
+class TestNode(P2PInterface):
def __init__(self):
super().__init__()
self.block_receive_map = defaultdict(int)
- def on_inv(self, conn, message):
+ def on_inv(self, message):
pass
- def on_block(self, conn, message):
+ def on_block(self, message):
message.block.calc_sha256()
self.block_receive_map[message.block.sha256] += 1
diff --git a/test/functional/p2p-acceptblock.py b/test/functional/p2p-acceptblock.py
index ca0e0080a1..d9d7c24416 100755
--- a/test/functional/p2p-acceptblock.py
+++ b/test/functional/p2p-acceptblock.py
@@ -7,7 +7,7 @@
Setup: two nodes, node0+node1, not connected to each other. Node1 will have
nMinimumChainWork set to 0x10, so it won't process low-work unrequested blocks.
-We have one NodeConn connection to node0 called test_node, and one to node1
+We have one P2PInterface connection to node0 called test_node, and one to node1
called min_work_node.
The test:
@@ -79,9 +79,9 @@ class AcceptBlockTest(BitcoinTestFramework):
def run_test(self):
# Setup the p2p connections and start up the network thread.
# test_node connects to node0 (not whitelisted)
- test_node = self.nodes[0].add_p2p_connection(NodeConnCB())
- # min_work_node connects to node1
- min_work_node = self.nodes[1].add_p2p_connection(NodeConnCB())
+ test_node = self.nodes[0].add_p2p_connection(P2PInterface())
+ # min_work_node connects to node1 (whitelisted)
+ min_work_node = self.nodes[1].add_p2p_connection(P2PInterface())
NetworkThread().start() # Start up network handling in another thread
@@ -207,7 +207,7 @@ class AcceptBlockTest(BitcoinTestFramework):
# disconnect/reconnect first
self.nodes[0].disconnect_p2ps()
- test_node = self.nodes[0].add_p2p_connection(NodeConnCB())
+ test_node = self.nodes[0].add_p2p_connection(P2PInterface())
test_node.wait_for_verack()
test_node.send_message(msg_block(block_h1f))
@@ -292,7 +292,7 @@ class AcceptBlockTest(BitcoinTestFramework):
test_node.wait_for_disconnect()
self.nodes[0].disconnect_p2ps()
- test_node = self.nodes[0].add_p2p_connection(NodeConnCB())
+ test_node = self.nodes[0].add_p2p_connection(P2PInterface())
NetworkThread().start() # Start up network handling in another thread
test_node.wait_for_verack()
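
The disconnect/reconnect sequence used twice in this test, as a standalone sketch to be read as part of a run_test() body; `some_block` is a placeholder for a previously built block:

    # Drop all existing P2P connections to node0, then attach a fresh peer.
    self.nodes[0].disconnect_p2ps()
    test_node = self.nodes[0].add_p2p_connection(P2PInterface())
    test_node.wait_for_verack()
    test_node.send_message(msg_block(some_block))
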
diff --git a/test/functional/p2p-compactblocks.py b/test/functional/p2p-compactblocks.py
index d2c4d39305..c43744328c 100755
--- a/test/functional/p2p-compactblocks.py
+++ b/test/functional/p2p-compactblocks.py
@@ -15,7 +15,7 @@ from test_framework.blocktools import create_block, create_coinbase, add_witness
from test_framework.script import CScript, OP_TRUE
# TestNode: A peer we use to send messages to bitcoind, and store responses.
-class TestNode(NodeConnCB):
+class TestNode(P2PInterface):
def __init__(self):
super().__init__()
self.last_sendcmpct = []
@@ -25,21 +25,21 @@ class TestNode(NodeConnCB):
# so we can eg wait until a particular block is announced.
self.announced_blockhashes = set()
- def on_sendcmpct(self, conn, message):
+ def on_sendcmpct(self, message):
self.last_sendcmpct.append(message)
- def on_cmpctblock(self, conn, message):
+ def on_cmpctblock(self, message):
self.block_announced = True
self.last_message["cmpctblock"].header_and_shortids.header.calc_sha256()
self.announced_blockhashes.add(self.last_message["cmpctblock"].header_and_shortids.header.sha256)
- def on_headers(self, conn, message):
+ def on_headers(self, message):
self.block_announced = True
for x in self.last_message["headers"].headers:
x.calc_sha256()
self.announced_blockhashes.add(x.sha256)
- def on_inv(self, conn, message):
+ def on_inv(self, message):
for x in self.last_message["inv"].inv:
if x.type == 2:
self.block_announced = True
@@ -60,7 +60,7 @@ class TestNode(NodeConnCB):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
- self.connection.send_message(msg)
+ self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
@@ -86,7 +86,7 @@ class TestNode(NodeConnCB):
This is used when we want to send a message into the node that we expect
will get us disconnected, eg an invalid block."""
self.send_message(message)
- wait_until(lambda: not self.connected, timeout=timeout, lock=mininode_lock)
+ wait_until(lambda: self.state != "connected", timeout=timeout, lock=mininode_lock)
class CompactBlocksTest(BitcoinTestFramework):
def set_test_params(self):
diff --git a/test/functional/p2p-feefilter.py b/test/functional/p2p-feefilter.py
index 624278df40..ac55336e3d 100755
--- a/test/functional/p2p-feefilter.py
+++ b/test/functional/p2p-feefilter.py
@@ -22,12 +22,12 @@ def allInvsMatch(invsExpected, testnode):
time.sleep(1)
return False
-class TestNode(NodeConnCB):
+class TestNode(P2PInterface):
def __init__(self):
super().__init__()
self.txinvs = []
- def on_inv(self, conn, message):
+ def on_inv(self, message):
for i in message.inv:
if (i.type == 1):
self.txinvs.append(hashToHex(i.hash))
diff --git a/test/functional/p2p-fingerprint.py b/test/functional/p2p-fingerprint.py
index a8ce68374c..209c789f22 100755
--- a/test/functional/p2p-fingerprint.py
+++ b/test/functional/p2p-fingerprint.py
@@ -14,7 +14,7 @@ from test_framework.blocktools import (create_block, create_coinbase)
from test_framework.mininode import (
CInv,
NetworkThread,
- NodeConnCB,
+ P2PInterface,
msg_headers,
msg_block,
msg_getdata,
@@ -75,7 +75,7 @@ class P2PFingerprintTest(BitcoinTestFramework):
# This does not currently test that stale blocks timestamped within the
# last month but that have over a month's worth of work are also withheld.
def run_test(self):
- node0 = self.nodes[0].add_p2p_connection(NodeConnCB())
+ node0 = self.nodes[0].add_p2p_connection(P2PInterface())
NetworkThread().start()
node0.wait_for_verack()
diff --git a/test/functional/p2p-leaktests.py b/test/functional/p2p-leaktests.py
index 719a03914d..b469a9a47a 100755
--- a/test/functional/p2p-leaktests.py
+++ b/test/functional/p2p-leaktests.py
@@ -20,7 +20,7 @@ from test_framework.util import *
banscore = 10
-class CLazyNode(NodeConnCB):
+class CLazyNode(P2PInterface):
def __init__(self):
super().__init__()
self.unexpected_msg = False
@@ -30,43 +30,42 @@ class CLazyNode(NodeConnCB):
self.unexpected_msg = True
self.log.info("should not have received message: %s" % message.command)
- def on_open(self, conn):
- self.connected = True
+ def on_open(self):
self.ever_connected = True
- def on_version(self, conn, message): self.bad_message(message)
- def on_verack(self, conn, message): self.bad_message(message)
- def on_reject(self, conn, message): self.bad_message(message)
- def on_inv(self, conn, message): self.bad_message(message)
- def on_addr(self, conn, message): self.bad_message(message)
- def on_getdata(self, conn, message): self.bad_message(message)
- def on_getblocks(self, conn, message): self.bad_message(message)
- def on_tx(self, conn, message): self.bad_message(message)
- def on_block(self, conn, message): self.bad_message(message)
- def on_getaddr(self, conn, message): self.bad_message(message)
- def on_headers(self, conn, message): self.bad_message(message)
- def on_getheaders(self, conn, message): self.bad_message(message)
- def on_ping(self, conn, message): self.bad_message(message)
- def on_mempool(self, conn): self.bad_message(message)
- def on_pong(self, conn, message): self.bad_message(message)
- def on_feefilter(self, conn, message): self.bad_message(message)
- def on_sendheaders(self, conn, message): self.bad_message(message)
- def on_sendcmpct(self, conn, message): self.bad_message(message)
- def on_cmpctblock(self, conn, message): self.bad_message(message)
- def on_getblocktxn(self, conn, message): self.bad_message(message)
- def on_blocktxn(self, conn, message): self.bad_message(message)
+ def on_version(self, message): self.bad_message(message)
+ def on_verack(self, message): self.bad_message(message)
+ def on_reject(self, message): self.bad_message(message)
+ def on_inv(self, message): self.bad_message(message)
+ def on_addr(self, message): self.bad_message(message)
+ def on_getdata(self, message): self.bad_message(message)
+ def on_getblocks(self, message): self.bad_message(message)
+ def on_tx(self, message): self.bad_message(message)
+ def on_block(self, message): self.bad_message(message)
+ def on_getaddr(self, message): self.bad_message(message)
+ def on_headers(self, message): self.bad_message(message)
+ def on_getheaders(self, message): self.bad_message(message)
+ def on_ping(self, message): self.bad_message(message)
+ def on_mempool(self, message): self.bad_message(message)
+ def on_pong(self, message): self.bad_message(message)
+ def on_feefilter(self, message): self.bad_message(message)
+ def on_sendheaders(self, message): self.bad_message(message)
+ def on_sendcmpct(self, message): self.bad_message(message)
+ def on_cmpctblock(self, message): self.bad_message(message)
+ def on_getblocktxn(self, message): self.bad_message(message)
+ def on_blocktxn(self, message): self.bad_message(message)
# Node that never sends a version. We'll use this to send a bunch of messages
# anyway, and eventually get disconnected.
class CNodeNoVersionBan(CLazyNode):
# send a bunch of veracks without sending a message. This should get us disconnected.
# NOTE: implementation-specific check here. Remove if bitcoind ban behavior changes
- def on_open(self, conn):
- super().on_open(conn)
+ def on_open(self):
+ super().on_open()
for i in range(banscore):
self.send_message(msg_verack())
- def on_reject(self, conn, message): pass
+ def on_reject(self, message): pass
# Node that never sends a version. This one just sits idle and hopes to receive
# any message (it shouldn't!)
@@ -80,15 +79,15 @@ class CNodeNoVerackIdle(CLazyNode):
self.version_received = False
super().__init__()
- def on_reject(self, conn, message): pass
- def on_verack(self, conn, message): pass
+ def on_reject(self, message): pass
+ def on_verack(self, message): pass
# When version is received, don't reply with a verack. Instead, see if the
# node will give us a message that it shouldn't. This is not an exhaustive
# list!
- def on_version(self, conn, message):
+ def on_version(self, message):
self.version_received = True
- conn.send_message(msg_ping())
- conn.send_message(msg_getaddr())
+ self.send_message(msg_ping())
+ self.send_message(msg_getaddr())
class P2PLeakTest(BitcoinTestFramework):
def set_test_params(self):
@@ -119,11 +118,11 @@ class P2PLeakTest(BitcoinTestFramework):
time.sleep(5)
#This node should have been banned
- assert not no_version_bannode.connected
+ assert no_version_bannode.state != "connected"
# These nodes should have been disconnected
- assert not unsupported_service_bit5_node.connected
- assert not unsupported_service_bit7_node.connected
+ assert unsupported_service_bit5_node.state != "connected"
+ assert unsupported_service_bit7_node.state != "connected"
self.nodes[0].disconnect_p2ps()
@@ -140,10 +139,10 @@ class P2PLeakTest(BitcoinTestFramework):
self.log.info("Service bits 5 and 7 are allowed after August 1st 2018")
self.nodes[0].setmocktime(1533168000) # August 2nd 2018
- allowed_service_bit5_node = self.nodes[0].add_p2p_connection(NodeConnCB(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5)
- allowed_service_bit7_node = self.nodes[0].add_p2p_connection(NodeConnCB(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7)
+ allowed_service_bit5_node = self.nodes[0].add_p2p_connection(P2PInterface(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_5)
+ allowed_service_bit7_node = self.nodes[0].add_p2p_connection(P2PInterface(), services=NODE_NETWORK|NODE_UNSUPPORTED_SERVICE_BIT_7)
- NetworkThread().start() # Network thread stopped when all previous NodeConnCBs disconnected. Restart it
+ NetworkThread().start() # Network thread stopped when all previous P2PInterfaces disconnected. Restart it
wait_until(lambda: allowed_service_bit5_node.message_count["verack"], lock=mininode_lock)
wait_until(lambda: allowed_service_bit7_node.message_count["verack"], lock=mininode_lock)
diff --git a/test/functional/p2p-mempool.py b/test/functional/p2p-mempool.py
index be467c4223..d24dbac51d 100755
--- a/test/functional/p2p-mempool.py
+++ b/test/functional/p2p-mempool.py
@@ -20,7 +20,7 @@ class P2PMempoolTests(BitcoinTestFramework):
def run_test(self):
# Add a p2p connection
- self.nodes[0].add_p2p_connection(NodeConnCB())
+ self.nodes[0].add_p2p_connection(P2PInterface())
NetworkThread().start()
self.nodes[0].p2p.wait_for_verack()
diff --git a/test/functional/p2p-segwit.py b/test/functional/p2p-segwit.py
index b940bc4096..a240d79013 100755
--- a/test/functional/p2p-segwit.py
+++ b/test/functional/p2p-segwit.py
@@ -31,13 +31,40 @@ def get_virtual_size(witness_block):
vsize = int((3*base_size + total_size + 3)/4)
return vsize
-class TestNode(NodeConnCB):
- def __init__(self, rpc):
+def test_transaction_acceptance(rpc, p2p, tx, with_witness, accepted, reason=None):
+ """Send a transaction to the node and check that it's accepted to the mempool
+
+ - Submit the transaction over the p2p interface
+ - use the getrawmempool rpc to check for acceptance."""
+ tx_message = msg_tx(tx)
+ if with_witness:
+ tx_message = msg_witness_tx(tx)
+ p2p.send_message(tx_message)
+ p2p.sync_with_ping()
+ assert_equal(tx.hash in rpc.getrawmempool(), accepted)
+ if (reason != None and not accepted):
+ # Check the rejection reason as well.
+ with mininode_lock:
+ assert_equal(p2p.last_message["reject"].reason, reason)
+
+def test_witness_block(rpc, p2p, block, accepted, with_witness=True):
+ """Send a block to the node and check that it's accepted
+
+ - Submit the block over the p2p interface
+ - use the getbestblockhash rpc to check for acceptance."""
+ if with_witness:
+ p2p.send_message(msg_witness_block(block))
+ else:
+ p2p.send_message(msg_block(block))
+ p2p.sync_with_ping()
+ assert_equal(rpc.getbestblockhash() == block.hash, accepted)
+
+class TestNode(P2PInterface):
+ def __init__(self):
super().__init__()
self.getdataset = set()
- self.rpc = rpc
- def on_getdata(self, conn, message):
+ def on_getdata(self, message):
for inv in message.inv:
self.getdataset.add(inv.hash)
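
With these helpers now module-level functions, call sites pass the RPC connection and the peer explicitly. A sketch mirroring the calls later in this file; the keyword names follow the signatures above, and `tx3` and `block` are placeholders:

    # Mempool path: submit the tx with witness over p2p, expect rejection with a reason.
    test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3,
                                with_witness=True, accepted=False, reason=b'no-witness-yet')

    # Block path: submit a witness block and expect it to become the new tip.
    test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
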
@@ -68,27 +95,6 @@ class TestNode(NodeConnCB):
self.wait_for_block(blockhash, timeout)
return self.last_message["block"].block
- def test_transaction_acceptance(self, tx, with_witness, accepted, reason=None):
- tx_message = msg_tx(tx)
- if with_witness:
- tx_message = msg_witness_tx(tx)
- self.send_message(tx_message)
- self.sync_with_ping()
- assert_equal(tx.hash in self.rpc.getrawmempool(), accepted)
- if (reason != None and not accepted):
- # Check the rejection reason as well.
- with mininode_lock:
- assert_equal(self.last_message["reject"].reason, reason)
-
- # Test whether a witness block had the correct effect on the tip
- def test_witness_block(self, block, accepted, with_witness=True):
- if with_witness:
- self.send_message(msg_witness_block(block))
- else:
- self.send_message(msg_block(block))
- self.sync_with_ping()
- assert_equal(self.rpc.getbestblockhash() == block.hash, accepted)
-
# Used to keep track of anyone-can-spend outputs that we can use in the tests
class UTXO():
def __init__(self, sha256, n, nValue):
@@ -142,7 +148,7 @@ class SegWitTest(BitcoinTestFramework):
''' Individual tests '''
def test_witness_services(self):
self.log.info("Verifying NODE_WITNESS service bit")
- assert((self.test_node.connection.nServices & NODE_WITNESS) != 0)
+ assert((self.test_node.nServices & NODE_WITNESS) != 0)
# See if sending a regular transaction works, and create a utxo
@@ -201,7 +207,7 @@ class SegWitTest(BitcoinTestFramework):
self.update_witness_block_with_transactions(block, [tx])
# Sending witness data before activation is not allowed (anti-spam
# rule).
- self.test_node.test_witness_block(block, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# TODO: fix synchronization so we can test reject reason
# Right now, bitcoind delays sending reject messages for blocks
# until the future, making synchronization here difficult.
@@ -228,7 +234,7 @@ class SegWitTest(BitcoinTestFramework):
tx2.vin.append(CTxIn(COutPoint(tx.sha256, 0), b""))
tx2.vout.append(CTxOut(tx.vout[0].nValue-1000, scriptPubKey))
tx2.rehash()
- self.test_node.test_transaction_acceptance(tx2, False, True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
@@ -245,18 +251,18 @@ class SegWitTest(BitcoinTestFramework):
tx3.rehash()
# Note that this should be rejected for the premature witness reason,
# rather than a policy check, since segwit hasn't activated yet.
- self.std_node.test_transaction_acceptance(tx3, True, False, b'no-witness-yet')
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, True, False, b'no-witness-yet')
# If we send without witness, it should be accepted.
- self.std_node.test_transaction_acceptance(tx3, False, True)
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, False, True)
# Now create a new anyone-can-spend utxo for the next test.
tx4 = CTransaction()
tx4.vin.append(CTxIn(COutPoint(tx3.sha256, 0), CScript([p2sh_program])))
tx4.vout.append(CTxOut(tx3.vout[0].nValue-1000, CScript([OP_TRUE])))
tx4.rehash()
- self.test_node.test_transaction_acceptance(tx3, False, True)
- self.test_node.test_transaction_acceptance(tx4, False, True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, False, True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx4, False, True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
@@ -317,7 +323,7 @@ class SegWitTest(BitcoinTestFramework):
assert(msg_witness_block(block).serialize() != msg_block(block).serialize())
# This empty block should be valid.
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Try to tweak the nonce
block_2 = self.build_next_block()
@@ -328,7 +334,7 @@ class SegWitTest(BitcoinTestFramework):
assert(block_2.vtx[0].vout[-1] != block.vtx[0].vout[-1])
# This should also be valid.
- self.test_node.test_witness_block(block_2, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block_2, accepted=True)
# Now test commitments with actual transactions
assert (len(self.utxo) > 0)
@@ -361,7 +367,7 @@ class SegWitTest(BitcoinTestFramework):
block_3.rehash()
block_3.solve()
- self.test_node.test_witness_block(block_3, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=False)
# Add a different commitment with different nonce, but in the
# right location, and with some funds burned(!).
@@ -375,7 +381,7 @@ class SegWitTest(BitcoinTestFramework):
block_3.rehash()
assert(len(block_3.vtx[0].vout) == 4) # 3 OP_returns
block_3.solve()
- self.test_node.test_witness_block(block_3, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=True)
# Finally test that a block with no witness transactions can
# omit the commitment.
@@ -387,7 +393,7 @@ class SegWitTest(BitcoinTestFramework):
block_4.vtx.append(tx3)
block_4.hashMerkleRoot = block_4.calc_merkle_root()
block_4.solve()
- self.test_node.test_witness_block(block_4, with_witness=False, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block_4, with_witness=False, accepted=True)
# Update available utxo's for use in later test.
self.utxo.pop(0)
@@ -428,11 +434,11 @@ class SegWitTest(BitcoinTestFramework):
# Change the nonce -- should not cause the block to be permanently
# failed
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(1) ]
- self.test_node.test_witness_block(block, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Changing the witness nonce doesn't change the block hash
block.vtx[0].wit.vtxinwit[0].scriptWitness.stack = [ ser_uint256(0) ]
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
def test_witness_block_size(self):
@@ -497,7 +503,7 @@ class SegWitTest(BitcoinTestFramework):
# limit
assert(len(block.serialize(True)) > 2*1024*1024)
- self.test_node.test_witness_block(block, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now resize the second transaction to make the block fit.
cur_length = len(block.vtx[-1].wit.vtxinwit[0].scriptWitness.stack[0])
@@ -507,7 +513,7 @@ class SegWitTest(BitcoinTestFramework):
block.solve()
assert(get_virtual_size(block) == MAX_BLOCK_BASE_SIZE)
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Update available utxo's
self.utxo.pop(0)
@@ -574,7 +580,7 @@ class SegWitTest(BitcoinTestFramework):
self.update_witness_block_with_transactions(block, [tx])
# Extra witness data should not be allowed.
- self.test_node.test_witness_block(block, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Try extra signature data. Ok if we're not spending a witness output.
block.vtx[1].wit.vtxinwit = []
@@ -583,7 +589,7 @@ class SegWitTest(BitcoinTestFramework):
add_witness_commitment(block)
block.solve()
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Now try extra witness/signature data on an input that DOES require a
# witness
@@ -599,7 +605,7 @@ class SegWitTest(BitcoinTestFramework):
self.update_witness_block_with_transactions(block, [tx2])
# This has extra witness data, so it should fail.
- self.test_node.test_witness_block(block, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now get rid of the extra witness, but add extra scriptSig data
tx2.vin[0].scriptSig = CScript([OP_TRUE])
@@ -611,7 +617,7 @@ class SegWitTest(BitcoinTestFramework):
block.solve()
# This has extra signature data for a witness input, so it should fail.
- self.test_node.test_witness_block(block, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now get rid of the extra scriptsig on the witness input, and verify
# success (even with extra scriptsig data in the non-witness input)
@@ -620,7 +626,7 @@ class SegWitTest(BitcoinTestFramework):
add_witness_commitment(block)
block.solve()
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Update utxo for later tests
self.utxo.pop(0)
@@ -653,14 +659,14 @@ class SegWitTest(BitcoinTestFramework):
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx, tx2])
- self.test_node.test_witness_block(block, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now reduce the length of the stack element
tx2.wit.vtxinwit[0].scriptWitness.stack[0] = b'a'*(MAX_SCRIPT_ELEMENT_SIZE)
add_witness_commitment(block)
block.solve()
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Update the utxo for later tests
self.utxo.pop()
@@ -695,7 +701,7 @@ class SegWitTest(BitcoinTestFramework):
self.update_witness_block_with_transactions(block, [tx, tx2])
- self.test_node.test_witness_block(block, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Try again with one less byte in the witness program
witness_program = CScript([b'a'*520]*19 + [OP_DROP]*62 + [OP_TRUE])
@@ -710,7 +716,7 @@ class SegWitTest(BitcoinTestFramework):
tx2.rehash()
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx, tx2])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@@ -736,7 +742,7 @@ class SegWitTest(BitcoinTestFramework):
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Try various ways to spend tx that should all break.
# This "broken" transaction serializer will not normalize
@@ -771,7 +777,7 @@ class SegWitTest(BitcoinTestFramework):
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
- self.test_node.test_witness_block(block, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now try using a too short vtxinwit
tx2.wit.vtxinwit.pop()
@@ -779,7 +785,7 @@ class SegWitTest(BitcoinTestFramework):
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
- self.test_node.test_witness_block(block, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now make one of the intermediate witnesses be incorrect
tx2.wit.vtxinwit.append(CTxInWitness())
@@ -788,13 +794,13 @@ class SegWitTest(BitcoinTestFramework):
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
- self.test_node.test_witness_block(block, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Fix the broken witness and the block should be accepted.
tx2.wit.vtxinwit[5].scriptWitness.stack = [b'a', witness_program]
block.vtx = [block.vtx[0]]
self.update_witness_block_with_transactions(block, [tx2])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
self.utxo.pop()
self.utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@@ -834,11 +840,11 @@ class SegWitTest(BitcoinTestFramework):
# its from)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
assert_equal(len(self.nodes[1].getrawmempool()), 0)
- self.old_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
- self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
+ test_transaction_acceptance(self.nodes[0].rpc, self.old_node, tx, with_witness=True, accepted=False)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=False)
# But eliminating the witness should fix it
- self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True)
# Cleanup: mine the first transaction and update utxo
self.nodes[0].generate(1)
@@ -870,11 +876,11 @@ class SegWitTest(BitcoinTestFramework):
# Verify that unnecessary witnesses are rejected.
self.test_node.announce_tx_and_wait_for_getdata(tx)
assert_equal(len(self.nodes[0].getrawmempool()), 0)
- self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=False)
# Verify that removing the witness succeeds.
self.test_node.announce_tx_and_wait_for_getdata(tx)
- self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True)
# Now try to add extra witness data to a valid witness tx.
witness_program = CScript([OP_TRUE])
@@ -899,24 +905,24 @@ class SegWitTest(BitcoinTestFramework):
# Node will not be blinded to the transaction
self.std_node.announce_tx_and_wait_for_getdata(tx3)
- self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, True, False, b'tx-size')
self.std_node.announce_tx_and_wait_for_getdata(tx3)
- self.std_node.test_transaction_acceptance(tx3, True, False, b'tx-size')
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx3, True, False, b'tx-size')
# Remove witness stuffing, instead add extra witness push on stack
tx3.vout[0] = CTxOut(tx2.vout[0].nValue-1000, CScript([OP_TRUE]))
tx3.wit.vtxinwit[0].scriptWitness.stack = [CScript([CScriptNum(1)]), witness_program ]
tx3.rehash()
- self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
- self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, with_witness=True, accepted=True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=False)
# Get rid of the extra witness, and verify acceptance.
tx3.wit.vtxinwit[0].scriptWitness.stack = [ witness_program ]
# Also check that old_node gets a tx announcement, even though this is
# a witness transaction.
self.old_node.wait_for_inv([CInv(1, tx2.sha256)]) # wait until tx2 was inv'ed
- self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True)
self.old_node.wait_for_inv([CInv(1, tx3.sha256)])
# Test that getrawtransaction returns correct witness information
@@ -955,20 +961,20 @@ class SegWitTest(BitcoinTestFramework):
self.test_node.announce_block_and_wait_for_getdata(block1, use_header=False)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
- self.test_node.test_witness_block(block1, True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block1, True)
block2 = self.build_next_block(nVersion=4)
block2.solve()
self.test_node.announce_block_and_wait_for_getdata(block2, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
- self.test_node.test_witness_block(block2, True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block2, True)
block3 = self.build_next_block(nVersion=(VB_TOP_BITS | (1<<15)))
block3.solve()
self.test_node.announce_block_and_wait_for_getdata(block3, use_header=True)
assert(self.test_node.last_message["getdata"].inv[0].type == blocktype)
- self.test_node.test_witness_block(block3, True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block3, True)
# Check that we can getdata for witness blocks or regular blocks,
# and the right thing happens.
@@ -998,7 +1004,7 @@ class SegWitTest(BitcoinTestFramework):
# This gives us a witness commitment.
assert(len(block.vtx[0].wit.vtxinwit) == 1)
assert(len(block.vtx[0].wit.vtxinwit[0].scriptWitness.stack) == 1)
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Now try to retrieve it...
rpc_block = self.nodes[0].getblock(block.hash, False)
non_wit_block = self.test_node.request_block(block.sha256, 2)
@@ -1052,7 +1058,7 @@ class SegWitTest(BitcoinTestFramework):
p2sh_tx.rehash()
# Mine it on test_node to create the confirmed output.
- self.test_node.test_transaction_acceptance(p2sh_tx, with_witness=True, accepted=True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_tx, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
@@ -1064,7 +1070,7 @@ class SegWitTest(BitcoinTestFramework):
tx.vout.append(CTxOut(8000, scriptPubKey)) # Might burn this later
tx.rehash()
- self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=segwit_activated)
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx, with_witness=True, accepted=segwit_activated)
# Now create something that looks like a P2PKH output. This won't be spendable.
scriptPubKey = CScript([OP_0, hash160(witness_hash)])
@@ -1081,7 +1087,7 @@ class SegWitTest(BitcoinTestFramework):
tx2.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, scriptPubKey)]
tx2.rehash()
- self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=segwit_activated)
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, with_witness=True, accepted=segwit_activated)
# Now update self.utxo for later tests.
tx3 = CTransaction()
@@ -1094,13 +1100,13 @@ class SegWitTest(BitcoinTestFramework):
tx3.wit.vtxinwit.append(CTxInWitness())
tx3.wit.vtxinwit[0].scriptWitness.stack = [witness_program]
tx3.rehash()
- self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True)
else:
# tx and tx2 didn't go anywhere; just clean up the p2sh_tx output.
tx3.vin = [CTxIn(COutPoint(p2sh_tx.sha256, 0), CScript([witness_program]))]
tx3.vout = [CTxOut(p2sh_tx.vout[0].nValue-1000, witness_program)]
tx3.rehash()
- self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
@@ -1124,7 +1130,7 @@ class SegWitTest(BitcoinTestFramework):
tx.rehash()
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
self.utxo.pop(0)
for i in range(NUM_TESTS):
self.utxo.append(UTXO(tx.sha256, i, split_value))
@@ -1143,8 +1149,8 @@ class SegWitTest(BitcoinTestFramework):
tx.vin = [CTxIn(COutPoint(self.utxo[0].sha256, self.utxo[0].n), b"")]
tx.vout = [CTxOut(self.utxo[0].nValue-1000, scriptPubKey)]
tx.rehash()
- self.std_node.test_transaction_acceptance(tx, with_witness=True, accepted=False)
- self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx, with_witness=True, accepted=False)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=True)
self.utxo.pop(0)
temp_utxo.append(UTXO(tx.sha256, 0, tx.vout[0].nValue))
@@ -1163,8 +1169,8 @@ class SegWitTest(BitcoinTestFramework):
tx2.rehash()
# Gets accepted to test_node, because standardness of outputs isn't
# checked with fRequireStandard
- self.test_node.test_transaction_acceptance(tx2, with_witness=True, accepted=True)
- self.std_node.test_transaction_acceptance(tx2, with_witness=True, accepted=False)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, with_witness=True, accepted=True)
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, tx2, with_witness=True, accepted=False)
temp_utxo.pop() # last entry in temp_utxo was the output we just spent
temp_utxo.append(UTXO(tx2.sha256, 0, tx2.vout[0].nValue))
@@ -1180,7 +1186,7 @@ class SegWitTest(BitcoinTestFramework):
tx3.rehash()
# Spending a higher version witness output is not allowed by policy,
# even with fRequireStandard=false.
- self.test_node.test_transaction_acceptance(tx3, with_witness=True, accepted=False)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, with_witness=True, accepted=False)
self.test_node.sync_with_ping()
with mininode_lock:
assert(b"reserved for soft-fork upgrades" in self.test_node.last_message["reject"].reason)
@@ -1188,7 +1194,7 @@ class SegWitTest(BitcoinTestFramework):
# Building a block with the transaction must be valid, however.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2, tx3])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
sync_blocks(self.nodes)
# Add utxo to our list
@@ -1206,7 +1212,7 @@ class SegWitTest(BitcoinTestFramework):
# This next line will rehash the coinbase and update the merkle
# root, and solve.
self.update_witness_block_with_transactions(block, [])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
spend_tx = CTransaction()
spend_tx.vin = [CTxIn(COutPoint(block.vtx[0].sha256, 0), b"")]
@@ -1220,13 +1226,13 @@ class SegWitTest(BitcoinTestFramework):
sync_blocks(self.nodes)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
- self.test_node.test_witness_block(block2, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block2, accepted=False)
# Advancing one more block should allow the spend.
self.nodes[0].generate(1)
block2 = self.build_next_block()
self.update_witness_block_with_transactions(block2, [spend_tx])
- self.test_node.test_witness_block(block2, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block2, accepted=True)
sync_blocks(self.nodes)
@@ -1247,11 +1253,11 @@ class SegWitTest(BitcoinTestFramework):
tx.vout.append(CTxOut(self.utxo[0].nValue-1000, scriptPubKey))
tx.rehash()
- self.test_node.test_transaction_acceptance(tx, with_witness=True, accepted=True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=True, accepted=True)
# Mine this transaction in preparation for following tests.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
sync_blocks(self.nodes)
self.utxo.pop(0)
@@ -1268,19 +1274,19 @@ class SegWitTest(BitcoinTestFramework):
# Too-large input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue+1, key)
self.update_witness_block_with_transactions(block, [tx])
- self.test_node.test_witness_block(block, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Too-small input value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue-1, key)
block.vtx.pop() # remove last tx
self.update_witness_block_with_transactions(block, [tx])
- self.test_node.test_witness_block(block, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Now try correct value
sign_P2PK_witness_input(witness_program, tx, 0, hashtype, prev_utxo.nValue, key)
block.vtx.pop()
self.update_witness_block_with_transactions(block, [tx])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
prev_utxo = UTXO(tx.sha256, 0, tx.vout[0].nValue)
@@ -1304,7 +1310,7 @@ class SegWitTest(BitcoinTestFramework):
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
block = self.build_next_block()
used_sighash_single_out_of_bounds = False
@@ -1346,7 +1352,7 @@ class SegWitTest(BitcoinTestFramework):
# Test the block periodically, if we're close to maxblocksize
if (get_virtual_size(block) > MAX_BLOCK_BASE_SIZE - 1000):
self.update_witness_block_with_transactions(block, [])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
block = self.build_next_block()
if (not used_sighash_single_out_of_bounds):
@@ -1354,7 +1360,7 @@ class SegWitTest(BitcoinTestFramework):
# Test the transactions we've added to the block
if (len(block.vtx) > 1):
self.update_witness_block_with_transactions(block, [])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Now test witness version 0 P2PKH transactions
pubkeyhash = hash160(pubkey)
@@ -1376,7 +1382,7 @@ class SegWitTest(BitcoinTestFramework):
tx2.vin[0].scriptSig = CScript([signature, pubkey])
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx, tx2])
- self.test_node.test_witness_block(block, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=False)
# Move the signature to the witness.
block.vtx.pop()
@@ -1386,7 +1392,7 @@ class SegWitTest(BitcoinTestFramework):
tx2.rehash()
self.update_witness_block_with_transactions(block, [tx2])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
temp_utxos.pop(0)
@@ -1405,7 +1411,7 @@ class SegWitTest(BitcoinTestFramework):
index += 1
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
for i in range(len(tx.vout)):
self.utxo.append(UTXO(tx.sha256, i, tx.vout[i].nValue))
@@ -1432,10 +1438,10 @@ class SegWitTest(BitcoinTestFramework):
tx.rehash()
# Verify mempool acceptance and block validity
- self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
- self.test_node.test_witness_block(block, accepted=True, with_witness=segwit_activated)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True, with_witness=segwit_activated)
sync_blocks(self.nodes)
# Now test attempts to spend the output.
@@ -1449,12 +1455,12 @@ class SegWitTest(BitcoinTestFramework):
# will require a witness to spend a witness program regardless of
# segwit activation. Note that older bitcoind's that are not
# segwit-aware would also reject this for failing CLEANSTACK.
- self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=False, accepted=False)
# Try to put the witness script in the scriptSig, should also fail.
spend_tx.vin[0].scriptSig = CScript([p2wsh_pubkey, b'a'])
spend_tx.rehash()
- self.test_node.test_transaction_acceptance(spend_tx, with_witness=False, accepted=False)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=False, accepted=False)
# Now put the witness script in the witness, should succeed after
# segwit activates.
@@ -1464,7 +1470,7 @@ class SegWitTest(BitcoinTestFramework):
spend_tx.wit.vtxinwit[0].scriptWitness.stack = [ b'a', witness_program ]
# Verify mempool acceptance
- self.test_node.test_transaction_acceptance(spend_tx, with_witness=True, accepted=segwit_activated)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, spend_tx, with_witness=True, accepted=segwit_activated)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [spend_tx])
@@ -1472,9 +1478,9 @@ class SegWitTest(BitcoinTestFramework):
# should be valid. If we're after activation, then sending this with
# witnesses should be valid.
if segwit_activated:
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
else:
- self.test_node.test_witness_block(block, accepted=True, with_witness=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True, with_witness=False)
# Update self.utxo
self.utxo.pop(0)
@@ -1558,7 +1564,7 @@ class SegWitTest(BitcoinTestFramework):
block_1 = self.build_next_block()
self.update_witness_block_with_transactions(block_1, [tx])
- self.test_node.test_witness_block(block_1, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block_1, accepted=True)
tx2 = CTransaction()
# If we try to spend the first n-1 outputs from tx, that should be
@@ -1575,7 +1581,7 @@ class SegWitTest(BitcoinTestFramework):
block_2 = self.build_next_block()
self.update_witness_block_with_transactions(block_2, [tx2])
- self.test_node.test_witness_block(block_2, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block_2, accepted=False)
# Try dropping the last input in tx2, and add an output that has
# too many sigops (contributing to legacy sigop count).
@@ -1588,14 +1594,14 @@ class SegWitTest(BitcoinTestFramework):
tx2.rehash()
block_3 = self.build_next_block()
self.update_witness_block_with_transactions(block_3, [tx2])
- self.test_node.test_witness_block(block_3, accepted=False)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block_3, accepted=False)
# If we drop the last checksig in this output, the tx should succeed.
block_4 = self.build_next_block()
tx2.vout[-1].scriptPubKey = CScript([OP_CHECKSIG]*(checksig_count-1))
tx2.rehash()
self.update_witness_block_with_transactions(block_4, [tx2])
- self.test_node.test_witness_block(block_4, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block_4, accepted=True)
# Reset the tip back down for the next test
sync_blocks(self.nodes)
@@ -1611,7 +1617,7 @@ class SegWitTest(BitcoinTestFramework):
tx2.wit.vtxinwit[-1].scriptWitness.stack = [ witness_program_justright ]
tx2.rehash()
self.update_witness_block_with_transactions(block_5, [tx2])
- self.test_node.test_witness_block(block_5, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block_5, accepted=True)
# TODO: test p2sh sigop counting
@@ -1689,7 +1695,7 @@ class SegWitTest(BitcoinTestFramework):
# Confirm it in a block.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Now try to spend it. Send it to a P2WSH output, which we'll
# use in the next test.
@@ -1708,11 +1714,11 @@ class SegWitTest(BitcoinTestFramework):
tx2.rehash()
# Should fail policy test.
- self.test_node.test_transaction_acceptance(tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx2, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx2])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Test 2: P2WSH
# Try to spend the P2WSH output created in last test.
@@ -1728,11 +1734,11 @@ class SegWitTest(BitcoinTestFramework):
sign_P2PK_witness_input(witness_program, tx3, 0, SIGHASH_ALL, tx2.vout[0].nValue, key)
# Should fail policy test.
- self.test_node.test_transaction_acceptance(tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx3, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
# But passes consensus.
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx3])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Test 3: P2SH(P2WSH)
# Try to spend the P2SH output created in the last test.
@@ -1745,10 +1751,10 @@ class SegWitTest(BitcoinTestFramework):
sign_P2PK_witness_input(witness_program, tx4, 0, SIGHASH_ALL, tx3.vout[0].nValue, key)
# Should fail policy test.
- self.test_node.test_transaction_acceptance(tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx4, True, False, b'non-mandatory-script-verify-flag (Using non-compressed keys in segwit)')
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx4])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
# Test 4: Uncompressed pubkeys should still be valid in non-segwit
# transactions.
@@ -1760,10 +1766,10 @@ class SegWitTest(BitcoinTestFramework):
tx5.vin[0].scriptSig = CScript([signature, pubkey])
tx5.rehash()
# Should pass policy and consensus.
- self.test_node.test_transaction_acceptance(tx5, True, True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx5, True, True)
block = self.build_next_block()
self.update_witness_block_with_transactions(block, [tx5])
- self.test_node.test_witness_block(block, accepted=True)
+ test_witness_block(self.nodes[0].rpc, self.test_node, block, accepted=True)
self.utxo.append(UTXO(tx5.sha256, 0, tx5.vout[0].nValue))
def test_non_standard_witness(self):
@@ -1793,7 +1799,7 @@ class SegWitTest(BitcoinTestFramework):
tx.vout.append(CTxOut(outputvalue, CScript([OP_HASH160, p2sh, OP_EQUAL])))
tx.rehash()
txid = tx.sha256
- self.test_node.test_transaction_acceptance(tx, with_witness=False, accepted=True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, tx, with_witness=False, accepted=True)
self.nodes[0].generate(1)
sync_blocks(self.nodes)
@@ -1818,45 +1824,45 @@ class SegWitTest(BitcoinTestFramework):
# Testing native P2WSH
# Witness stack size, excluding witnessScript, over 100 is non-standard
p2wsh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
- self.std_node.test_transaction_acceptance(p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[0], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
- self.test_node.test_transaction_acceptance(p2wsh_txs[0], True, True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[0], True, True)
# Stack element size over 80 bytes is non-standard
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
- self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[1], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
- self.test_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[1], True, True)
# Standard nodes should accept if element size is not over 80 bytes
p2wsh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
- self.std_node.test_transaction_acceptance(p2wsh_txs[1], True, True)
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[1], True, True)
# witnessScript size at 3600 bytes is standard
p2wsh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
- self.test_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
- self.std_node.test_transaction_acceptance(p2wsh_txs[2], True, True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[2], True, True)
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[2], True, True)
# witnessScript size at 3601 bytes is non-standard
p2wsh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
- self.std_node.test_transaction_acceptance(p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2wsh_txs[3], True, False, b'bad-witness-nonstandard')
# Non-standard nodes should accept
- self.test_node.test_transaction_acceptance(p2wsh_txs[3], True, True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2wsh_txs[3], True, True)
# Repeating the same tests with P2SH-P2WSH
p2sh_txs[0].wit.vtxinwit[0].scriptWitness.stack = [pad] * 101 + [scripts[0]]
- self.std_node.test_transaction_acceptance(p2sh_txs[0], True, False, b'bad-witness-nonstandard')
- self.test_node.test_transaction_acceptance(p2sh_txs[0], True, True)
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[0], True, False, b'bad-witness-nonstandard')
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[0], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 81] * 100 + [scripts[1]]
- self.std_node.test_transaction_acceptance(p2sh_txs[1], True, False, b'bad-witness-nonstandard')
- self.test_node.test_transaction_acceptance(p2sh_txs[1], True, True)
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[1], True, False, b'bad-witness-nonstandard')
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[1], True, True)
p2sh_txs[1].wit.vtxinwit[0].scriptWitness.stack = [pad * 80] * 100 + [scripts[1]]
- self.std_node.test_transaction_acceptance(p2sh_txs[1], True, True)
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[1], True, True)
p2sh_txs[2].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, scripts[2]]
- self.test_node.test_transaction_acceptance(p2sh_txs[2], True, True)
- self.std_node.test_transaction_acceptance(p2sh_txs[2], True, True)
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[2], True, True)
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[2], True, True)
p2sh_txs[3].wit.vtxinwit[0].scriptWitness.stack = [pad, pad, pad, scripts[3]]
- self.std_node.test_transaction_acceptance(p2sh_txs[3], True, False, b'bad-witness-nonstandard')
- self.test_node.test_transaction_acceptance(p2sh_txs[3], True, True)
+ test_transaction_acceptance(self.nodes[1].rpc, self.std_node, p2sh_txs[3], True, False, b'bad-witness-nonstandard')
+ test_transaction_acceptance(self.nodes[0].rpc, self.test_node, p2sh_txs[3], True, True)
self.nodes[0].generate(1) # Mine and clean up the mempool of non-standard node
# Valid but non-standard transactions in a block should be accepted by standard node
@@ -1870,11 +1876,11 @@ class SegWitTest(BitcoinTestFramework):
def run_test(self):
# Setup the p2p connections and start up the network thread.
# self.test_node sets NODE_WITNESS|NODE_NETWORK
- self.test_node = self.nodes[0].add_p2p_connection(TestNode(self.nodes[0].rpc), services=NODE_NETWORK|NODE_WITNESS)
+ self.test_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS)
# self.old_node sets only NODE_NETWORK
- self.old_node = self.nodes[0].add_p2p_connection(TestNode(self.nodes[0].rpc), services=NODE_NETWORK)
+ self.old_node = self.nodes[0].add_p2p_connection(TestNode(), services=NODE_NETWORK)
# self.std_node is for testing node1 (fRequireStandard=true)
- self.std_node = self.nodes[1].add_p2p_connection(TestNode(self.nodes[1].rpc), services=NODE_NETWORK|NODE_WITNESS)
+ self.std_node = self.nodes[1].add_p2p_connection(TestNode(), services=NODE_NETWORK|NODE_WITNESS)
NetworkThread().start() # Start up network handling in another thread
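
Editor's note on the p2p-segwit.py hunks above: the old NodeConnCB-bound helpers become module-level functions that take the node's RPC handle and the P2P interface explicitly. A minimal sketch of what such a helper can look like under that calling convention (illustrative only; the real definitions live in p2p-segwit.py and may differ in detail):

    from test_framework.mininode import msg_block, msg_witness_block
    from test_framework.util import assert_equal

    def test_witness_block(rpc, p2p, block, accepted, with_witness=True):
        """Send a block over the given P2P interface and check acceptance via RPC."""
        p2p.send_message(msg_witness_block(block) if with_witness else msg_block(block))
        p2p.sync_with_ping()  # make sure the node has processed the block message
        assert_equal(rpc.getbestblockhash() == block.hash, accepted)
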
diff --git a/test/functional/p2p-timeouts.py b/test/functional/p2p-timeouts.py
index 14a3bf48fb..b2f3a861cf 100755
--- a/test/functional/p2p-timeouts.py
+++ b/test/functional/p2p-timeouts.py
@@ -27,8 +27,8 @@ from test_framework.mininode import *
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
-class TestNode(NodeConnCB):
- def on_version(self, conn, message):
+class TestNode(P2PInterface):
+ def on_version(self, message):
# Don't send a verack in response
pass
diff --git a/test/functional/p2p-versionbits-warning.py b/test/functional/p2p-versionbits-warning.py
index 464ca5a312..be137381d0 100755
--- a/test/functional/p2p-versionbits-warning.py
+++ b/test/functional/p2p-versionbits-warning.py
@@ -23,8 +23,8 @@ WARN_UNKNOWN_RULES_MINED = "Unknown block versions being mined! It's possible un
WARN_UNKNOWN_RULES_ACTIVE = "unknown new rules activated (versionbit {})".format(VB_UNKNOWN_BIT)
VB_PATTERN = re.compile("^Warning.*versionbit")
-class TestNode(NodeConnCB):
- def on_inv(self, conn, message):
+class TestNode(P2PInterface):
+ def on_inv(self, message):
pass
class VersionBitsWarningTest(BitcoinTestFramework):
diff --git a/test/functional/sendheaders.py b/test/functional/sendheaders.py
index 68c0d95b4f..99b7f6b99e 100755
--- a/test/functional/sendheaders.py
+++ b/test/functional/sendheaders.py
@@ -91,7 +91,7 @@ from test_framework.mininode import (
CInv,
NODE_WITNESS,
NetworkThread,
- NodeConnCB,
+ P2PInterface,
mininode_lock,
msg_block,
msg_getblocks,
@@ -110,9 +110,10 @@ from test_framework.util import (
DIRECT_FETCH_RESPONSE_TIME = 0.05
-class BaseNode(NodeConnCB):
+class BaseNode(P2PInterface):
def __init__(self):
super().__init__()
+
self.block_announced = False
self.last_blockhash_announced = None
@@ -121,18 +122,18 @@ class BaseNode(NodeConnCB):
msg = msg_getdata()
for x in block_hashes:
msg.inv.append(CInv(2, x))
- self.connection.send_message(msg)
+ self.send_message(msg)
def send_get_headers(self, locator, hashstop):
msg = msg_getheaders()
msg.locator.vHave = locator
msg.hashstop = hashstop
- self.connection.send_message(msg)
+ self.send_message(msg)
def send_block_inv(self, blockhash):
msg = msg_inv()
msg.inv = [CInv(2, blockhash)]
- self.connection.send_message(msg)
+ self.send_message(msg)
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
@@ -155,11 +156,11 @@ class BaseNode(NodeConnCB):
test_function = lambda: self.last_blockhash_announced == block_hash
wait_until(test_function, timeout=timeout, lock=mininode_lock)
- def on_inv(self, conn, message):
+ def on_inv(self, message):
self.block_announced = True
self.last_blockhash_announced = message.inv[-1].hash
- def on_headers(self, conn, message):
+ def on_headers(self, message):
if len(message.headers):
self.block_announced = True
message.headers[-1].calc_sha256()
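
Editor's note: the signature change above applies to every message callback. With P2PInterface the connection object is no longer passed in, because the interface is the connection. A before/after sketch of the pattern (hypothetical InvLogger subclass, not part of the patch):

    # Old style (NodeConnCB): callbacks received the NodeConn and sent through it.
    #     def on_inv(self, conn, message):
    #         conn.send_message(want)
    #
    # New style (P2PInterface): callbacks take only the message and send on self.
    from test_framework.mininode import P2PInterface, msg_getdata

    class InvLogger(P2PInterface):
        def on_inv(self, message):
            want = msg_getdata()
            want.inv = message.inv[:]      # request everything that was announced
            self.send_message(want)        # send on self, no conn argument needed
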
diff --git a/test/functional/test_framework/comptool.py b/test/functional/test_framework/comptool.py
index 723826bae4..f0f5c847ca 100755
--- a/test/functional/test_framework/comptool.py
+++ b/test/functional/test_framework/comptool.py
@@ -39,11 +39,10 @@ class RejectResult():
def __repr__(self):
return '%i:%s' % (self.code,self.reason or '*')
-class TestNode(NodeConnCB):
+class TestNode(P2PInterface):
def __init__(self, block_store, tx_store):
super().__init__()
- self.conn = None
self.bestblockhash = None
self.block_store = block_store
self.block_request_map = {}
@@ -58,26 +57,23 @@ class TestNode(NodeConnCB):
self.lastInv = []
self.closed = False
- def on_close(self, conn):
+ def on_close(self):
self.closed = True
- def add_connection(self, conn):
- self.conn = conn
-
- def on_headers(self, conn, message):
+ def on_headers(self, message):
if len(message.headers) > 0:
best_header = message.headers[-1]
best_header.calc_sha256()
self.bestblockhash = best_header.sha256
- def on_getheaders(self, conn, message):
+ def on_getheaders(self, message):
response = self.block_store.headers_for(message.locator, message.hashstop)
if response is not None:
- conn.send_message(response)
+ self.send_message(response)
- def on_getdata(self, conn, message):
- [conn.send_message(r) for r in self.block_store.get_blocks(message.inv)]
- [conn.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
+ def on_getdata(self, message):
+ [self.send_message(r) for r in self.block_store.get_blocks(message.inv)]
+ [self.send_message(r) for r in self.tx_store.get_transactions(message.inv)]
for i in message.inv:
if i.type == 1 or i.type == 1 | (1 << 30): # MSG_TX or MSG_WITNESS_TX
@@ -85,16 +81,16 @@ class TestNode(NodeConnCB):
elif i.type == 2 or i.type == 2 | (1 << 30): # MSG_BLOCK or MSG_WITNESS_BLOCK
self.block_request_map[i.hash] = True
- def on_inv(self, conn, message):
+ def on_inv(self, message):
self.lastInv = [x.hash for x in message.inv]
- def on_pong(self, conn, message):
+ def on_pong(self, message):
try:
del self.pingMap[message.nonce]
except KeyError:
raise AssertionError("Got pong for unknown ping [%s]" % repr(message))
- def on_reject(self, conn, message):
+ def on_reject(self, message):
if message.message == b'tx':
self.tx_reject_map[message.data] = RejectResult(message.code, message.reason)
if message.message == b'block':
@@ -102,30 +98,30 @@ class TestNode(NodeConnCB):
def send_inv(self, obj):
mtype = 2 if isinstance(obj, CBlock) else 1
- self.conn.send_message(msg_inv([CInv(mtype, obj.sha256)]))
+ self.send_message(msg_inv([CInv(mtype, obj.sha256)]))
def send_getheaders(self):
# We ask for headers from their last tip.
m = msg_getheaders()
m.locator = self.block_store.get_locator(self.bestblockhash)
- self.conn.send_message(m)
+ self.send_message(m)
def send_header(self, header):
m = msg_headers()
m.headers.append(header)
- self.conn.send_message(m)
+ self.send_message(m)
# This assumes BIP31
def send_ping(self, nonce):
self.pingMap[nonce] = True
- self.conn.send_message(msg_ping(nonce))
+ self.send_message(msg_ping(nonce))
def received_ping_response(self, nonce):
return nonce not in self.pingMap
def send_mempool(self):
self.lastInv = []
- self.conn.send_message(msg_mempool())
+ self.send_message(msg_mempool())
# TestInstance:
#
@@ -166,8 +162,7 @@ class TestManager():
def __init__(self, testgen, datadir):
self.test_generator = testgen
- self.connections = []
- self.test_nodes = []
+ self.p2p_connections = []
self.block_store = BlockStore(datadir)
self.tx_store = TxStore(datadir)
self.ping_counter = 1
@@ -175,28 +170,24 @@ class TestManager():
def add_all_connections(self, nodes):
for i in range(len(nodes)):
# Create a p2p connection to each node
- test_node = TestNode(self.block_store, self.tx_store)
- self.test_nodes.append(test_node)
- self.connections.append(NodeConn('127.0.0.1', p2p_port(i), test_node))
- # Make sure the TestNode (callback class) has a reference to its
- # associated NodeConn
- test_node.add_connection(self.connections[-1])
+ node = TestNode(self.block_store, self.tx_store)
+ node.peer_connect('127.0.0.1', p2p_port(i))
+ self.p2p_connections.append(node)
def clear_all_connections(self):
- self.connections = []
- self.test_nodes = []
+ self.p2p_connections = []
def wait_for_disconnections(self):
def disconnected():
- return all(node.closed for node in self.test_nodes)
+ return all(node.closed for node in self.p2p_connections)
wait_until(disconnected, timeout=10, lock=mininode_lock)
def wait_for_verack(self):
- return all(node.wait_for_verack() for node in self.test_nodes)
+ return all(node.wait_for_verack() for node in self.p2p_connections)
def wait_for_pings(self, counter):
def received_pongs():
- return all(node.received_ping_response(counter) for node in self.test_nodes)
+ return all(node.received_ping_response(counter) for node in self.p2p_connections)
wait_until(received_pongs, lock=mininode_lock)
# sync_blocks: Wait for all connections to request the blockhash given
@@ -206,17 +197,17 @@ class TestManager():
def blocks_requested():
return all(
blockhash in node.block_request_map and node.block_request_map[blockhash]
- for node in self.test_nodes
+ for node in self.p2p_connections
)
# --> error if not requested
wait_until(blocks_requested, attempts=20*num_blocks, lock=mininode_lock)
# Send getheaders message
- [ c.cb.send_getheaders() for c in self.connections ]
+ [ c.send_getheaders() for c in self.p2p_connections ]
# Send ping and wait for response -- synchronization hack
- [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
+ [ c.send_ping(self.ping_counter) for c in self.p2p_connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
@@ -226,42 +217,42 @@ class TestManager():
def transaction_requested():
return all(
txhash in node.tx_request_map and node.tx_request_map[txhash]
- for node in self.test_nodes
+ for node in self.p2p_connections
)
# --> error if not requested
wait_until(transaction_requested, attempts=20*num_events, lock=mininode_lock)
# Get the mempool
- [ c.cb.send_mempool() for c in self.connections ]
+ [ c.send_mempool() for c in self.p2p_connections ]
# Send ping and wait for response -- synchronization hack
- [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
+ [ c.send_ping(self.ping_counter) for c in self.p2p_connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
# Sort inv responses from each node
with mininode_lock:
- [ c.cb.lastInv.sort() for c in self.connections ]
+ [ c.lastInv.sort() for c in self.p2p_connections ]
# Verify that the tip of each connection all agree with each other, and
# with the expected outcome (if given)
def check_results(self, blockhash, outcome):
with mininode_lock:
- for c in self.connections:
+ for c in self.p2p_connections:
if outcome is None:
- if c.cb.bestblockhash != self.connections[0].cb.bestblockhash:
+ if c.bestblockhash != self.p2p_connections[0].bestblockhash:
return False
elif isinstance(outcome, RejectResult): # Check that block was rejected w/ code
- if c.cb.bestblockhash == blockhash:
+ if c.bestblockhash == blockhash:
return False
- if blockhash not in c.cb.block_reject_map:
+ if blockhash not in c.block_reject_map:
logger.error('Block not in reject map: %064x' % (blockhash))
return False
- if not outcome.match(c.cb.block_reject_map[blockhash]):
- logger.error('Block rejected with %s instead of expected %s: %064x' % (c.cb.block_reject_map[blockhash], outcome, blockhash))
+ if not outcome.match(c.block_reject_map[blockhash]):
+ logger.error('Block rejected with %s instead of expected %s: %064x' % (c.block_reject_map[blockhash], outcome, blockhash))
return False
- elif ((c.cb.bestblockhash == blockhash) != outcome):
+ elif ((c.bestblockhash == blockhash) != outcome):
return False
return True
@@ -273,21 +264,21 @@ class TestManager():
# a particular tx's existence in the mempool is the same across all nodes.
def check_mempool(self, txhash, outcome):
with mininode_lock:
- for c in self.connections:
+ for c in self.p2p_connections:
if outcome is None:
# Make sure the mempools agree with each other
- if c.cb.lastInv != self.connections[0].cb.lastInv:
+ if c.lastInv != self.p2p_connections[0].lastInv:
return False
elif isinstance(outcome, RejectResult): # Check that tx was rejected w/ code
- if txhash in c.cb.lastInv:
+ if txhash in c.lastInv:
return False
- if txhash not in c.cb.tx_reject_map:
+ if txhash not in c.tx_reject_map:
logger.error('Tx not in reject map: %064x' % (txhash))
return False
- if not outcome.match(c.cb.tx_reject_map[txhash]):
- logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.cb.tx_reject_map[txhash], outcome, txhash))
+ if not outcome.match(c.tx_reject_map[txhash]):
+ logger.error('Tx rejected with %s instead of expected %s: %064x' % (c.tx_reject_map[txhash], outcome, txhash))
return False
- elif ((txhash in c.cb.lastInv) != outcome):
+ elif ((txhash in c.lastInv) != outcome):
return False
return True
@@ -332,25 +323,25 @@ class TestManager():
first_block_with_hash = False
with mininode_lock:
self.block_store.add_block(block)
- for c in self.connections:
- if first_block_with_hash and block.sha256 in c.cb.block_request_map and c.cb.block_request_map[block.sha256] == True:
+ for c in self.p2p_connections:
+ if first_block_with_hash and block.sha256 in c.block_request_map and c.block_request_map[block.sha256] == True:
# There was a previous request for this block hash
# Most likely, we delivered a header for this block
# but never had the block to respond to the getdata
c.send_message(msg_block(block))
else:
- c.cb.block_request_map[block.sha256] = False
+ c.block_request_map[block.sha256] = False
# Either send inv's to each node and sync, or add
# to invqueue for later inv'ing.
if (test_instance.sync_every_block):
# if we expect success, send inv and sync every block
# if we expect failure, just push the block and see what happens.
if outcome == True:
- [ c.cb.send_inv(block) for c in self.connections ]
+ [ c.send_inv(block) for c in self.p2p_connections ]
self.sync_blocks(block.sha256, 1)
else:
- [ c.send_message(msg_block(block)) for c in self.connections ]
- [ c.cb.send_ping(self.ping_counter) for c in self.connections ]
+ [ c.send_message(msg_block(block)) for c in self.p2p_connections ]
+ [ c.send_ping(self.ping_counter) for c in self.p2p_connections ]
self.wait_for_pings(self.ping_counter)
self.ping_counter += 1
if (not self.check_results(tip, outcome)):
@@ -360,7 +351,7 @@ class TestManager():
elif isinstance(b_or_t, CBlockHeader):
block_header = b_or_t
self.block_store.add_header(block_header)
- [ c.cb.send_header(block_header) for c in self.connections ]
+ [ c.send_header(block_header) for c in self.p2p_connections ]
else: # Tx test runner
assert(isinstance(b_or_t, CTransaction))
@@ -369,11 +360,11 @@ class TestManager():
# Add to shared tx store and clear map entry
with mininode_lock:
self.tx_store.add_transaction(tx)
- for c in self.connections:
- c.cb.tx_request_map[tx.sha256] = False
+ for c in self.p2p_connections:
+ c.tx_request_map[tx.sha256] = False
# Again, either inv to all nodes or save for later
if (test_instance.sync_every_tx):
- [ c.cb.send_inv(tx) for c in self.connections ]
+ [ c.send_inv(tx) for c in self.p2p_connections ]
self.sync_transaction(tx.sha256, 1)
if (not self.check_mempool(tx.sha256, outcome)):
raise AssertionError("Test failed at test %d" % test_number)
@@ -381,26 +372,26 @@ class TestManager():
invqueue.append(CInv(1, tx.sha256))
# Ensure we're not overflowing the inv queue
if len(invqueue) == MAX_INV_SZ:
- [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
+ [ c.send_message(msg_inv(invqueue)) for c in self.p2p_connections ]
invqueue = []
# Do final sync if we weren't syncing on every block or every tx.
if (not test_instance.sync_every_block and block is not None):
if len(invqueue) > 0:
- [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
+ [ c.send_message(msg_inv(invqueue)) for c in self.p2p_connections ]
invqueue = []
self.sync_blocks(block.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_results(tip, block_outcome)):
raise AssertionError("Block test failed at test %d" % test_number)
if (not test_instance.sync_every_tx and tx is not None):
if len(invqueue) > 0:
- [ c.send_message(msg_inv(invqueue)) for c in self.connections ]
+ [ c.send_message(msg_inv(invqueue)) for c in self.p2p_connections ]
invqueue = []
self.sync_transaction(tx.sha256, len(test_instance.blocks_and_transactions))
if (not self.check_mempool(tx.sha256, tx_outcome)):
raise AssertionError("Mempool test failed at test %d" % test_number)
- [ c.disconnect_node() for c in self.connections ]
+ [ c.disconnect_node() for c in self.p2p_connections ]
self.wait_for_disconnections()
self.block_store.close()
self.tx_store.close()
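
Editor's note: with the callback object and the connection merged, TestManager above drops the parallel connections/test_nodes lists and the c.cb indirection. The ping/pong synchronization it relies on can be sketched as a standalone helper (illustrative; TestManager inlines this logic, and send_ping/received_ping_response are comptool's TestNode methods shown in the hunks above):

    from test_framework.mininode import mininode_lock
    from test_framework.util import wait_until

    def sync_with_pings(p2p_connections, nonce):
        """Send a ping to every peer and block until each one has answered it."""
        for p2p in p2p_connections:
            p2p.send_ping(nonce)           # comptool's TestNode tracks nonces in pingMap
        wait_until(lambda: all(p.received_ping_response(nonce) for p in p2p_connections),
                   lock=mininode_lock)
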
diff --git a/test/functional/test_framework/mininode.py b/test/functional/test_framework/mininode.py
index 24c96b5681..9e92a70da1 100755
--- a/test/functional/test_framework/mininode.py
+++ b/test/functional/test_framework/mininode.py
@@ -9,10 +9,8 @@
This python code was modified from ArtForz' public domain half-a-node, as
found in the mini-node branch of http://github.com/jgarzik/pynode.
-NodeConn: an object which manages p2p connectivity to a bitcoin node
-NodeConnCB: a base class that describes the interface for receiving
- callbacks with network messages from a NodeConn
-"""
+P2PConnection: A low-level connection object to a node's P2P interface
+P2PInterface: A high-level interface object for communicating to a node over P2P"""
import asyncore
from collections import defaultdict
from io import BytesIO
@@ -20,10 +18,10 @@ import logging
import socket
import struct
import sys
-import time
from threading import RLock, Thread
from test_framework.messages import *
+from test_framework.util import wait_until
logger = logging.getLogger("TestFramework.mininode")
@@ -57,173 +55,32 @@ MAGIC_BYTES = {
"regtest": b"\xfa\xbf\xb5\xda", # regtest
}
-class NodeConnCB():
- """Callback and helper functions for P2P connection to a bitcoind node.
+class P2PConnection(asyncore.dispatcher):
+ """A low-level connection object to a node's P2P interface.
- Individual testcases should subclass this and override the on_* methods
- if they want to alter message handling behaviour."""
- def __init__(self):
- # Track whether we have a P2P connection open to the node
- self.connected = False
- self.connection = None
-
- # Track number of messages of each type received and the most recent
- # message of each type
- self.message_count = defaultdict(int)
- self.last_message = {}
-
- # A count of the number of ping messages we've sent to the node
- self.ping_counter = 1
-
- # Message receiving methods
-
- def deliver(self, conn, message):
- """Receive message and dispatch message to appropriate callback.
-
- We keep a count of how many of each message type has been received
- and the most recent message of each type."""
- with mininode_lock:
- try:
- command = message.command.decode('ascii')
- self.message_count[command] += 1
- self.last_message[command] = message
- getattr(self, 'on_' + command)(conn, message)
- except:
- print("ERROR delivering %s (%s)" % (repr(message),
- sys.exc_info()[0]))
- raise
-
- # Callback methods. Can be overridden by subclasses in individual test
- # cases to provide custom message handling behaviour.
-
- def on_open(self, conn):
- self.connected = True
-
- def on_close(self, conn):
- self.connected = False
- self.connection = None
-
- def on_addr(self, conn, message): pass
- def on_block(self, conn, message): pass
- def on_blocktxn(self, conn, message): pass
- def on_cmpctblock(self, conn, message): pass
- def on_feefilter(self, conn, message): pass
- def on_getaddr(self, conn, message): pass
- def on_getblocks(self, conn, message): pass
- def on_getblocktxn(self, conn, message): pass
- def on_getdata(self, conn, message): pass
- def on_getheaders(self, conn, message): pass
- def on_headers(self, conn, message): pass
- def on_mempool(self, conn): pass
- def on_pong(self, conn, message): pass
- def on_reject(self, conn, message): pass
- def on_sendcmpct(self, conn, message): pass
- def on_sendheaders(self, conn, message): pass
- def on_tx(self, conn, message): pass
-
- def on_inv(self, conn, message):
- want = msg_getdata()
- for i in message.inv:
- if i.type != 0:
- want.inv.append(i)
- if len(want.inv):
- conn.send_message(want)
-
- def on_ping(self, conn, message):
- conn.send_message(msg_pong(message.nonce))
-
- def on_verack(self, conn, message):
- self.verack_received = True
-
- def on_version(self, conn, message):
- assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
- conn.send_message(msg_verack())
- conn.nServices = message.nServices
-
- # Connection helper methods
-
- def add_connection(self, conn):
- self.connection = conn
-
- def wait_for_disconnect(self, timeout=60):
- test_function = lambda: not self.connected
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
-
- # Message receiving helper methods
-
- def wait_for_block(self, blockhash, timeout=60):
- test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
-
- def wait_for_getdata(self, timeout=60):
- test_function = lambda: self.last_message.get("getdata")
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
-
- def wait_for_getheaders(self, timeout=60):
- test_function = lambda: self.last_message.get("getheaders")
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
-
- def wait_for_inv(self, expected_inv, timeout=60):
- """Waits for an INV message and checks that the first inv object in the message was as expected."""
- if len(expected_inv) > 1:
- raise NotImplementedError("wait_for_inv() will only verify the first inv object")
- test_function = lambda: self.last_message.get("inv") and \
- self.last_message["inv"].inv[0].type == expected_inv[0].type and \
- self.last_message["inv"].inv[0].hash == expected_inv[0].hash
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
-
- def wait_for_verack(self, timeout=60):
- test_function = lambda: self.message_count["verack"]
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
-
- # Message sending helper functions
-
- def send_message(self, message):
- if self.connection:
- self.connection.send_message(message)
- else:
- logger.error("Cannot send message. No connection to node!")
+ This class is responsible for:
- def send_and_ping(self, message):
- self.send_message(message)
- self.sync_with_ping()
+ - opening and closing the TCP connection to the node
+ - reading bytes from and writing bytes to the socket
+ - deserializing and serializing the P2P message header
+ - logging messages as they are sent and received
- # Sync up with the node
- def sync_with_ping(self, timeout=60):
- self.send_message(msg_ping(nonce=self.ping_counter))
- test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
- wait_until(test_function, timeout=timeout, lock=mininode_lock)
- self.ping_counter += 1
-
-class NodeConn(asyncore.dispatcher):
- """The actual NodeConn class
+ This class contains no logic for handling the P2P message payloads. It must be
+ sub-classed and the on_message() callback overridden."""
- This class provides an interface for a p2p connection to a specified node."""
+ def __init__(self):
+ super().__init__(map=mininode_socket_map)
- def __init__(self, dstaddr, dstport, callback, net="regtest", services=NODE_NETWORK|NODE_WITNESS, send_version=True):
- asyncore.dispatcher.__init__(self, map=mininode_socket_map)
+ def peer_connect(self, dstaddr, dstport, net="regtest"):
self.dstaddr = dstaddr
self.dstport = dstport
self.create_socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
self.sendbuf = b""
self.recvbuf = b""
- self.last_sent = 0
self.state = "connecting"
self.network = net
- self.cb = callback
self.disconnect = False
- self.nServices = 0
-
- if send_version:
- # stuff version msg into sendbuf
- vt = msg_version()
- vt.nServices = services
- vt.addrTo.ip = self.dstaddr
- vt.addrTo.port = self.dstport
- vt.addrFrom.ip = "0.0.0.0"
- vt.addrFrom.port = 0
- self.send_message(vt, True)
logger.info('Connecting to Bitcoin Node: %s:%d' % (self.dstaddr, self.dstport))
@@ -232,15 +89,22 @@ class NodeConn(asyncore.dispatcher):
except:
self.handle_close()
+ def peer_disconnect(self):
+ # Connection could have already been closed by other end.
+ if self.state == "connected":
+ self.disconnect_node()
+
# Connection and disconnection methods
def handle_connect(self):
+ """asyncore callback when a connection is opened."""
if self.state != "connected":
logger.debug("Connected & Listening: %s:%d" % (self.dstaddr, self.dstport))
self.state = "connected"
- self.cb.on_open(self)
+ self.on_open()
def handle_close(self):
+ """asyncore callback when a connection is closed."""
logger.debug("Closing connection to: %s:%d" % (self.dstaddr, self.dstport))
self.state = "closed"
self.recvbuf = b""
@@ -249,10 +113,10 @@ class NodeConn(asyncore.dispatcher):
self.close()
except:
pass
- self.cb.on_close(self)
+ self.on_close()
def disconnect_node(self):
- """ Disconnect the p2p connection.
+ """Disconnect the p2p connection.
Called by the test logic thread. Causes the p2p connection
to be disconnected on the next iteration of the asyncore loop."""
@@ -260,16 +124,19 @@ class NodeConn(asyncore.dispatcher):
# Socket read methods
- def readable(self):
- return True
-
def handle_read(self):
+ """asyncore callback when data is read from the socket."""
t = self.recv(8192)
if len(t) > 0:
self.recvbuf += t
- self.got_data()
+ self._on_data()
+
+ def _on_data(self):
+ """Try to read P2P messages from the recv buffer.
- def got_data(self):
+ This method reads data from the buffer in a loop. It deserializes,
+ parses and verifies the P2P header, then passes the P2P payload to
+ the on_message callback for processing."""
try:
while True:
if len(self.recvbuf) < 4:
@@ -294,26 +161,27 @@ class NodeConn(asyncore.dispatcher):
f = BytesIO(msg)
t = MESSAGEMAP[command]()
t.deserialize(f)
- self.got_message(t)
+ self._log_message("receive", t)
+ self.on_message(t)
except Exception as e:
logger.exception('Error reading message:', repr(e))
raise
- def got_message(self, message):
- if self.last_sent + 30 * 60 < time.time():
- self.send_message(MESSAGEMAP[b'ping']())
- self._log_message("receive", message)
- self.cb.deliver(self, message)
+ def on_message(self, message):
+ """Callback for processing a P2P payload. Must be overridden by derived class."""
+ raise NotImplementedError
# Socket write methods
def writable(self):
+ """asyncore method to determine whether the handle_write() callback should be called on the next loop."""
with mininode_lock:
pre_connection = self.state == "connecting"
length = len(self.sendbuf)
return (length > 0 or pre_connection)
def handle_write(self):
+ """asyncore callback when data should be written to the socket."""
with mininode_lock:
# asyncore does not expose socket connection, only the first read/write
# event, thus we must check connection manually here to know when we
@@ -331,6 +199,10 @@ class NodeConn(asyncore.dispatcher):
self.sendbuf = self.sendbuf[sent:]
def send_message(self, message, pushbuf=False):
+ """Send a P2P message over the socket.
+
+ This method takes a P2P payload, builds the P2P header and adds
+ the message to the send buffer to be sent over the socket."""
if self.state != "connected" and not pushbuf:
raise IOError('Not connected, no pushbuf')
self._log_message("send", message)
@@ -353,11 +225,11 @@ class NodeConn(asyncore.dispatcher):
self.sendbuf = tmsg
else:
self.sendbuf += tmsg
- self.last_sent = time.time()
# Class utility methods
def _log_message(self, direction, msg):
+ """Logs a message being sent or received over the connection."""
if direction == "send":
log_message = "Send message to "
elif direction == "receive":
@@ -368,6 +240,152 @@ class NodeConn(asyncore.dispatcher):
logger.debug(log_message)
+class P2PInterface(P2PConnection):
+ """A high-level P2P interface class for communicating with a Bitcoin node.
+
+ This class provides high-level callbacks for processing P2P message
+ payloads, as well as convenience methods for interacting with the
+ node over P2P.
+
+ Individual testcases should subclass this and override the on_* methods
+ if they want to alter message handling behaviour."""
+ def __init__(self):
+ super().__init__()
+
+ # Track number of messages of each type received and the most recent
+ # message of each type
+ self.message_count = defaultdict(int)
+ self.last_message = {}
+
+ # A count of the number of ping messages we've sent to the node
+ self.ping_counter = 1
+
+ # The network services received from the peer
+ self.nServices = 0
+
+ def peer_connect(self, *args, services=NODE_NETWORK|NODE_WITNESS, send_version=True, **kwargs):
+ super().peer_connect(*args, **kwargs)
+
+ if send_version:
+ # Send a version msg
+ vt = msg_version()
+ vt.nServices = services
+ vt.addrTo.ip = self.dstaddr
+ vt.addrTo.port = self.dstport
+ vt.addrFrom.ip = "0.0.0.0"
+ vt.addrFrom.port = 0
+ self.send_message(vt, True)
+
+ # Message receiving methods
+
+ def on_message(self, message):
+ """Receive message and dispatch message to appropriate callback.
+
+ We keep a count of how many of each message type has been received
+ and the most recent message of each type."""
+ with mininode_lock:
+ try:
+ command = message.command.decode('ascii')
+ self.message_count[command] += 1
+ self.last_message[command] = message
+ getattr(self, 'on_' + command)(message)
+ except:
+ print("ERROR delivering %s (%s)" % (repr(message), sys.exc_info()[0]))
+ raise
+
+ # Callback methods. Can be overridden by subclasses in individual test
+ # cases to provide custom message handling behaviour.
+
+ def on_open(self):
+ pass
+
+ def on_close(self):
+ pass
+
+ def on_addr(self, message): pass
+ def on_block(self, message): pass
+ def on_blocktxn(self, message): pass
+ def on_cmpctblock(self, message): pass
+ def on_feefilter(self, message): pass
+ def on_getaddr(self, message): pass
+ def on_getblocks(self, message): pass
+ def on_getblocktxn(self, message): pass
+ def on_getdata(self, message): pass
+ def on_getheaders(self, message): pass
+ def on_headers(self, message): pass
+ def on_mempool(self, message): pass
+ def on_pong(self, message): pass
+ def on_reject(self, message): pass
+ def on_sendcmpct(self, message): pass
+ def on_sendheaders(self, message): pass
+ def on_tx(self, message): pass
+
+ def on_inv(self, message):
+ want = msg_getdata()
+ for i in message.inv:
+ if i.type != 0:
+ want.inv.append(i)
+ if len(want.inv):
+ self.send_message(want)
+
+ def on_ping(self, message):
+ self.send_message(msg_pong(message.nonce))
+
+ def on_verack(self, message):
+ self.verack_received = True
+
+ def on_version(self, message):
+ assert message.nVersion >= MIN_VERSION_SUPPORTED, "Version {} received. Test framework only supports versions greater than {}".format(message.nVersion, MIN_VERSION_SUPPORTED)
+ self.send_message(msg_verack())
+ self.nServices = message.nServices
+
+ # Connection helper methods
+
+ def wait_for_disconnect(self, timeout=60):
+ test_function = lambda: self.state != "connected"
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+
+ # Message receiving helper methods
+
+ def wait_for_block(self, blockhash, timeout=60):
+ test_function = lambda: self.last_message.get("block") and self.last_message["block"].block.rehash() == blockhash
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+
+ def wait_for_getdata(self, timeout=60):
+ test_function = lambda: self.last_message.get("getdata")
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+
+ def wait_for_getheaders(self, timeout=60):
+ test_function = lambda: self.last_message.get("getheaders")
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+
+ def wait_for_inv(self, expected_inv, timeout=60):
+ """Waits for an INV message and checks that the first inv object in the message was as expected."""
+ if len(expected_inv) > 1:
+ raise NotImplementedError("wait_for_inv() will only verify the first inv object")
+ test_function = lambda: self.last_message.get("inv") and \
+ self.last_message["inv"].inv[0].type == expected_inv[0].type and \
+ self.last_message["inv"].inv[0].hash == expected_inv[0].hash
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+
+ def wait_for_verack(self, timeout=60):
+ test_function = lambda: self.message_count["verack"]
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+
+ # Message sending helper functions
+
+ def send_and_ping(self, message):
+ self.send_message(message)
+ self.sync_with_ping()
+
+ # Sync up with the node
+ def sync_with_ping(self, timeout=60):
+ self.send_message(msg_ping(nonce=self.ping_counter))
+ test_function = lambda: self.last_message.get("pong") and self.last_message["pong"].nonce == self.ping_counter
+ wait_until(test_function, timeout=timeout, lock=mininode_lock)
+ self.ping_counter += 1
+
+
# Keep our own socket map for asyncore, so that we can track disconnects
# ourselves (to workaround an issue with closing an asyncore socket when
# using select)
@@ -375,10 +393,10 @@ mininode_socket_map = dict()
# One lock for synchronizing all data access between the networking thread (see
# NetworkThread below) and the thread running the test logic. For simplicity,
-# NodeConn acquires this lock whenever delivering a message to a NodeConnCB,
+# P2PConnection acquires this lock whenever delivering a message to a P2PInterface,
# and whenever adding anything to the send buffer (in send_message()). This
# lock should be acquired in the thread running the test logic to synchronize
-# access to any data shared with the NodeConnCB or NodeConn.
+# access to any data shared with the P2PInterface or P2PConnection.
mininode_lock = RLock()
class NetworkThread(Thread):
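
Editor's note: the split above leaves P2PConnection responsible only for transport and framing; all payload handling must come from a subclass's on_message(). A minimal sketch of such a subclass, using a hypothetical MessageCounter name (on_open/on_close are only provided by P2PInterface, so a raw P2PConnection subclass supplies its own):

    from collections import defaultdict
    from test_framework.mininode import P2PConnection

    class MessageCounter(P2PConnection):
        """Count every deserialized P2P payload; no high-level helpers involved."""
        def __init__(self):
            super().__init__()
            self.seen = defaultdict(int)

        def on_open(self):
            pass                           # called by handle_connect()

        def on_close(self):
            pass                           # called by handle_close()

        def on_message(self, message):
            self.seen[message.command.decode('ascii')] += 1

In practice most tests subclass P2PInterface instead, since it already implements the version/verack handshake and ping handling shown above.
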
diff --git a/test/functional/test_framework/test_framework.py b/test/functional/test_framework/test_framework.py
index 4590b4c650..54fe689686 100755
--- a/test/functional/test_framework/test_framework.py
+++ b/test/functional/test_framework/test_framework.py
@@ -4,7 +4,6 @@
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
-from collections import deque
from enum import Enum
import logging
import optparse
@@ -149,32 +148,19 @@ class BitcoinTestFramework():
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
- if os.getenv("PYTHON_DEBUG", ""):
- # Dump the end of the debug logs, to aid in debugging rare
- # travis failures.
- import glob
- filenames = [self.options.tmpdir + "/test_framework.log"]
- filenames += glob.glob(self.options.tmpdir + "/node*/regtest/debug.log")
- MAX_LINES_TO_PRINT = 1000
- for fn in filenames:
- try:
- with open(fn, 'r') as f:
- print("From", fn, ":")
- print("".join(deque(f, MAX_LINES_TO_PRINT)))
- except OSError:
- print("Opening file %s failed." % fn)
- traceback.print_exc()
if success == TestStatus.PASSED:
self.log.info("Tests successful")
- sys.exit(TEST_EXIT_PASSED)
+ exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
- sys.exit(TEST_EXIT_SKIPPED)
+ exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
- logging.shutdown()
- sys.exit(TEST_EXIT_FAILED)
+ self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
+ exit_code = TEST_EXIT_FAILED
+ logging.shutdown()
+ sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
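
Editor's note: the test_framework.py hunk above routes all three outcomes through a single exit path so that logging.shutdown() always runs before sys.exit(). The same pattern in isolation (exit-code values are illustrative, not the framework's constants):

    import logging
    import sys

    def finish(status):
        """Pick the exit code first, then shut logging down exactly once."""
        exit_code = {"passed": 0, "skipped": 77}.get(status, 1)
        logging.shutdown()
        sys.exit(exit_code)
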
diff --git a/test/functional/test_framework/test_node.py b/test/functional/test_framework/test_node.py
index 34b458482a..a9248c764e 100755
--- a/test/functional/test_framework/test_node.py
+++ b/test/functional/test_framework/test_node.py
@@ -14,7 +14,6 @@ import subprocess
import time
from .authproxy import JSONRPCException
-from .mininode import NodeConn
from .util import (
assert_equal,
get_rpc_proxy,
@@ -158,7 +157,7 @@ class TestNode():
self.encryptwallet(passphrase)
self.wait_until_stopped()
- def add_p2p_connection(self, p2p_conn, **kwargs):
+ def add_p2p_connection(self, p2p_conn, *args, **kwargs):
"""Add a p2p connection to the node.
This method adds the p2p connection to the self.p2ps list and also
@@ -167,9 +166,9 @@ class TestNode():
kwargs['dstport'] = p2p_port(self.index)
if 'dstaddr' not in kwargs:
kwargs['dstaddr'] = '127.0.0.1'
+
+ p2p_conn.peer_connect(*args, **kwargs)
self.p2ps.append(p2p_conn)
- kwargs.update({'callback': p2p_conn})
- p2p_conn.add_connection(NodeConn(**kwargs))
return p2p_conn
@@ -185,10 +184,8 @@ class TestNode():
def disconnect_p2ps(self):
"""Close all p2p connections to the node."""
for p in self.p2ps:
- # Connection could have already been closed by other end.
- if p.connection is not None:
- p.connection.disconnect_node()
- self.p2ps = []
+ p.peer_disconnect()
+ del self.p2ps[:]
class TestNodeCLI():
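
Editor's note: after the test_node.py change above, TestNode.add_p2p_connection() calls peer_connect() on the P2PInterface directly and disconnect_p2ps() calls peer_disconnect(), so no NodeConn object is ever created by the test. A typical wiring sequence sketched from the methods shown in these hunks (node is assumed to be a running TestNode, e.g. self.nodes[0]):

    p2p = node.add_p2p_connection(P2PInterface())  # peer_connect() fills in dstaddr/dstport
    NetworkThread().start()                        # run the asyncore loop in a thread
    p2p.wait_for_verack()                          # handshake handled by P2PInterface
    p2p.sync_with_ping()                           # round-trip to be sure the node caught up
    node.disconnect_p2ps()                         # peer_disconnect() on every registered p2p
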
diff --git a/test/functional/test_runner.py b/test/functional/test_runner.py
index d953e1585c..e38146a79a 100755
--- a/test/functional/test_runner.py
+++ b/test/functional/test_runner.py
@@ -15,6 +15,7 @@ For a description of arguments recognized by test scripts, see
"""
import argparse
+from collections import deque
import configparser
import datetime
import os
@@ -126,6 +127,7 @@ BASE_SCRIPTS= [
'p2p-fingerprint.py',
'uacomment.py',
'p2p-acceptblock.py',
+ 'feature_logging.py',
]
EXTENDED_SCRIPTS = [
@@ -174,6 +176,7 @@ def main():
epilog='''
Help text and arguments for individual test script:''',
formatter_class=argparse.RawTextHelpFormatter)
+ parser.add_argument('--combinedlogslen', '-c', type=int, default=0, help='print a combined log (of length n lines) from all test nodes and test framework to the console on failure.')
parser.add_argument('--coverage', action='store_true', help='generate a basic coverage report for the RPC interface')
parser.add_argument('--exclude', '-x', help='specify a comma-separated-list of scripts to exclude.')
parser.add_argument('--extended', action='store_true', help='run the extended test suite in addition to the basic tests')
@@ -266,9 +269,9 @@ def main():
if not args.keepcache:
shutil.rmtree("%s/test/cache" % config["environment"]["BUILDDIR"], ignore_errors=True)
- run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args)
+ run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], tmpdir, args.jobs, args.coverage, passon_args, args.combinedlogslen)
-def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[]):
+def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_coverage=False, args=[], combined_logs_len=0):
# Warn if bitcoind is already running (unix only)
try:
if subprocess.check_output(["pidof", "bitcoind"]) is not None:
@@ -314,7 +317,7 @@ def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_cove
max_len_name = len(max(test_list, key=len))
for _ in range(len(test_list)):
- test_result, stdout, stderr = job_queue.get_next()
+ test_result, testdir, stdout, stderr = job_queue.get_next()
test_results.append(test_result)
if test_result.status == "Passed":
@@ -325,6 +328,14 @@ def run_tests(test_list, src_dir, build_dir, exeext, tmpdir, jobs=1, enable_cove
print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
+ if combined_logs_len and os.path.isdir(testdir):
+ # Print the final `combinedlogslen` lines of the combined logs
+ print('{}Combine the logs and print the last {} lines ...{}'.format(BOLD[1], combined_logs_len, BOLD[0]))
+ print('\n============')
+ print('{}Combined log for {}:{}'.format(BOLD[1], testdir, BOLD[0]))
+ print('============\n')
+ combined_logs, _ = subprocess.Popen([os.path.join(tests_dir, 'combine_logs.py'), '-c', testdir], universal_newlines=True, stdout=subprocess.PIPE).communicate()
+ print("\n".join(deque(combined_logs.splitlines(), combined_logs_len)))
print_results(test_results, max_len_name, (int(time.time() - time0)))
@@ -389,13 +400,15 @@ class TestHandler:
log_stdout = tempfile.SpooledTemporaryFile(max_size=2**16)
log_stderr = tempfile.SpooledTemporaryFile(max_size=2**16)
test_argv = t.split()
- tmpdir = ["--tmpdir=%s/%s_%s" % (self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)]
+ testdir = "{}/{}_{}".format(self.tmpdir, re.sub(".py$", "", test_argv[0]), portseed)
+ tmpdir_arg = ["--tmpdir={}".format(testdir)]
self.jobs.append((t,
time.time(),
- subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir,
+ subprocess.Popen([self.tests_dir + test_argv[0]] + test_argv[1:] + self.flags + portseed_arg + tmpdir_arg,
universal_newlines=True,
stdout=log_stdout,
stderr=log_stderr),
+ testdir,
log_stdout,
log_stderr))
if not self.jobs:
@@ -404,7 +417,7 @@ class TestHandler:
# Return first proc that finishes
time.sleep(.5)
for j in self.jobs:
- (name, time0, proc, log_out, log_err) = j
+ (name, time0, proc, testdir, log_out, log_err) = j
if os.getenv('TRAVIS') == 'true' and int(time.time() - time0) > 20 * 60:
# In travis, timeout individual tests after 20 minutes (to stop tests hanging and not
# providing useful output.
@@ -422,7 +435,7 @@ class TestHandler:
self.num_running -= 1
self.jobs.remove(j)
- return TestResult(name, status, int(time.time() - time0)), stdout, stderr
+ return TestResult(name, status, int(time.time() - time0)), testdir, stdout, stderr
print('.', end='', flush=True)
class TestResult():
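
Editor's note: the --combinedlogslen path above trims the combined log with a bounded deque rather than slicing a list by hand. The trick in isolation (helper name is ours, not the runner's):

    from collections import deque

    def tail_lines(text, n):
        """Return the last n lines of text, as run_tests() does for combined logs."""
        return "\n".join(deque(text.splitlines(), n))

    # e.g. tail_lines(combined_logs, 200) prints only the final 200 lines on failure.
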